Debug Counters Implementation - Clean History
Major Features:
- Debug counter infrastructure for Refill Stage tracking
- Free Pipeline counters (ss_local, ss_remote, tls_sll)
- Diagnostic counters for early-return analysis
- Unified larson.sh benchmark runner with profiles
- Phase 6-3 regression analysis documentation

Bug Fixes:
- Fix SuperSlab being disabled by default (HAKMEM_TINY_USE_SUPERSLAB)
- Fix profile variable naming consistency
- Add .gitignore patterns for large files

Performance:
- Phase 6-3: 4.79 M ops/s (has OOM risk)
- With SuperSlab: 3.13 M ops/s (+19% improvement)

This is a clean repository without large log files.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
analyze_final.py (new file, 125 lines):
#!/usr/bin/env python3
"""
analyze_final.py - Final analysis with jemalloc/mimalloc
"""

import csv
import sys
from collections import defaultdict
import statistics


def load_results(filename):
    """Load CSV results"""
    data = defaultdict(lambda: defaultdict(list))

    with open(filename, 'r') as f:
        reader = csv.DictReader(f)
        for row in reader:
            allocator = row['allocator']
            scenario = row['scenario']
            avg_ns = int(row['avg_ns'])
            soft_pf = int(row['soft_pf'])

            data[scenario][allocator].append({
                'avg_ns': avg_ns,
                'soft_pf': soft_pf,
            })

    return data


def analyze(data):
    """Analyze with 5 allocators"""
    print("=" * 100)
    print("🔥 FINAL BATTLE: hakmem vs system vs jemalloc vs mimalloc (50 runs)")
    print("=" * 100)
    print()

    for scenario in ['json', 'mir', 'vm', 'mixed']:
        print(f"## {scenario.upper()} Scenario")
        print("-" * 100)

        allocators = ['hakmem-baseline', 'hakmem-evolving', 'system', 'jemalloc', 'mimalloc']

        # Header
        print(f"{'Allocator':<20} {'Median (ns)':<15} {'P95 (ns)':<15} {'P99 (ns)':<15} {'vs Best':<15}")
        print("-" * 100)

        results = {}
        for allocator in allocators:
            if allocator not in data[scenario]:
                continue

            latencies = [r['avg_ns'] for r in data[scenario][allocator]]

            if not latencies:
                continue

            median_ns = statistics.median(latencies)
            p95_ns = statistics.quantiles(latencies, n=20)[18] if len(latencies) >= 20 else max(latencies)
            p99_ns = statistics.quantiles(latencies, n=100)[98] if len(latencies) >= 100 else max(latencies)

            results[allocator] = median_ns

        # Find winner
        if results:
            best_allocator = min(results, key=results.get)
            best_time = results[best_allocator]

            for allocator in allocators:
                if allocator not in results:
                    continue

                median_ns = results[allocator]
                latencies = [r['avg_ns'] for r in data[scenario][allocator]]
                p95_ns = statistics.quantiles(latencies, n=20)[18] if len(latencies) >= 20 else max(latencies)
                p99_ns = statistics.quantiles(latencies, n=100)[98] if len(latencies) >= 100 else max(latencies)

                if allocator == best_allocator:
                    vs_best = "🥇 WINNER"
                else:
                    slowdown_pct = ((median_ns - best_time) / best_time) * 100
                    vs_best = f"+{slowdown_pct:.1f}%"

                print(f"{allocator:<20} {median_ns:<15.1f} {p95_ns:<15.1f} {p99_ns:<15.1f} {vs_best:<15}")

        print()

    # Overall summary
    print("=" * 100)
    print("📊 OVERALL SUMMARY")
    print("=" * 100)

    overall_scores = defaultdict(int)

    for scenario in ['json', 'mir', 'vm', 'mixed']:
        allocators = ['hakmem-baseline', 'hakmem-evolving', 'system', 'jemalloc', 'mimalloc']
        results = {}

        for allocator in allocators:
            if allocator in data[scenario] and data[scenario][allocator]:
                latencies = [r['avg_ns'] for r in data[scenario][allocator]]
                results[allocator] = statistics.median(latencies)

        if results:
            sorted_allocators = sorted(results.items(), key=lambda x: x[1])

            for rank, (allocator, _) in enumerate(sorted_allocators):
                points = len(sorted_allocators) - rank
                overall_scores[allocator] += points

    print("\nPoints System (5 points for 1st, 4 for 2nd, etc.):\n")
    sorted_scores = sorted(overall_scores.items(), key=lambda x: x[1], reverse=True)

    for rank, (allocator, points) in enumerate(sorted_scores, 1):
        medal = "🥇" if rank == 1 else "🥈" if rank == 2 else "🥉" if rank == 3 else " "
        print(f"{medal} #{rank}: {allocator:<20} {points} points")

    print()


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print(f"Usage: {sys.argv[0]} <results.csv>")
        sys.exit(1)

    data = load_results(sys.argv[1])
    analyze(data)
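
For reference, a minimal usage sketch: the column names (allocator, scenario, avg_ns, soft_pf) and the scenario/allocator labels match what the parser above reads, but the numeric values and the results.csv filename are invented here purely for illustration, and the snippet assumes the script is saved as analyze_final.py in the current directory.

# Sketch: write a tiny results.csv with the columns load_results() expects,
# then run the script on it. All numbers below are placeholders.
import csv
import subprocess

rows = [
    # (allocator, scenario, avg_ns, soft_pf) - illustrative values only
    ('hakmem-baseline', 'json', 210, 12),
    ('hakmem-evolving', 'json', 195, 10),
    ('system',          'json', 240, 15),
    ('jemalloc',        'json', 205, 11),
    ('mimalloc',        'json', 200,  9),
]

with open('results.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['allocator', 'scenario', 'avg_ns', 'soft_pf'])
    writer.writerows(rows)

# Equivalent to: python3 analyze_final.py results.csv
subprocess.run(['python3', 'analyze_final.py', 'results.csv'], check=True)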