Phase 24-26: Hot path atomic telemetry prune (+2.00% cumulative)

Summary:
- Phase 24 (alloc stats): +0.93% GO
- Phase 25 (free stats): +1.07% GO
- Phase 26 (diagnostics): -0.33% NEUTRAL (code cleanliness)
- Total: 11 atomics compiled out, +2.00% cumulative improvement

Phase 24: OBSERVE tax prune (tiny_class_stats_box.h)
- Added HAKMEM_TINY_CLASS_STATS_COMPILED (default: 0)
- Wrapped 5 stats functions: uc_miss, warm_hit, shared_lock, tls_carve_* (gate pattern sketched below)
- Result: +0.93% (baseline 56.675M vs compiled-in 56.151M ops/s)
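
A minimal sketch of the gating pattern, assuming a hypothetical counter array
(the gate macro and the tiny_class_stats_on_uc_miss hook are from this commit;
the counter layout is illustrative):

    #ifndef HAKMEM_TINY_CLASS_STATS_COMPILED
    #define HAKMEM_TINY_CLASS_STATS_COMPILED 0  /* default: stats compiled out */
    #endif

    #if HAKMEM_TINY_CLASS_STATS_COMPILED
    #include <stdatomic.h>
    #include <stdint.h>
    extern _Atomic uint64_t g_tiny_uc_miss[];   /* hypothetical counter array */
    static inline void tiny_class_stats_on_uc_miss(int class_idx) {
        atomic_fetch_add_explicit(&g_tiny_uc_miss[class_idx], 1,
                                  memory_order_relaxed);
    }
    #else
    /* Compiles to nothing: no atomic RMW and no shared cache-line traffic. */
    static inline void tiny_class_stats_on_uc_miss(int class_idx) { (void)class_idx; }
    #endif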

Phase 25: Tiny free stats prune (tiny_superslab_free.inc.h)
- Added HAKMEM_TINY_FREE_STATS_COMPILED (default: 0)
- Wrapped the g_free_ss_enter atomic in the free hot path (see the sketch below)
- Result: +1.07% (baseline 57.017M vs compiled-in 56.415M ops/s)
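
The free-path fix is the same pattern applied to a single counter; a sketch,
with the surrounding free logic elided (g_free_ss_enter and the gate macro are
from this commit, the wrapper function is hypothetical):

    extern _Atomic uint64_t g_free_ss_enter;    /* declared elsewhere in hakmem */

    static inline void tiny_free_stats_on_enter(void) {  /* hypothetical wrapper */
    #if HAKMEM_TINY_FREE_STATS_COMPILED
        atomic_fetch_add_explicit(&g_free_ss_enter, 1, memory_order_relaxed);
    #endif
    }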

Phase 26: Hot path diagnostic atomics prune
- Added 5 compile gates for low-frequency error counters (default-off pattern sketched after this list):
  - HAKMEM_TINY_C7_FREE_COUNT_COMPILED
  - HAKMEM_TINY_HDR_MISMATCH_LOG_COMPILED
  - HAKMEM_TINY_HDR_META_MISMATCH_COMPILED
  - HAKMEM_TINY_METRIC_BAD_CLASS_COMPILED
  - HAKMEM_TINY_HDR_META_FAST_COMPILED
- Result: -0.33% NEUTRAL (within noise, kept for cleanliness)
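
Each gate follows the same default-off convention; one is sketched here (the
macro name is from this commit, the counter and config-header placement are
assumed):

    #ifndef HAKMEM_TINY_METRIC_BAD_CLASS_COMPILED
    #define HAKMEM_TINY_METRIC_BAD_CLASS_COMPILED 0  /* diagnostics off by default */
    #endif

    #if HAKMEM_TINY_METRIC_BAD_CLASS_COMPILED
    #define TINY_METRIC_BAD_CLASS() \
        atomic_fetch_add_explicit(&g_tiny_metric_bad_class, 1, memory_order_relaxed)
    #else
    #define TINY_METRIC_BAD_CLASS() ((void)0)   /* hypothetical call-site macro */
    #endif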

Alignment with mimalloc principles:
- "No atomics on hot path" - telemetry moved to compile-time opt-in
- Fixed per-op tax elimination
- Production builds: maximum performance (atomics compiled-out)
- Research builds: full diagnostics (COMPILED=1; see the sketch below)
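
The two levels compose: the compile-time gate decides whether the measurement
code exists at all, and a runtime env check (visible in the diff below) decides
whether it runs. A sketch of unified_cache_measure_enabled, assuming the
one-time initialization details:

    #include <stdlib.h>

    #if HAKMEM_TINY_UNIFIED_CACHE_MEASURE_COMPILED
    static inline int unified_cache_measure_enabled(void) {
        static int g_measure = -1;              /* -1 = env not read yet (assumed) */
        if (g_measure < 0) {
            const char* e = getenv("HAKMEM_MEASURE_UNIFIED_CACHE");
            g_measure = (e && e[0] == '1');
        }
        return g_measure;
    }
    #endif
    /* Callers wrap both the check and the counters in the same #if, so a
       production build contains no trace of the measurement path. */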

Generated with Claude Code
https://claude.com/claude-code

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
Author: Moe Charm (CI)
Date: 2025-12-16 05:35:11 +09:00
Parent: 4d9429e14c
Commit: 8052e8b320
32 changed files with 4979 additions and 2204 deletions


@@ -41,6 +41,7 @@
// ============================================================================
// Global atomic counters for unified cache performance measurement
// ENV: HAKMEM_MEASURE_UNIFIED_CACHE=1 to enable (default: OFF)
+#if HAKMEM_TINY_UNIFIED_CACHE_MEASURE_COMPILED
_Atomic uint64_t g_unified_cache_hits_global = 0;
_Atomic uint64_t g_unified_cache_misses_global = 0;
_Atomic uint64_t g_unified_cache_refill_cycles_global = 0;
@@ -73,6 +74,7 @@ static inline int unified_cache_measure_enabled(void) {
}
return g_measure;
}
+#endif
// Phase 23-E: Forward declarations
extern __thread TinyTLSSlab g_tls_slabs[TINY_NUM_CLASSES]; // From hakmem_tiny_superslab.c
@@ -521,7 +523,7 @@ static inline int unified_refill_validate_base(int class_idx,
//
// This eliminates redundant header writes in hot allocation path.
static inline void unified_cache_prefill_headers(int class_idx, TinyUnifiedCache* cache, int start_tail, int count) {
-#if HAKMEM_TINY_HEADER_CLASSIDX
+#if HAKMEM_TINY_HEADER_CLASSIDX && HAKMEM_TINY_HEADER_WRITE_ONCE_COMPILED
// Only prefill if write-once optimization is enabled
if (!tiny_header_write_once_enabled()) return;
@@ -555,12 +557,14 @@ static inline void unified_cache_prefill_headers(int class_idx, TinyUnifiedCache
// Design: Direct carve from SuperSlab to array (no TLS SLL intermediate layer)
// Warm Pool Integration: PRIORITIZE warm pool, use superslab_refill as fallback
hak_base_ptr_t unified_cache_refill(int class_idx) {
+#if HAKMEM_TINY_UNIFIED_CACHE_MEASURE_COMPILED
// Measure refill cost if enabled
uint64_t start_cycles = 0;
int measure = unified_cache_measure_enabled();
if (measure) {
start_cycles = read_tsc();
}
+#endif
// Initialize warm pool on first use (per-thread)
tiny_warm_pool_init_once();
@@ -637,6 +641,7 @@ hak_base_ptr_t unified_cache_refill(int class_idx) {
#endif
tiny_class_stats_on_uc_miss(class_idx);
+#if HAKMEM_TINY_UNIFIED_CACHE_MEASURE_COMPILED
if (measure) {
uint64_t end_cycles = read_tsc();
uint64_t delta = end_cycles - start_cycles;
@@ -649,6 +654,7 @@ hak_base_ptr_t unified_cache_refill(int class_idx) {
atomic_fetch_add_explicit(&g_unified_cache_misses_by_class[class_idx],
1, memory_order_relaxed);
}
+#endif
return HAK_BASE_FROM_RAW(first);
}
@@ -809,6 +815,7 @@ hak_base_ptr_t unified_cache_refill(int class_idx) {
#endif
tiny_class_stats_on_uc_miss(class_idx);
+#if HAKMEM_TINY_UNIFIED_CACHE_MEASURE_COMPILED
if (measure) {
uint64_t end_cycles = read_tsc();
uint64_t delta = end_cycles - start_cycles;
@@ -822,6 +829,7 @@ hak_base_ptr_t unified_cache_refill(int class_idx) {
atomic_fetch_add_explicit(&g_unified_cache_misses_by_class[class_idx],
1, memory_order_relaxed);
}
+#endif
return HAK_BASE_FROM_RAW(first);
}
@@ -958,6 +966,7 @@ hak_base_ptr_t unified_cache_refill(int class_idx) {
tiny_class_stats_on_uc_miss(class_idx);
// Measure refill cycles
+#if HAKMEM_TINY_UNIFIED_CACHE_MEASURE_COMPILED
if (measure) {
uint64_t end_cycles = read_tsc();
uint64_t delta = end_cycles - start_cycles;
@@ -971,6 +980,7 @@ hak_base_ptr_t unified_cache_refill(int class_idx) {
atomic_fetch_add_explicit(&g_unified_cache_misses_by_class[class_idx],
1, memory_order_relaxed);
}
+#endif
return HAK_BASE_FROM_RAW(first); // Return first block (BASE pointer)
}
@@ -979,6 +989,9 @@ hak_base_ptr_t unified_cache_refill(int class_idx) {
// Performance Measurement: Print Statistics
// ============================================================================
void unified_cache_print_measurements(void) {
+#if !HAKMEM_TINY_UNIFIED_CACHE_MEASURE_COMPILED
+return;
+#else
if (!unified_cache_measure_enabled()) {
return; // Measurement disabled, nothing to print
}
@@ -1039,4 +1052,5 @@ void unified_cache_print_measurements(void) {
}
fprintf(stderr, "========================================\n\n");
+#endif
}