Moe Charm (CI) · 860991ee50 · Performance Measurement Framework: Unified Cache, TLS SLL, Shared Pool Analysis
## Summary

Implemented production-grade measurement infrastructure to quantify the top three suspected bottlenecks:
- Unified cache hit/miss rates + refill cost
- TLS SLL usage patterns
- Shared pool lock contention distribution

## Changes

### 1. Unified Cache Metrics (tiny_unified_cache.h/c)
- Added atomic counters:
  - g_unified_cache_hits_global: successful cache pops
  - g_unified_cache_misses_global: refill triggers
  - g_unified_cache_refill_cycles_global: refill cost in CPU cycles (rdtsc)
- Instrumented `unified_cache_pop_or_refill()` to count hits (see the sketch below)
- Instrumented `unified_cache_refill()` with cycle measurement
- ENV-gated: HAKMEM_MEASURE_UNIFIED_CACHE=1 (default: off)
- Added unified_cache_print_measurements() output function
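
The hit-side counter is bumped in `unified_cache_pop_or_refill()` (presumably in tiny_unified_cache.h, since it does not appear in the .c file below). A minimal self-contained sketch of that pattern; the struct layout and helper names here are stand-ins, only the counter semantics and the HAKMEM_MEASURE_UNIFIED_CACHE gate come from this change:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

// Stand-ins for the real symbols in tiny_unified_cache.c, defined locally so
// the sketch compiles on its own. Field names mirror the ring buffer used by
// unified_cache_refill() in the file below; the struct itself is an assumption.
static _Atomic uint64_t g_hits_sketch = 0;

static inline int measure_enabled_sketch(void) {
    static int g = -1;
    if (__builtin_expect(g == -1, 0)) {
        const char* e = getenv("HAKMEM_MEASURE_UNIFIED_CACHE");
        g = (e && *e && *e != '0') ? 1 : 0;
    }
    return g;
}

typedef struct {
    void**   slots;
    uint16_t head, tail, mask;
} CacheSketch;

// Fast-path pop: bump the hit counter only when measurement is enabled,
// using a relaxed atomic add so the hot path stays cheap.
static inline void* cache_pop_sketch(CacheSketch* c) {
    if (c->head == c->tail) return NULL;   // empty: caller falls back to the refill path
    void* p = c->slots[c->head];
    c->head = (uint16_t)((c->head + 1) & c->mask);
    if (__builtin_expect(measure_enabled_sketch(), 0))
        atomic_fetch_add_explicit(&g_hits_sketch, 1, memory_order_relaxed);
    return p;
}
```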

### 2. TLS SLL Metrics (tls_sll_box.h)
- Added atomic counters:
  - g_tls_sll_push_count_global: total pushes
  - g_tls_sll_pop_count_global: successful pops
  - g_tls_sll_pop_empty_count_global: empty list conditions
- Instrumented push/pop paths (see the sketch below)
- Added tls_sll_print_measurements() output function
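
tls_sll_box.h is not part of this file view; below is a minimal, hypothetical sketch of the push/pop instrumentation described above. The list layout and function names are invented for illustration (the real TLS SLL layout may differ); only the three counter names and the single shared ENV gate come from this change.

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stddef.h>

// Counters added by this change (names as listed above).
extern _Atomic uint64_t g_tls_sll_push_count_global;
extern _Atomic uint64_t g_tls_sll_pop_count_global;
extern _Atomic uint64_t g_tls_sll_pop_empty_count_global;
extern int measure_enabled_sketch(void); // stand-in for the shared ENV gate

// Hypothetical intrusive singly-linked freelist head; illustration only.
typedef struct { void* head; } TlsSllSketch;

static inline void tls_sll_push_sketch(TlsSllSketch* l, void* blk) {
    *(void**)blk = l->head;                                  // link block in
    l->head = blk;
    if (__builtin_expect(measure_enabled_sketch(), 0))
        atomic_fetch_add_explicit(&g_tls_sll_push_count_global, 1, memory_order_relaxed);
}

static inline void* tls_sll_pop_sketch(TlsSllSketch* l) {
    if (!l->head) {                                          // empty-list condition
        if (__builtin_expect(measure_enabled_sketch(), 0))
            atomic_fetch_add_explicit(&g_tls_sll_pop_empty_count_global, 1, memory_order_relaxed);
        return NULL;
    }
    void* blk = l->head;
    l->head = *(void**)blk;                                  // successful pop
    if (__builtin_expect(measure_enabled_sketch(), 0))
        atomic_fetch_add_explicit(&g_tls_sll_pop_count_global, 1, memory_order_relaxed);
    return blk;
}
```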

### 3. Shared Pool Contention (hakmem_shared_pool_acquire.c)
- Added atomic counters:
  - g_sp_stage2_lock_acquired_global: Stage 2 locks
  - g_sp_stage3_lock_acquired_global: Stage 3 allocations
  - g_sp_alloc_lock_contention_global: total lock acquisitions
- Instrumented all pthread_mutex_lock calls in hot paths (see the sketch below)
- Added shared_pool_print_measurements() output function
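
hakmem_shared_pool_acquire.c is likewise not shown here; a minimal sketch of what one instrumented Stage 2 lock site could look like. The wrapper function and lock variable are hypothetical; the counters and the shared ENV gate are the ones described above.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

extern _Atomic uint64_t g_sp_stage2_lock_acquired_global;
extern _Atomic uint64_t g_sp_alloc_lock_contention_global;
extern int measure_enabled_sketch(void); // stand-in for the shared ENV gate

// Hypothetical Stage 2 call site: take the pool lock and, when measurement is
// enabled, attribute the acquisition to Stage 2 and to the global total.
static void sp_stage2_lock_sketch(pthread_mutex_t* lock) {
    pthread_mutex_lock(lock);
    if (__builtin_expect(measure_enabled_sketch(), 0)) {
        atomic_fetch_add_explicit(&g_sp_stage2_lock_acquired_global, 1, memory_order_relaxed);
        atomic_fetch_add_explicit(&g_sp_alloc_lock_contention_global, 1, memory_order_relaxed);
    }
}
```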

### 4. Benchmark Integration (bench_random_mixed.c)
- Called all three print functions after the benchmark loop (integration sketched below)
- The print functions are active only when HAKMEM_MEASURE_UNIFIED_CACHE=1 is set
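
The benchmark-side change reduces to three calls after the timing loop; a sketch, with the surrounding benchmark code elided:

```c
// At the end of bench_random_mixed.c, after the benchmark loop.
// Each print function is a no-op unless HAKMEM_MEASURE_UNIFIED_CACHE=1,
// so the calls are safe to leave in unconditionally.
extern void unified_cache_print_measurements(void);
extern void tls_sll_print_measurements(void);
extern void shared_pool_print_measurements(void);

static void bench_report_measurements(void) {
    unified_cache_print_measurements();
    tls_sll_print_measurements();
    shared_pool_print_measurements();
}
```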

## Design Principles

- **Near-zero overhead when disabled**: Inline checks on a cached flag with __builtin_expect hints
- **Atomic relaxed memory order**: Minimal synchronization overhead
- **ENV-gated**: Single flag controls all measurements
- **Production-safe**: Compiles in release builds, no functional changes

## Usage

```bash
HAKMEM_MEASURE_UNIFIED_CACHE=1 ./bench_allocators_hakmem bench_random_mixed_hakmem 1000000 256 42
```

Output (when enabled):
```
========================================
Unified Cache Statistics
========================================
Hits:        1234567
Misses:      56789
Hit Rate:    95.6%
Avg Refill Cycles: 1234

========================================
TLS SLL Statistics
========================================
Total Pushes:     1234567
Total Pops:       345678
Pop Empty Count:  12345
Hit Rate:         98.8%

========================================
Shared Pool Contention Statistics
========================================
Stage 2 Locks:    123456 (33%)
Stage 3 Locks:    234567 (67%)
Total Contention: 357 locks per 1M ops
```

## Next Steps

1. **Enable measurements** and run benchmarks to gather data
2. **Analyze miss rates**: Which bottleneck dominates?
3. **Profile hottest stage**: Focus optimization on top contributor
4. Possible targets:
   - Increase unified cache capacity if the miss rate exceeds 5%
   - Check whether the TLS SLL is effectively unused (potential legacy-code removal)
   - Analyze whether the Stage 2 lock can be replaced with CAS (see the sketch below)
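
On the last target: whether the Stage 2 mutex can become lock-free depends on what its critical section actually protects, which the new counters are meant to clarify first. Purely as a hypothetical illustration of the shape such a change could take, a CAS-based LIFO pop (node type and function invented for this sketch):

```c
#include <stdatomic.h>
#include <stddef.h>

// Hypothetical shared freelist node; not a real HAKMEM type.
typedef struct SpNodeSketch { struct SpNodeSketch* next; } SpNodeSketch;

// Lock-free pop via compare-and-swap instead of pthread_mutex_lock.
// NOTE: a production version must also deal with the ABA problem
// (e.g. tagged pointers), which this sketch ignores.
static SpNodeSketch* sp_stage2_pop_cas_sketch(_Atomic(SpNodeSketch*)* head) {
    SpNodeSketch* old = atomic_load_explicit(head, memory_order_acquire);
    while (old != NULL &&
           !atomic_compare_exchange_weak_explicit(head, &old, old->next,
                                                  memory_order_acq_rel,
                                                  memory_order_acquire)) {
        // On failure, compare_exchange reloads `old` with the current head; retry.
    }
    return old;
}
```

A real conversion would also have to handle ABA and safe memory reclamation, so this is a direction to evaluate against the Stage 2/3 counter data, not a drop-in replacement.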

## Makefile Updates

Added core/box/tiny_route_box.o to:
- OBJS_BASE (test build)
- SHARED_OBJS (shared library)
- BENCH_HAKMEM_OBJS_BASE (benchmark)
- TINY_BENCH_OBJS_BASE (tiny benchmark)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-04 18:26:39 +09:00

hakmem/core/front/tiny_unified_cache.c · 535 lines · 21 KiB · C

// tiny_unified_cache.c - Phase 23: Unified Frontend Cache Implementation
#include "tiny_unified_cache.h"
#include "../tiny_tls.h" // Phase 23-E: TinyTLSSlab, TinySlabMeta
#include "../tiny_box_geometry.h" // Phase 23-E: tiny_stride_for_class, tiny_slab_base_for_geometry
#include "../box/tiny_next_ptr_box.h" // Phase 23-E: tiny_next_read (freelist traversal)
#include "../hakmem_tiny_superslab.h" // Phase 23-E: SuperSlab, superslab_refill()
#include "../superslab/superslab_inline.h" // Phase 23-E: ss_active_add, slab_index_for, ss_slabs_capacity
#include "../hakmem_super_registry.h" // For hak_super_lookup (pointer→SuperSlab)
#include "../box/pagefault_telemetry_box.h" // Phase 24: Box PageFaultTelemetry (Tiny page touch stats)
#include "../hakmem_env_cache.h" // Priority-2: ENV cache (eliminate syscalls)
#include <stdio.h> // fprintf/fflush for stats and debug output
#include <stdlib.h>
#include <string.h>
#include <stdatomic.h>
#include <time.h>
// ============================================================================
// Performance Measurement: Unified Cache (ENV-gated)
// ============================================================================
// Global atomic counters for unified cache performance measurement
// ENV: HAKMEM_MEASURE_UNIFIED_CACHE=1 to enable (default: OFF)
_Atomic uint64_t g_unified_cache_hits_global = 0;
_Atomic uint64_t g_unified_cache_misses_global = 0;
_Atomic uint64_t g_unified_cache_refill_cycles_global = 0;
// Helper: Get cycle count (x86_64 rdtsc)
static inline uint64_t read_tsc(void) {
#if defined(__x86_64__) || defined(_M_X64)
uint32_t lo, hi;
__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
return ((uint64_t)hi << 32) | lo;
#else
// Fallback to clock_gettime for non-x86 platforms
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
#endif
}
// Check if measurement is enabled (cached)
static inline int unified_cache_measure_enabled(void) {
static int g_measure = -1;
if (__builtin_expect(g_measure == -1, 0)) {
const char* e = getenv("HAKMEM_MEASURE_UNIFIED_CACHE");
g_measure = (e && *e && *e != '0') ? 1 : 0;
}
return g_measure;
}
// Phase 23-E: Forward declarations
extern __thread TinyTLSSlab g_tls_slabs[TINY_NUM_CLASSES]; // From hakmem_tiny_superslab.c
// ============================================================================
// TLS Variables (defined here, extern in header)
// ============================================================================
__thread TinyUnifiedCache g_unified_cache[TINY_NUM_CLASSES];
// ============================================================================
// Metrics (Phase 23, optional for debugging)
// ============================================================================
#if !HAKMEM_BUILD_RELEASE
__thread uint64_t g_unified_cache_hit[TINY_NUM_CLASSES] = {0};
__thread uint64_t g_unified_cache_miss[TINY_NUM_CLASSES] = {0};
__thread uint64_t g_unified_cache_push[TINY_NUM_CLASSES] = {0};
__thread uint64_t g_unified_cache_full[TINY_NUM_CLASSES] = {0};
#endif
// ============================================================================
// Phase 8-Step1-Fix: unified_cache_enabled() implementation (non-static)
// ============================================================================
// Enable flag (default: ON, disable with HAKMEM_TINY_UNIFIED_CACHE=0)
int unified_cache_enabled(void) {
// Priority-2: Use cached ENV (eliminate lazy-init static overhead)
static int g_enable = -1;
if (__builtin_expect(g_enable == -1, 0)) {
g_enable = HAK_ENV_TINY_UNIFIED_CACHE();
#if !HAKMEM_BUILD_RELEASE
if (g_enable) {
fprintf(stderr, "[Unified-INIT] unified_cache_enabled() = %d\n", g_enable);
fflush(stderr);
}
#endif
}
return g_enable;
}
// ============================================================================
// Init (called at thread start or lazy on first access)
// ============================================================================
void unified_cache_init(void) {
if (!unified_cache_enabled()) return;
// Layer 2 Defensive Fix: Use __libc_calloc for infrastructure allocations
// Rationale: Cache arrays are infrastructure (not workload), bypass HAKMEM entirely
// This prevents interaction with BenchFast mode and ensures clean separation
extern void* __libc_calloc(size_t, size_t);
// Initialize all classes (C0-C7)
for (int cls = 0; cls < TINY_NUM_CLASSES; cls++) {
if (g_unified_cache[cls].slots != NULL) continue; // Already initialized
size_t cap = unified_capacity(cls);
g_unified_cache[cls].slots = (void**)__libc_calloc(cap, sizeof(void*));
if (!g_unified_cache[cls].slots) {
#if !HAKMEM_BUILD_RELEASE
fprintf(stderr, "[Unified-INIT] Failed to allocate C%d cache (%zu slots)\n", cls, cap);
fflush(stderr);
#endif
continue; // Skip this class, try others
}
g_unified_cache[cls].capacity = (uint16_t)cap;
g_unified_cache[cls].mask = (uint16_t)(cap - 1);
g_unified_cache[cls].head = 0;
g_unified_cache[cls].tail = 0;
#if !HAKMEM_BUILD_RELEASE
fprintf(stderr, "[Unified-INIT] C%d: %zu slots (%zu bytes)\n",
cls, cap, cap * sizeof(void*));
fflush(stderr);
#endif
}
}
// ============================================================================
// Shutdown (called at thread exit, optional)
// ============================================================================
void unified_cache_shutdown(void) {
if (!unified_cache_enabled()) return;
// TODO: Drain caches to SuperSlab before shutdown (prevent leak)
// Layer 2 Defensive Fix: Use __libc_free (symmetric with __libc_calloc in init)
extern void __libc_free(void*);
// Free cache buffers
for (int cls = 0; cls < TINY_NUM_CLASSES; cls++) {
if (g_unified_cache[cls].slots) {
__libc_free(g_unified_cache[cls].slots);
g_unified_cache[cls].slots = NULL;
}
}
#if !HAKMEM_BUILD_RELEASE
fprintf(stderr, "[Unified-SHUTDOWN] All caches freed\n");
fflush(stderr);
#endif
}
// ============================================================================
// Stats (Phase 23 metrics)
// ============================================================================
void unified_cache_print_stats(void) {
if (!unified_cache_enabled()) return;
#if !HAKMEM_BUILD_RELEASE
fprintf(stderr, "\n[Unified-STATS] Unified Cache Metrics:\n");
for (int cls = 0; cls < TINY_NUM_CLASSES; cls++) {
uint64_t total_allocs = g_unified_cache_hit[cls] + g_unified_cache_miss[cls];
uint64_t total_frees = g_unified_cache_push[cls] + g_unified_cache_full[cls];
if (total_allocs == 0 && total_frees == 0) continue; // Skip unused classes
double hit_rate = (total_allocs > 0) ? (100.0 * g_unified_cache_hit[cls] / total_allocs) : 0.0;
double full_rate = (total_frees > 0) ? (100.0 * g_unified_cache_full[cls] / total_frees) : 0.0;
// Current occupancy
uint16_t count = (g_unified_cache[cls].tail >= g_unified_cache[cls].head)
? (g_unified_cache[cls].tail - g_unified_cache[cls].head)
: (g_unified_cache[cls].capacity - g_unified_cache[cls].head + g_unified_cache[cls].tail);
fprintf(stderr, " C%d: %u/%u slots occupied, hit=%llu miss=%llu (%.1f%% hit), push=%llu full=%llu (%.1f%% full)\n",
cls,
count, g_unified_cache[cls].capacity,
(unsigned long long)g_unified_cache_hit[cls],
(unsigned long long)g_unified_cache_miss[cls],
hit_rate,
(unsigned long long)g_unified_cache_push[cls],
(unsigned long long)g_unified_cache_full[cls],
full_rate);
}
fflush(stderr);
#endif
}
// ============================================================================
// Phase 23-E: Direct SuperSlab Carve (TLS SLL Bypass)
// ============================================================================
// Fail-fast helper: verify that a candidate BASE pointer belongs to a valid
// Tiny slab within a SuperSlab. This is intentionally defensive and only
// compiled in debug builds to avoid hot-path overhead in release.
static inline int unified_refill_validate_base(int class_idx,
TinyTLSSlab* tls,
TinySlabMeta* meta,
void* base,
const char* stage)
{
#if HAKMEM_BUILD_RELEASE
(void)class_idx; (void)tls; (void)base; (void)stage;
return 1;
#else
if (!base) {
fprintf(stderr,
"[UNIFIED_REFILL_CORRUPT] stage=%s cls=%d base=NULL tls_ss=%p meta=%p\n",
stage ? stage : "unified_refill",
class_idx,
(void*)(tls ? tls->ss : NULL),
(void*)meta);
abort();
}
SuperSlab* tls_ss = tls ? tls->ss : NULL;
if (!tls_ss || tls_ss->magic != SUPERSLAB_MAGIC) {
fprintf(stderr,
"[UNIFIED_REFILL_CORRUPT] stage=%s cls=%d base=%p tls_ss=%p meta=%p (invalid TLS ss)\n",
stage ? stage : "unified_refill",
class_idx,
base,
(void*)tls_ss,
(void*)meta);
abort();
}
// Cross-check registry lookup for additional safety.
SuperSlab* ss_lookup = hak_super_lookup(base);
if (!ss_lookup || ss_lookup->magic != SUPERSLAB_MAGIC) {
fprintf(stderr,
"[UNIFIED_REFILL_CORRUPT] stage=%s cls=%d base=%p tls_ss=%p lookup_ss=%p meta=%p\n",
stage ? stage : "unified_refill",
class_idx,
base,
(void*)tls_ss,
(void*)ss_lookup,
(void*)meta);
abort();
}
if (ss_lookup != tls_ss) {
fprintf(stderr,
"[UNIFIED_REFILL_CORRUPT] stage=%s cls=%d base=%p tls_ss=%p lookup_ss=%p (mismatch)\n",
stage ? stage : "unified_refill",
class_idx,
base,
(void*)tls_ss,
(void*)ss_lookup);
abort();
}
int slab_idx = tls ? (int)tls->slab_idx : -1;
int cap = ss_slabs_capacity(tls_ss);
if (slab_idx < 0 || slab_idx >= cap) {
fprintf(stderr,
"[UNIFIED_REFILL_CORRUPT] stage=%s cls=%d base=%p tls_ss=%p slab_idx=%d cap=%d meta_cap=%u meta_used=%u meta_carved=%u\n",
stage ? stage : "unified_refill",
class_idx,
base,
(void*)tls_ss,
slab_idx,
cap,
meta ? meta->capacity : 0u,
meta ? (unsigned)meta->used : 0u,
meta ? (unsigned)meta->carved : 0u);
abort();
}
// Ensure meta matches TLS view for this slab.
TinySlabMeta* expected_meta = &tls_ss->slabs[slab_idx];
if (meta && meta != expected_meta) {
fprintf(stderr,
"[UNIFIED_REFILL_CORRUPT] stage=%s cls=%d base=%p tls_ss=%p slab_idx=%d meta=%p expected_meta=%p\n",
stage ? stage : "unified_refill",
class_idx,
base,
(void*)tls_ss,
slab_idx,
(void*)meta,
(void*)expected_meta);
abort();
}
uint8_t* slab_base = tiny_slab_base_for_geometry(tls_ss, slab_idx);
size_t stride = tiny_stride_for_class(class_idx);
size_t usable = tiny_usable_bytes_for_slab(slab_idx);
uint8_t* slab_end = slab_base + usable;
if ((uint8_t*)base < slab_base || (uint8_t*)base >= slab_end) {
fprintf(stderr,
"[UNIFIED_REFILL_CORRUPT] stage=%s cls=%d base=%p range=[%p,%p) stride=%zu meta_cap=%u meta_used=%u meta_carved=%u\n",
stage ? stage : "unified_refill",
class_idx,
base,
(void*)slab_base,
(void*)slab_end,
stride,
meta ? meta->capacity : 0u,
meta ? (unsigned)meta->used : 0u,
meta ? (unsigned)meta->carved : 0u);
abort();
}
ptrdiff_t offset = (uint8_t*)base - slab_base;
if (offset % (ptrdiff_t)stride != 0) {
fprintf(stderr,
"[UNIFIED_REFILL_CORRUPT] stage=%s cls=%d base=%p offset=%td stride=%zu (misaligned) meta_cap=%u meta_used=%u meta_carved=%u\n",
stage ? stage : "unified_refill",
class_idx,
base,
offset,
stride,
meta ? meta->capacity : 0u,
meta ? (unsigned)meta->used : 0u,
meta ? (unsigned)meta->carved : 0u);
abort();
}
return 1;
#endif
}
// Batch refill from SuperSlab (called on cache miss)
// Returns: BASE pointer (first block, wrapped), or NULL-wrapped if failed
// Design: Direct carve from SuperSlab to array (no TLS SLL intermediate layer)
hak_base_ptr_t unified_cache_refill(int class_idx) {
// Measure refill cost if enabled
uint64_t start_cycles = 0;
int measure = unified_cache_measure_enabled();
if (measure) {
start_cycles = read_tsc();
}
TinyTLSSlab* tls = &g_tls_slabs[class_idx];
// Step 1: Ensure SuperSlab available
if (!tls->ss) {
if (!superslab_refill(class_idx)) return HAK_BASE_FROM_RAW(NULL);
tls = &g_tls_slabs[class_idx]; // Reload after refill
}
TinyUnifiedCache* cache = &g_unified_cache[class_idx];
// ✅ Phase 11+: Ensure cache is initialized (lazy init for cold path)
if (!cache->slots) {
unified_cache_init();
// Re-check after init (may fail due to alloc failure)
if (!cache->slots) {
return HAK_BASE_FROM_RAW(NULL); // keep the wrapped-NULL convention of the other failure paths
}
}
// Step 2: Calculate available room in unified cache
int room = (int)cache->capacity - 1; // Leave 1 slot for full detection
if (cache->head > cache->tail) {
room = cache->head - cache->tail - 1;
} else if (cache->head < cache->tail) {
room = cache->capacity - (cache->tail - cache->head) - 1;
}
if (room <= 0) return HAK_BASE_FROM_RAW(NULL);
if (room > 128) room = 128; // Batch size limit
// Step 3: Direct carve from SuperSlab into local array (bypass TLS SLL!)
void* out[128];
int produced = 0;
TinySlabMeta* m = tls->meta;
size_t bs = tiny_stride_for_class(class_idx);
uint8_t* base = tls->slab_base
? tls->slab_base
: tiny_slab_base_for_geometry(tls->ss, tls->slab_idx);
while (produced < room) {
if (m->freelist) {
// Freelist pop
void* p = m->freelist;
// Validate freelist head before dereferencing
do {
SuperSlab* fl_ss = hak_super_lookup(p);
int fl_cap = fl_ss ? ss_slabs_capacity(fl_ss) : 0;
int fl_idx = (fl_ss && fl_ss->magic == SUPERSLAB_MAGIC) ? slab_index_for(fl_ss, p) : -1;
uint8_t fl_cls = (fl_idx >= 0 && fl_idx < fl_cap) ? fl_ss->slabs[fl_idx].class_idx : 0xff;
if (!fl_ss || fl_ss->magic != SUPERSLAB_MAGIC ||
fl_idx != tls->slab_idx || fl_ss != tls->ss ||
fl_cls != (uint8_t)class_idx) {
static _Atomic uint32_t g_fl_invalid = 0;
uint32_t shot = atomic_fetch_add_explicit(&g_fl_invalid, 1, memory_order_relaxed);
if (shot < 8) {
fprintf(stderr,
"[UNIFIED_FREELIST_INVALID] cls=%d p=%p ss=%p slab=%d meta_used=%u tls_ss=%p tls_slab=%d cls_meta=%u\n",
class_idx,
p,
(void*)fl_ss,
fl_idx,
m->used,
(void*)tls->ss,
tls->slab_idx,
(unsigned)fl_cls);
}
// Drop invalid freelist to avoid SEGV and force slow refill
m->freelist = NULL;
p = NULL;
}
} while (0);
if (!p) {
break;
}
void* next_node = tiny_next_read(class_idx, p);
// ROOT CAUSE FIX: Write header BEFORE exposing block (but AFTER reading next)
// For Class 0 (offset 0), next overlaps header, so we must read next first.
#if HAKMEM_TINY_HEADER_CLASSIDX
*(uint8_t*)p = (uint8_t)(0xa0 | (class_idx & 0x0f));
// Prevent compiler from reordering header write after out[] assignment
__atomic_thread_fence(__ATOMIC_RELEASE);
#endif
m->freelist = next_node;
unified_refill_validate_base(class_idx, tls, m, p,
"unified_refill_freelist");
// PageFaultTelemetry: record page touch for this BASE
pagefault_telemetry_touch(class_idx, p);
m->used++;
out[produced++] = p;
} else if (m->carved < m->capacity) {
// Linear carve (fresh block, no freelist link)
void* p = (void*)(base + ((size_t)m->carved * bs));
unified_refill_validate_base(class_idx, tls, m, p,
"unified_refill_carve");
// PageFaultTelemetry: record page touch for this BASE
pagefault_telemetry_touch(class_idx, p);
// ✅ CRITICAL: Write header (new block)
#if HAKMEM_TINY_HEADER_CLASSIDX
*(uint8_t*)p = (uint8_t)(0xa0 | (class_idx & 0x0f));
#endif
m->carved++;
m->used++;
out[produced++] = p;
} else {
// SuperSlab exhausted → refill and retry
if (!superslab_refill(class_idx)) break;
// ✅ CRITICAL: Reload TLS pointers after refill (avoid stale pointer bug)
tls = &g_tls_slabs[class_idx];
m = tls->meta;
base = tls->slab_base
? tls->slab_base
: tiny_slab_base_for_geometry(tls->ss, tls->slab_idx);
}
}
if (produced == 0) return HAK_BASE_FROM_RAW(NULL);
// Step 4: Update active counter
// Guard: tls->ss can be NULL if all SuperSlab refills failed
if (tls->ss) {
ss_active_add(tls->ss, (uint32_t)produced);
}
// Step 5: Store blocks into unified cache (skip first, return it)
void* first = out[0];
for (int i = 1; i < produced; i++) {
cache->slots[cache->tail] = out[i];
cache->tail = (cache->tail + 1) & cache->mask;
}
#if !HAKMEM_BUILD_RELEASE
g_unified_cache_miss[class_idx]++;
#endif
// Measure refill cycles
if (measure) {
uint64_t end_cycles = read_tsc();
uint64_t delta = end_cycles - start_cycles;
atomic_fetch_add_explicit(&g_unified_cache_refill_cycles_global, delta, memory_order_relaxed);
atomic_fetch_add_explicit(&g_unified_cache_misses_global, 1, memory_order_relaxed);
}
return HAK_BASE_FROM_RAW(first); // Return first block (BASE pointer)
}
// ============================================================================
// Performance Measurement: Print Statistics
// ============================================================================
void unified_cache_print_measurements(void) {
if (!unified_cache_measure_enabled()) {
return; // Measurement disabled, nothing to print
}
uint64_t hits = atomic_load_explicit(&g_unified_cache_hits_global, memory_order_relaxed);
uint64_t misses = atomic_load_explicit(&g_unified_cache_misses_global, memory_order_relaxed);
uint64_t refill_cycles = atomic_load_explicit(&g_unified_cache_refill_cycles_global, memory_order_relaxed);
uint64_t total = hits + misses;
if (total == 0) {
fprintf(stderr, "\n========================================\n");
fprintf(stderr, "Unified Cache Statistics\n");
fprintf(stderr, "========================================\n");
fprintf(stderr, "No operations recorded (measurement may be disabled)\n");
fprintf(stderr, "========================================\n\n");
return;
}
double hit_rate = (100.0 * hits) / total;
double avg_refill_cycles = misses > 0 ? (double)refill_cycles / misses : 0.0;
// Estimate time at 1GHz (conservative, most modern CPUs are 2-4GHz)
double avg_refill_us = avg_refill_cycles / 1000.0;
fprintf(stderr, "\n========================================\n");
fprintf(stderr, "Unified Cache Statistics\n");
fprintf(stderr, "========================================\n");
fprintf(stderr, "Hits: %llu\n", (unsigned long long)hits);
fprintf(stderr, "Misses: %llu\n", (unsigned long long)misses);
fprintf(stderr, "Hit Rate: %.1f%%\n", hit_rate);
fprintf(stderr, "Avg Refill Cycles: %.0f (est. %.2fus @ 1GHz)\n", avg_refill_cycles, avg_refill_us);
fprintf(stderr, "========================================\n\n");
}