// tiny_unified_cache.h - Phase 23: Unified Frontend Cache (tcache-style)
//
// Goal: Flatten the 4-5 layer frontend cascade into a single-layer array cache
// Target: +50-100% performance (20.3M → 30-40M ops/s)
//
// Design (Task-sensei analysis):
// - Replace:  Ring → FastCache → SFC → TLS SLL (4 layers, 8-10 cache misses)
// - With:     Single unified array cache per class (1 layer, 2-3 cache misses)
// - Fallback: Direct SuperSlab refill (skip intermediate layers)
//
// Performance:
// - Alloc: 2-3 cache misses (TLS access + array access)
// - Free:  2-3 cache misses (similar to System malloc's tcache)
// - vs Current: 8-10 cache misses → 2-3 cache misses (~70% reduction)
//
// ENV Variables:
//   HAKMEM_TINY_UNIFIED_CACHE=1  # Enable Unified cache (default: 0, OFF)
//   HAKMEM_TINY_UNIFIED_C0=128   # C0 cache size
//   ...
//   HAKMEM_TINY_UNIFIED_C7=128   # C7 cache size
//   (per-class defaults follow the Hot_2048 strategy; see unified_capacity())

#ifndef HAK_FRONT_TINY_UNIFIED_CACHE_H
#define HAK_FRONT_TINY_UNIFIED_CACHE_H

#include <stdint.h>
#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

#include "../hakmem_build_flags.h"
#include "../hakmem_tiny_config.h"        // For TINY_NUM_CLASSES
#include "../box/ptr_type_box.h"          // Phantom pointer types (BASE/USER)
#include "../box/tiny_front_config_box.h" // Phase 8-Step1: Config macros
#include "../box/tiny_tcache_box.h"       // Phase 14 v1: Intrusive LIFO tcache
#include "../box/tiny_unified_cache_hitpath_env_box.h" // Phase 74: LOCALIZE ENV gate

// ============================================================================
// Phase 3 C2 Patch 3: Bounds Check Compile-out
// ============================================================================
// Hardcode the unified cache capacity as macro constants so the compiler can
// fold the modulo into a bitwise AND at compile time.
#define TINY_UNIFIED_CACHE_CAPACITY_POW2 11
#define TINY_UNIFIED_CACHE_CAPACITY (1 << TINY_UNIFIED_CACHE_CAPACITY_POW2) // 2048
#define TINY_UNIFIED_CACHE_MASK (TINY_UNIFIED_CACHE_CAPACITY - 1)           // 2047

// ============================================================================
// Performance Measurement: Unified Cache (ENV-gated)
// ============================================================================
// Global atomic counters for production performance measurement.
// ENV: HAKMEM_MEASURE_UNIFIED_CACHE=1 to enable (default: OFF)
extern _Atomic uint64_t g_unified_cache_hits_global;
extern _Atomic uint64_t g_unified_cache_misses_global;
extern _Atomic uint64_t g_unified_cache_refill_cycles_global;
// Per-class counters (observation Box; only active via ENV)
extern _Atomic uint64_t g_unified_cache_hits_by_class[TINY_NUM_CLASSES];
extern _Atomic uint64_t g_unified_cache_misses_by_class[TINY_NUM_CLASSES];
extern _Atomic uint64_t g_unified_cache_refill_cycles_by_class[TINY_NUM_CLASSES];

// Print statistics
void unified_cache_print_measurements(void);

// Check if measurement is enabled (inline for hot path)
static inline int unified_cache_measure_check(void) {
    static int g_measure = -1;
    if (__builtin_expect(g_measure == -1, 0)) {
        const char* e = getenv("HAKMEM_MEASURE_UNIFIED_CACHE");
        g_measure = (e && *e && *e != '0') ? 1 : 0;
    }
    return g_measure;
}
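
// Usage sketch (illustrative; mirrors the hot-path pattern used in
// unified_cache_pop_or_refill below). The gate is a predicted-not-taken
// branch, so with measurement OFF the counters cost almost nothing:
//
//   if (__builtin_expect(unified_cache_measure_check(), 0)) {
//       atomic_fetch_add_explicit(&g_unified_cache_hits_global, 1,
//                                 memory_order_relaxed);
//   }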

// ============================================================================
// Unified Cache Structure (per class)
// ============================================================================
typedef struct __attribute__((aligned(64))) {
    // slots holds BASE pointers (not USER pointers). The API wraps them as
    // hak_base_ptr_t for type safety; the internal representation stays void*.
    void** slots;      // Dynamic array of BASE pointers (allocated at init)
    uint16_t head;     // Pop index (consumer)
    uint16_t tail;     // Push index (producer)
    uint16_t capacity; // Cache size (power of 2 for fast modulo: & (capacity-1))
    uint16_t mask;     // capacity - 1 (for fast modulo)
} TinyUnifiedCache;
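
// Occupancy sketch (illustrative, not part of the API): with a power-of-two
// capacity, head/tail wrap through mask, so the number of cached blocks is
// computable without a separate counter. One slot is always left empty to
// distinguish full from empty, so occupancy ranges over [0, capacity-1]:
//
//   size_t used = (size_t)((cache->tail - cache->head) & cache->mask);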

// ============================================================================
// External TLS Variables (defined in tiny_unified_cache.c)
// ============================================================================
extern __thread TinyUnifiedCache g_unified_cache[TINY_NUM_CLASSES];

// ============================================================================
// Metrics (Phase 23, optional for debugging)
// ============================================================================
#if !HAKMEM_BUILD_RELEASE || HAKMEM_UNIFIED_CACHE_STATS_COMPILED
extern __thread uint64_t g_unified_cache_hit[TINY_NUM_CLASSES];  // Alloc hits
extern __thread uint64_t g_unified_cache_miss[TINY_NUM_CLASSES]; // Alloc misses
extern __thread uint64_t g_unified_cache_push[TINY_NUM_CLASSES]; // Free pushes
extern __thread uint64_t g_unified_cache_full[TINY_NUM_CLASSES]; // Free full (fallback to SuperSlab)
#else
// Release-side lightweight C7 warm-path counters (for smoke validation)
extern _Atomic uint64_t g_rel_c7_warm_pop;
extern _Atomic uint64_t g_rel_c7_warm_push;
#endif

// ============================================================================
// ENV Control (cached, lazy init)
// ============================================================================
// Phase 8-Step1-Fix: Forward declaration only (implementation in .c file)
// Enable flag (default: 0, OFF) - implemented in tiny_unified_cache.c
int unified_cache_enabled(void);

// Per-class capacity (default: Hot_2048 strategy, optimized for 256B workloads)
// Phase 23 Capacity Optimization Result: Hot_2048 = 14.63M ops/s (+43% vs baseline)
// Hot classes (C2/C3: 128B/256B) get 2048 slots, C5-C7 get 128, all others 64.
static inline size_t unified_capacity(int class_idx) {
    static size_t g_cap[TINY_NUM_CLASSES] = {0};
    if (__builtin_expect(g_cap[class_idx] == 0, 0)) {
        char env_name[64];
        snprintf(env_name, sizeof(env_name), "HAKMEM_TINY_UNIFIED_C%d", class_idx);
        const char* e = getenv(env_name);
        // Default: Hot_2048 strategy
        // - C2/C3 (128B/256B): 2048 slots (ultra-hot Tiny classes)
        // - C5/C6/C7 (>=129B): 128 slots (sized up for Mixed workloads)
        // - Others:            64 slots (cold)
        size_t default_cap = 64; // Cold classes
        if (class_idx == 2 || class_idx == 3) {
            default_cap = 2048; // Hot Tiny classes (128B, 256B)
        } else if (class_idx >= 5 && class_idx <= 7) {
            default_cap = 128; // Mixed workload classes (C5-C7: 129B-1024B)
        }
        g_cap[class_idx] = (e && *e) ? (size_t)atoi(e) : default_cap;
        // Clamp, then round up to a power of 2 (for fast modulo)
        if (g_cap[class_idx] < 32) g_cap[class_idx] = 32;
        if (g_cap[class_idx] > 4096) g_cap[class_idx] = 4096; // Raised limit for Hot_2048
        size_t pow2 = 32;
        while (pow2 < g_cap[class_idx]) pow2 *= 2;
        g_cap[class_idx] = pow2;
#if !HAKMEM_BUILD_RELEASE || HAKMEM_UNIFIED_CACHE_STATS_COMPILED
        fprintf(stderr, "[Unified-INIT] C%d capacity = %zu (power of 2)\n",
                class_idx, g_cap[class_idx]);
        fflush(stderr);
#endif
    }
    return g_cap[class_idx];
}
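
// Worked example (illustrative): HAKMEM_TINY_UNIFIED_C2=100 is parsed with
// atoi, clamped into [32, 4096], then rounded up to the next power of two,
// so C2 ends up with 128 slots. An unset variable falls back to the Hot_2048
// defaults above (C2/C3 = 2048, C5-C7 = 128, others = 64), which are already
// powers of two and pass through unchanged.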

// ============================================================================
// Init/Shutdown Forward Declarations
// ============================================================================
void unified_cache_init(void);
void unified_cache_shutdown(void);
void unified_cache_print_stats(void);

// ============================================================================
// Phase 23-D: Self-Contained Refill (Box U1 + Box U2 integration)
// ============================================================================
// Batch refill from SuperSlab (called on cache miss)
// Returns: BASE pointer (first block), or NULL on failure
void* unified_cache_refill(int class_idx);

// ============================================================================
// Ultra-Fast Pop/Push (2-3 cache misses, tcache-style)
// ============================================================================

// Pop from unified cache (alloc fast path)
// Returns: BASE pointer (wrapped hak_base_ptr_t; caller converts to USER)
static inline hak_base_ptr_t unified_cache_pop(int class_idx) {
    // Phase 8-Step1: Config macro (from tiny_front_config_box.h) enables dead
    // code elimination in PGO mode.
    // Fast path: Unified cache disabled → return NULL immediately
    if (__builtin_expect(!TINY_FRONT_UNIFIED_CACHE_ENABLED, 0))
        return HAK_BASE_FROM_RAW(NULL);

    TinyUnifiedCache* cache = &g_unified_cache[class_idx]; // 1 cache miss (TLS)

    // Phase 8-Step3: Lazy init check (conditional in PGO mode)
    // Phase 46A: Skip lazy-init check in FAST bench (guaranteed by startup init)
    // PGO builds assume bench_fast_init() prewarmed the cache → remove check (-1 branch)
#if !HAKMEM_TINY_FRONT_PGO && !HAKMEM_BENCH_MINIMAL
    // Lazy init check (once per thread, per class)
    if (__builtin_expect(cache->slots == NULL, 0)) {
        unified_cache_init(); // First call in this thread
        // Re-check after init (may fail if allocation failed)
        if (cache->slots == NULL) return HAK_BASE_FROM_RAW(NULL);
    }
#endif

    // Empty check
    if (__builtin_expect(cache->head == cache->tail, 0)) {
#if !HAKMEM_BUILD_RELEASE || HAKMEM_UNIFIED_CACHE_STATS_COMPILED
        g_unified_cache_miss[class_idx]++;
#endif
        return HAK_BASE_FROM_RAW(NULL); // Empty
    }

    // Pop from head (consumer)
    void* base = cache->slots[cache->head];        // 1 cache miss (array access)
    cache->head = (cache->head + 1) & cache->mask; // Fast modulo (power of 2)
#if !HAKMEM_BUILD_RELEASE || HAKMEM_UNIFIED_CACHE_STATS_COMPILED
    g_unified_cache_hit[class_idx]++;
#endif
    return HAK_BASE_FROM_RAW(base); // Return BASE pointer (2-3 cache misses total)
}

// Push to unified cache (free fast path)
// Input: BASE pointer (wrapped hak_base_ptr_t; caller must pass BASE, not USER)
// Returns: 1=SUCCESS, 0=FULL
static inline int unified_cache_push(int class_idx, hak_base_ptr_t base) {
    // Phase 8-Step1: Use config macro for dead code elimination in PGO mode
    // Fast path: Unified cache disabled → return 0 (not handled)
    if (__builtin_expect(!TINY_FRONT_UNIFIED_CACHE_ENABLED, 0)) return 0;

    void* base_raw = HAK_BASE_TO_RAW(base);

#if HAKMEM_TINY_TCACHE_COMPILED
    // Phase 14 v1: Try tcache first (intrusive LIFO, no array access)
    // Phase 22: Compiled out when disabled (default OFF)
    if (tiny_tcache_try_push(class_idx, base_raw)) {
        return 1; // SUCCESS (tcache hit, no array access)
    }
#endif
    // Tcache overflow/disabled/compiled-out → fall through to array cache

    TinyUnifiedCache* cache = &g_unified_cache[class_idx]; // 1 cache miss (TLS)

    // Phase 8-Step3: Lazy init check (conditional in PGO mode)
    // Phase 46A: Skip lazy-init check in FAST bench (guaranteed by startup init)
    // PGO builds assume bench_fast_init() prewarmed the cache → remove check (-1 branch)
#if !HAKMEM_TINY_FRONT_PGO && !HAKMEM_BENCH_MINIMAL
    // Lazy init check (once per thread, per class)
    if (__builtin_expect(cache->slots == NULL, 0)) {
        unified_cache_init(); // First call in this thread
        // Re-check after init (may fail if allocation failed)
        if (cache->slots == NULL) return 0;
    }
#endif

    // Phase 74-2: LOCALIZE optimization (compile-time gate, no runtime branch)
#if HAKMEM_TINY_UC_LOCALIZE_COMPILED
    // LOCALIZE: Load head/tail/mask once into locals to avoid reload dependency chains
    uint16_t head = cache->head;
    uint16_t tail = cache->tail;
    uint16_t mask = cache->mask;
    uint16_t next_tail = (tail + 1) & mask;
    if (__builtin_expect(next_tail == head, 0)) {
#if !HAKMEM_BUILD_RELEASE || HAKMEM_UNIFIED_CACHE_STATS_COMPILED
        g_unified_cache_full[class_idx]++;
#endif
        return 0; // Full
    }
    cache->slots[tail] = base_raw;
    cache->tail = next_tail;
#if !HAKMEM_BUILD_RELEASE || HAKMEM_UNIFIED_CACHE_STATS_COMPILED
    g_unified_cache_push[class_idx]++;
#endif
    return 1; // SUCCESS (LOCALIZE path)
#else
    // Default path: original implementation
    uint16_t next_tail = (cache->tail + 1) & cache->mask;
    // Full check (leave 1 slot empty to distinguish full from empty)
    if (__builtin_expect(next_tail == cache->head, 0)) {
#if !HAKMEM_BUILD_RELEASE || HAKMEM_UNIFIED_CACHE_STATS_COMPILED
        g_unified_cache_full[class_idx]++;
#endif
        return 0; // Full
    }
    // Push to tail (producer)
    cache->slots[cache->tail] = base_raw; // 1 cache miss (array write)
    cache->tail = next_tail;
#if !HAKMEM_BUILD_RELEASE || HAKMEM_UNIFIED_CACHE_STATS_COMPILED
    g_unified_cache_push[class_idx]++;
#endif
    return 1; // SUCCESS (2-3 cache misses total)
#endif // HAKMEM_TINY_UC_LOCALIZE_COMPILED
}

// ============================================================================
// Phase 74-3 (P0): FASTAPI - Fast-path push (assumes preconditions met)
// ============================================================================
// Preconditions (caller must ensure):
// - Unified cache is enabled (TINY_FRONT_UNIFIED_CACHE_ENABLED == 1)
// - TLS cache is initialized (cache->slots != NULL)
// - Stats are compiled out, or the caller does not need them
// Returns: 1=SUCCESS, 0=FULL (same as unified_cache_push)
static inline int unified_cache_push_fast(int class_idx, hak_base_ptr_t base) {
    void* base_raw = HAK_BASE_TO_RAW(base);
    TinyUnifiedCache* cache = &g_unified_cache[class_idx];

    uint16_t next_tail = (cache->tail + 1) & cache->mask;
    // Full check (leave 1 slot empty to distinguish full from empty)
    if (__builtin_expect(next_tail == cache->head, 0)) {
#if !HAKMEM_BUILD_RELEASE || HAKMEM_UNIFIED_CACHE_STATS_COMPILED
        g_unified_cache_full[class_idx]++;
#endif
        return 0; // Full
    }
    // Push to tail (producer)
    cache->slots[cache->tail] = base_raw;
    cache->tail = next_tail;
#if !HAKMEM_BUILD_RELEASE || HAKMEM_UNIFIED_CACHE_STATS_COMPILED
    g_unified_cache_push[class_idx]++;
#endif
    return 1; // SUCCESS (FASTAPI path)
}
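
// Caller-side usage sketch (illustrative; the class index and the size-to-
// class mapping are assumptions, not part of this header). Pairs the alloc
// fast path with the free fast path, with the stated fallbacks:
//
//   int cls = 3;                                        // hypothetical: 256B class
//   hak_base_ptr_t b = unified_cache_pop_or_refill(cls); // defined below
//   if (HAK_BASE_TO_RAW(b) != NULL) {
//       /* ... use block ... */
//       if (!unified_cache_push(cls, b)) {
//           /* cache full → caller falls back to the SuperSlab free path */
//       }
//   }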

// ============================================================================
// Phase 23-D: Self-Contained Pop-or-Refill (tcache-style, single-layer)
// ============================================================================
// All-in-one: Pop from cache, or refill from SuperSlab on miss
// Returns: BASE pointer (wrapped hak_base_ptr_t), or NULL-wrapped on failure
// Design: Self-contained; bypasses all other frontend layers (Ring/FC/SFC/SLL)
static inline hak_base_ptr_t unified_cache_pop_or_refill(int class_idx) {
    // Phase 8-Step1: Use config macro for dead code elimination in PGO mode
    // Fast path: Unified cache disabled → NULL-wrapped (caller uses legacy cascade)
    if (__builtin_expect(!TINY_FRONT_UNIFIED_CACHE_ENABLED, 0))
        return HAK_BASE_FROM_RAW(NULL);

    TinyUnifiedCache* cache = &g_unified_cache[class_idx]; // 1 cache miss (TLS)

    // Phase 8-Step3: Lazy init check (conditional in PGO mode)
    // Phase 46A: Skip lazy-init check in FAST bench (guaranteed by startup init)
    // PGO builds assume bench_fast_init() prewarmed the cache → remove check (-1 branch)
#if !HAKMEM_TINY_FRONT_PGO && !HAKMEM_BENCH_MINIMAL
    // Lazy init check (once per thread, per class)
    if (__builtin_expect(cache->slots == NULL, 0)) {
        unified_cache_init();
        if (cache->slots == NULL) return HAK_BASE_FROM_RAW(NULL);
    }
#endif

#if HAKMEM_TINY_TCACHE_COMPILED
    // Phase 14 v1: Try tcache first (intrusive LIFO, no array access)
    // Phase 22: Compiled out when disabled (default OFF)
    void* tcache_base = tiny_tcache_try_pop(class_idx);
    if (tcache_base != NULL) {
#if !HAKMEM_BUILD_RELEASE || HAKMEM_UNIFIED_CACHE_STATS_COMPILED
        g_unified_cache_hit[class_idx]++;
#endif
#if HAKMEM_TINY_UNIFIED_CACHE_MEASURE_COMPILED
        // Phase 23: Performance measurement (compiled out when disabled, default OFF)
        if (__builtin_expect(unified_cache_measure_check(), 0)) {
            atomic_fetch_add_explicit(&g_unified_cache_hits_global, 1, memory_order_relaxed);
            atomic_fetch_add_explicit(&g_unified_cache_hits_by_class[class_idx], 1, memory_order_relaxed);
        }
#endif
        return HAK_BASE_FROM_RAW(tcache_base); // HIT (tcache, no array access)
    }
#endif

    // Phase 74-2: LOCALIZE optimization (compile-time gate, no runtime branch)
#if HAKMEM_TINY_UC_LOCALIZE_COMPILED
    // LOCALIZE: Load head/tail/mask once into locals to avoid reload dependency chains
    uint16_t head = cache->head;
    uint16_t tail = cache->tail;
    uint16_t mask = cache->mask;
    if (__builtin_expect(head != tail, 1)) {
        void* base = cache->slots[head];
        cache->head = (head + 1) & mask;
#if !HAKMEM_BUILD_RELEASE || HAKMEM_UNIFIED_CACHE_STATS_COMPILED
        g_unified_cache_hit[class_idx]++;
#endif
#if HAKMEM_TINY_UNIFIED_CACHE_MEASURE_COMPILED
        if (__builtin_expect(unified_cache_measure_check(), 0)) {
            atomic_fetch_add_explicit(&g_unified_cache_hits_global, 1, memory_order_relaxed);
            atomic_fetch_add_explicit(&g_unified_cache_hits_by_class[class_idx], 1, memory_order_relaxed);
        }
#endif
        return HAK_BASE_FROM_RAW(base); // Hit! (LOCALIZE path)
    }
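
    // Note on the LOCALIZE snapshot above (explanatory; behavior unchanged):
    // head/tail/mask live in TLS, so without the locals the compiler must
    // reload them around the slots[] access, lengthening the dependency
    // chain. With the snapshot, the hit path is two dependent loads (the TLS
    // base, then slots[head]) plus one store to cache->head.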

    // Cache miss → batch refill from SuperSlab
#if !HAKMEM_BUILD_RELEASE || HAKMEM_UNIFIED_CACHE_STATS_COMPILED
    g_unified_cache_miss[class_idx]++;
#endif
    return HAK_BASE_FROM_RAW(unified_cache_refill(class_idx));
#else
    // Default path: original implementation
    // Tcache miss/disabled/compiled-out → try popping from the array cache (fast path)
    if (__builtin_expect(cache->head != cache->tail, 1)) {
        void* base = cache->slots[cache->head]; // 1 cache miss (array access)
        cache->head = (cache->head + 1) & cache->mask;
#if !HAKMEM_BUILD_RELEASE || HAKMEM_UNIFIED_CACHE_STATS_COMPILED
        g_unified_cache_hit[class_idx]++;
#endif
#if HAKMEM_TINY_UNIFIED_CACHE_MEASURE_COMPILED
        // Phase 23: Performance measurement (compiled out when disabled, default OFF)
        if (__builtin_expect(unified_cache_measure_check(), 0)) {
            atomic_fetch_add_explicit(&g_unified_cache_hits_global, 1, memory_order_relaxed);
            // Per-class hits (also makes C5-C7 utilization visible)
            atomic_fetch_add_explicit(&g_unified_cache_hits_by_class[class_idx], 1, memory_order_relaxed);
        }
#endif
        return HAK_BASE_FROM_RAW(base); // Hit! (2-3 cache misses total)
    }

    // Cache miss → batch refill from SuperSlab
#if !HAKMEM_BUILD_RELEASE || HAKMEM_UNIFIED_CACHE_STATS_COMPILED
    g_unified_cache_miss[class_idx]++;
#endif
    return HAK_BASE_FROM_RAW(unified_cache_refill(class_idx)); // Refill + return first block (BASE)
#endif // HAKMEM_TINY_UC_LOCALIZE_COMPILED
}

#endif // HAK_FRONT_TINY_UNIFIED_CACHE_H