Implementation: - New single-layer malloc/free path for Tiny (≤1024B) allocations - Bypasses 3-layer overhead: malloc → hak_alloc_at (236 lines) → wrapper → tiny_alloc_fast - Leverages Phase 23 Unified Cache (tcache-style, 2-3 cache misses) - Safe fallback to normal path on Unified Cache miss Performance (Random Mixed 256B, 100K iterations): - Baseline (Phase 26 OFF): 11.33M ops/s - Phase 26 ON: 12.79M ops/s (+12.9%) - Prediction (ChatGPT): +10-15% → Actual: +12.9% (perfect match!) Bug fixes: - Initialization bug: Added hak_init() call before fast path - Page boundary SEGV: Added guard for offset_in_page == 0 Also includes Phase 23 debug log fixes: - Guard C2_CARVE logs with #if !HAKMEM_BUILD_RELEASE - Guard prewarm logs with #if !HAKMEM_BUILD_RELEASE - Set Hot_2048 as default capacity (C2/C3=2048, others=64) Files: - core/front/malloc_tiny_fast.h: Phase 26 implementation (145 lines) - core/box/hak_wrappers.inc.h: Fast path integration (+28 lines) - core/front/tiny_unified_cache.h: Hot_2048 default - core/tiny_refill_opt.h: C2_CARVE log guard - core/box/ss_hot_prewarm_box.c: Prewarm log guard - CURRENT_TASK.md: Phase 26 completion documentation ENV variables: - HAKMEM_FRONT_GATE_UNIFIED=1 (enable Phase 26, default: OFF) - HAKMEM_TINY_UNIFIED_CACHE=1 (Phase 23, required) 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
243 lines
9.3 KiB
C
243 lines
9.3 KiB
C
// tiny_unified_cache.h - Phase 23: Unified Frontend Cache (tcache-style)
|
|
//
|
|
// Goal: Flatten 4-5 layer frontend cascade into single-layer array cache
|
|
// Target: +50-100% performance (20.3M → 30-40M ops/s)
|
|
//
|
|
// Design (Task-sensei analysis):
|
|
// - Replace: Ring → FastCache → SFC → TLS SLL (4 layers, 8-10 cache misses)
|
|
// - With: Single unified array cache per class (1 layer, 2-3 cache misses)
|
|
// - Fallback: Direct SuperSlab refill (skip intermediate layers)
|
|
//
|
|
// Performance:
|
|
// - Alloc: 2-3 cache misses (TLS access + array access)
|
|
// - Free: 2-3 cache misses (similar to System malloc tcache)
|
|
// - vs Current: 8-10 cache misses → 2-3 cache misses (70% reduction)
|
|
//
|
|
// ENV Variables:
|
|
// HAKMEM_TINY_UNIFIED_CACHE=1 # Enable Unified cache (default: 0, OFF)
|
|
// HAKMEM_TINY_UNIFIED_C0=128 # C0 cache size (default: 128)
|
|
// ...
|
|
// HAKMEM_TINY_UNIFIED_C7=128 # C7 cache size (default: 128)
|
|
|
|
#ifndef HAK_FRONT_TINY_UNIFIED_CACHE_H
|
|
#define HAK_FRONT_TINY_UNIFIED_CACHE_H
|
|
|
|
#include <stdint.h>
|
|
#include <stdlib.h>
|
|
#include <stdio.h>
|
|
#include "../hakmem_build_flags.h"
|
|
#include "../hakmem_tiny_config.h" // For TINY_NUM_CLASSES
|
|
|
|
// ============================================================================
|
|
// Unified Cache Structure (per class)
|
|
// ============================================================================
|
|
|
|
typedef struct {
    void** slots;      // Dynamic slot array (allocated at init; length == capacity, power of 2)
    uint16_t head;     // Pop index (consumer side); advanced as (head + 1) & mask
    uint16_t tail;     // Push index (producer side); advanced as (tail + 1) & mask
    uint16_t capacity; // Slot count (power of 2 so wrap-around is a single AND, no division)
    uint16_t mask;     // capacity - 1; fast modulo mask shared by push/pop
} TinyUnifiedCache;
|
|
|
|
// ============================================================================
|
|
// External TLS Variables (defined in tiny_unified_cache.c)
|
|
// ============================================================================
|
|
|
|
extern __thread TinyUnifiedCache g_unified_cache[TINY_NUM_CLASSES];
|
|
|
|
// ============================================================================
|
|
// Metrics (Phase 23, optional for debugging)
|
|
// ============================================================================
|
|
|
|
#if !HAKMEM_BUILD_RELEASE
|
|
extern __thread uint64_t g_unified_cache_hit[TINY_NUM_CLASSES]; // Alloc hits
|
|
extern __thread uint64_t g_unified_cache_miss[TINY_NUM_CLASSES]; // Alloc misses
|
|
extern __thread uint64_t g_unified_cache_push[TINY_NUM_CLASSES]; // Free pushes
|
|
extern __thread uint64_t g_unified_cache_full[TINY_NUM_CLASSES]; // Free full (fallback to SuperSlab)
|
|
#endif
|
|
|
|
// ============================================================================
|
|
// ENV Control (cached, lazy init)
|
|
// ============================================================================
|
|
|
|
// Enable flag (default: 0, OFF)
// Reads HAKMEM_TINY_UNIFIED_CACHE once per process (lazily, cached in a static).
// Returns 1 when the variable is set to a non-empty value whose first char is not '0'.
static inline int unified_cache_enabled(void) {
    static int g_enable = -1; // -1 = not yet resolved
    if (__builtin_expect(g_enable == -1, 0)) {
        const char* e = getenv("HAKMEM_TINY_UNIFIED_CACHE");
        int on = 0;
        if (e != NULL && e[0] != '\0' && e[0] != '0') {
            on = 1;
        }
        g_enable = on;
#if !HAKMEM_BUILD_RELEASE
        if (on) {
            fprintf(stderr, "[Unified-INIT] unified_cache_enabled() = %d\n", on);
            fflush(stderr);
        }
#endif
    }
    return g_enable;
}
|
|
|
|
// Per-class capacity (default: Hot_2048 strategy - optimized for 256B workload)
|
|
// Phase 23 Capacity Optimization Result: Hot_2048 = 14.63M ops/s (+43% vs baseline)
|
|
// Hot classes (C2/C3: 128B/256B) get 2048 slots, others get 64 slots
|
|
static inline size_t unified_capacity(int class_idx) {
|
|
static size_t g_cap[TINY_NUM_CLASSES] = {0};
|
|
if (__builtin_expect(g_cap[class_idx] == 0, 0)) {
|
|
char env_name[64];
|
|
snprintf(env_name, sizeof(env_name), "HAKMEM_TINY_UNIFIED_C%d", class_idx);
|
|
const char* e = getenv(env_name);
|
|
|
|
// Default: Hot_2048 strategy (C2/C3=2048, others=64)
|
|
size_t default_cap = 64; // Cold classes
|
|
if (class_idx == 2 || class_idx == 3) {
|
|
default_cap = 2048; // Hot classes (128B, 256B)
|
|
}
|
|
|
|
g_cap[class_idx] = (e && *e) ? (size_t)atoi(e) : default_cap;
|
|
|
|
// Round up to power of 2 (for fast modulo)
|
|
if (g_cap[class_idx] < 32) g_cap[class_idx] = 32;
|
|
if (g_cap[class_idx] > 4096) g_cap[class_idx] = 4096; // Increased limit for Hot_2048
|
|
|
|
// Ensure power of 2
|
|
size_t pow2 = 32;
|
|
while (pow2 < g_cap[class_idx]) pow2 *= 2;
|
|
g_cap[class_idx] = pow2;
|
|
|
|
#if !HAKMEM_BUILD_RELEASE
|
|
fprintf(stderr, "[Unified-INIT] C%d capacity = %zu (power of 2)\n", class_idx, g_cap[class_idx]);
|
|
fflush(stderr);
|
|
#endif
|
|
}
|
|
return g_cap[class_idx];
|
|
}
|
|
|
|
// ============================================================================
|
|
// Init/Shutdown Forward Declarations
|
|
// ============================================================================
|
|
|
|
void unified_cache_init(void);
|
|
void unified_cache_shutdown(void);
|
|
void unified_cache_print_stats(void);
|
|
|
|
// ============================================================================
|
|
// Phase 23-D: Self-Contained Refill (Box U1 + Box U2 integration)
|
|
// ============================================================================
|
|
|
|
// Batch refill from SuperSlab (called on cache miss)
|
|
// Returns: BASE pointer (first block), or NULL if failed
|
|
void* unified_cache_refill(int class_idx);
|
|
|
|
// ============================================================================
|
|
// Ultra-Fast Pop/Push (2-3 cache misses, tcache-style)
|
|
// ============================================================================
|
|
|
|
// Pop from unified cache (alloc fast path)
|
|
// Returns: BASE pointer (caller must convert to USER with +1)
|
|
static inline void* unified_cache_pop(int class_idx) {
|
|
// Fast path: Unified cache disabled → return NULL immediately
|
|
if (__builtin_expect(!unified_cache_enabled(), 0)) return NULL;
|
|
|
|
TinyUnifiedCache* cache = &g_unified_cache[class_idx]; // 1 cache miss (TLS)
|
|
|
|
// Lazy init check (once per thread, per class)
|
|
if (__builtin_expect(cache->slots == NULL, 0)) {
|
|
unified_cache_init(); // First call in this thread
|
|
// Re-check after init (may fail if allocation failed)
|
|
if (cache->slots == NULL) return NULL;
|
|
}
|
|
|
|
// Empty check
|
|
if (__builtin_expect(cache->head == cache->tail, 0)) {
|
|
#if !HAKMEM_BUILD_RELEASE
|
|
g_unified_cache_miss[class_idx]++;
|
|
#endif
|
|
return NULL; // Empty
|
|
}
|
|
|
|
// Pop from head (consumer)
|
|
void* base = cache->slots[cache->head]; // 1 cache miss (array access)
|
|
cache->head = (cache->head + 1) & cache->mask; // Fast modulo (power of 2)
|
|
|
|
#if !HAKMEM_BUILD_RELEASE
|
|
g_unified_cache_hit[class_idx]++;
|
|
#endif
|
|
|
|
return base; // Return BASE pointer (2-3 cache misses total)
|
|
}
|
|
|
|
// Push to unified cache (free fast path)
|
|
// Input: BASE pointer (caller must pass BASE, not USER)
|
|
// Returns: 1=SUCCESS, 0=FULL
|
|
static inline int unified_cache_push(int class_idx, void* base) {
|
|
// Fast path: Unified cache disabled → return 0 (not handled)
|
|
if (__builtin_expect(!unified_cache_enabled(), 0)) return 0;
|
|
|
|
TinyUnifiedCache* cache = &g_unified_cache[class_idx]; // 1 cache miss (TLS)
|
|
|
|
// Lazy init check (once per thread, per class)
|
|
if (__builtin_expect(cache->slots == NULL, 0)) {
|
|
unified_cache_init(); // First call in this thread
|
|
// Re-check after init (may fail if allocation failed)
|
|
if (cache->slots == NULL) return 0;
|
|
}
|
|
|
|
uint16_t next_tail = (cache->tail + 1) & cache->mask;
|
|
|
|
// Full check (leave 1 slot empty to distinguish full/empty)
|
|
if (__builtin_expect(next_tail == cache->head, 0)) {
|
|
#if !HAKMEM_BUILD_RELEASE
|
|
g_unified_cache_full[class_idx]++;
|
|
#endif
|
|
return 0; // Full
|
|
}
|
|
|
|
// Push to tail (producer)
|
|
cache->slots[cache->tail] = base; // 1 cache miss (array write)
|
|
cache->tail = next_tail;
|
|
|
|
#if !HAKMEM_BUILD_RELEASE
|
|
g_unified_cache_push[class_idx]++;
|
|
#endif
|
|
|
|
return 1; // SUCCESS (2-3 cache misses total)
|
|
}
|
|
|
|
// ============================================================================
|
|
// Phase 23-D: Self-Contained Pop-or-Refill (tcache-style, single-layer)
|
|
// ============================================================================
|
|
|
|
// All-in-one: Pop from cache, or refill from SuperSlab on miss
|
|
// Returns: BASE pointer (caller converts to USER), or NULL if failed
|
|
// Design: Self-contained, bypasses all other frontend layers (Ring/FC/SFC/SLL)
|
|
static inline void* unified_cache_pop_or_refill(int class_idx) {
|
|
// Fast path: Unified cache disabled → return NULL (caller uses legacy cascade)
|
|
if (__builtin_expect(!unified_cache_enabled(), 0)) return NULL;
|
|
|
|
TinyUnifiedCache* cache = &g_unified_cache[class_idx]; // 1 cache miss (TLS)
|
|
|
|
// Lazy init check (once per thread, per class)
|
|
if (__builtin_expect(cache->slots == NULL, 0)) {
|
|
unified_cache_init();
|
|
if (cache->slots == NULL) return NULL;
|
|
}
|
|
|
|
// Try pop from cache (fast path)
|
|
if (__builtin_expect(cache->head != cache->tail, 1)) {
|
|
void* base = cache->slots[cache->head]; // 1 cache miss (array access)
|
|
cache->head = (cache->head + 1) & cache->mask;
|
|
#if !HAKMEM_BUILD_RELEASE
|
|
g_unified_cache_hit[class_idx]++;
|
|
#endif
|
|
return base; // Hit! (2-3 cache misses total)
|
|
}
|
|
|
|
// Cache miss → Batch refill from SuperSlab
|
|
#if !HAKMEM_BUILD_RELEASE
|
|
g_unified_cache_miss[class_idx]++;
|
|
#endif
|
|
return unified_cache_refill(class_idx); // Refill + return first block
|
|
}
|
|
|
|
#endif // HAK_FRONT_TINY_UNIFIED_CACHE_H
|