Summary:
- Phase 23 Unified Cache: +30% improvement (Random Mixed 256B: 18.18M → 23.68M ops/s)
- PageFaultTelemetry: Extended to generic buckets (C0-C7, MID, L25, SSM)
- Measurement-driven decision: Mid/VM page-faults (80-100K) >> Tiny (6K) → prioritize Mid/VM optimization
Phase 23 Changes:
1. Unified Cache implementation (core/front/tiny_unified_cache.{c,h})
- Direct SuperSlab carve (TLS SLL bypass)
- Self-contained pop-or-refill pattern
- ENV: HAKMEM_TINY_UNIFIED_CACHE=1, HAKMEM_TINY_UNIFIED_C{0-7}=128
2. Fast path pruning (tiny_alloc_fast.inc.h, tiny_free_fast_v2.inc.h)
- Unified ON → direct cache access (skip all intermediate layers)
- Alloc: unified_cache_pop_or_refill() → on miss, fail straight to the slow path
- Free: unified_cache_push() → fall back to the TLS SLL only when the cache is full (see the sketch below)
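
A minimal sketch of the pruned fast path with the unified cache ON (HAKMEM_TINY_UNIFIED_CACHE=1). Only unified_cache_pop_or_refill() and unified_cache_push() come from this change; the signatures and the other names are assumptions, not the actual tiny_alloc_fast.inc.h / tiny_free_fast_v2.inc.h code:

// Sketch only: alloc/free shape when the unified cache is enabled.
// Signatures are assumed; tiny_class_for_size(), tiny_alloc_slow() and
// tls_sll_push() are hypothetical stand-ins for the surrounding hooks.
#include <stddef.h>

void* unified_cache_pop_or_refill(int cls);   // pop, or carve directly from a SuperSlab
int   unified_cache_push(int cls, void* p);   // assumed: returns nonzero when the block fit
int   tiny_class_for_size(size_t size);       // hypothetical size -> class (C0-C7) map
void* tiny_alloc_slow(size_t size, int cls);  // hypothetical slow path
void  tls_sll_push(int cls, void* p);         // hypothetical TLS SLL spill

static inline void* tiny_alloc_unified(size_t size) {
    int cls = tiny_class_for_size(size);
    void* p = unified_cache_pop_or_refill(cls);
    if (__builtin_expect(p != NULL, 1)) return p;
    return tiny_alloc_slow(size, cls);        // miss: fail straight to the slow path
}

static inline void tiny_free_unified(void* p, int cls) {
    if (unified_cache_push(cls, p)) return;   // cache had room
    tls_sll_push(cls, p);                     // cache full: fall back to the TLS SLL
}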
PageFaultTelemetry Changes:
3. Generic bucket architecture (core/box/pagefault_telemetry_box.{c,h})
- PF_BUCKET_{C0-C7, MID, L25, SSM} for domain-specific measurement (sketch below)
- Integration: hak_pool_try_alloc(), l25_alloc_new_run(), shared_pool_allocate_superslab_unlocked()
4. Measurement results (Random Mixed 500K / 256B):
- Tiny C2-C7: 2-33 pages, high reuse (64 down to 3.8 touches/page)
- SSM: 512 pages (initialization footprint)
- MID/L25: 0 (unused in this workload)
- Mid/Large VM benchmarks: 80-100K page-faults (13-16x higher than Tiny)
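
A rough sketch of what the generic bucket architecture implies; only the PF_BUCKET_* naming and the integration points follow this change, while the struct, counters, and pf_telemetry_record() helper are assumptions for illustration:

// Sketch only: generic page-fault buckets keyed by domain.
// Field names and pf_telemetry_record() are assumptions.
#include <stdint.h>

typedef enum {
    PF_BUCKET_C0, PF_BUCKET_C1, PF_BUCKET_C2, PF_BUCKET_C3,
    PF_BUCKET_C4, PF_BUCKET_C5, PF_BUCKET_C6, PF_BUCKET_C7,
    PF_BUCKET_MID,    // mid-size path, e.g. hak_pool_try_alloc()
    PF_BUCKET_L25,    // l25_alloc_new_run()
    PF_BUCKET_SSM,    // shared_pool_allocate_superslab_unlocked()
    PF_BUCKET_COUNT
} PFBucket;

typedef struct {
    uint64_t pages;    // pages first touched in this domain
    uint64_t touches;  // total touches, so touches/pages gives the reuse ratio
} PFBucketStats;

static PFBucketStats g_pf_stats[PF_BUCKET_COUNT];

static inline void pf_telemetry_record(PFBucket b, uint64_t new_pages, uint64_t touches) {
    g_pf_stats[b].pages   += new_pages;
    g_pf_stats[b].touches += touches;
}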
Ring Cache Enhancements:
5. Hot Ring Cache (core/front/tiny_ring_cache.{c,h})
- ENV: HAKMEM_TINY_HOT_RING_ENABLE=1, HAKMEM_TINY_HOT_RING_C{0-7}=size
- Conditional compilation cleanup
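
A small sketch of how the documented knobs could be parsed at startup; only the ENV key names come from this change, the function and variable names are assumptions:

// Sketch only: reading HAKMEM_TINY_HOT_RING_ENABLE and the per-class
// HAKMEM_TINY_HOT_RING_C0..C7 sizes. Names other than the ENV keys are assumed.
#include <stdio.h>
#include <stdlib.h>

static int g_hot_ring_enable;
static int g_hot_ring_cap[8];   // one entry per class C0-C7

static void hot_ring_read_env(void) {
    const char* e = getenv("HAKMEM_TINY_HOT_RING_ENABLE");
    g_hot_ring_enable = (e != NULL && atoi(e) != 0);
    for (int c = 0; c < 8; c++) {
        char key[40];
        snprintf(key, sizeof key, "HAKMEM_TINY_HOT_RING_C%d", c);
        const char* v = getenv(key);
        g_hot_ring_cap[c] = (v != NULL) ? atoi(v) : 0;   // 0 = keep the built-in default
    }
}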
Documentation:
6. Analysis reports
- RANDOM_MIXED_BOTTLENECK_ANALYSIS.md: Page-fault breakdown
- RANDOM_MIXED_SUMMARY.md: Phase 23 summary
- RING_CACHE_ACTIVATION_GUIDE.md: Ring cache usage
- CURRENT_TASK.md: Updated with Phase 23 results and Phase 24 plan
Next Steps (Phase 24):
- Target: Mid/VM PageArena/HotSpanBox (page-fault reduction 80-100K → 30-40K)
- Tiny SSM optimization deferred (low ROI; ~6K page-faults is already near-optimal)
- Expected improvement: +30-50% for Mid/Large workloads
Generated with Claude Code
Co-Authored-By: Claude <noreply@anthropic.com>
hakmem_tiny_lazy_init.inc.h (C, 140 lines, 4.7 KiB):
// hakmem_tiny_lazy_init.inc.h - Phase 22: Lazy Per-Class Initialization
// Goal: Reduce cold-start page faults by initializing only used classes
//
// ChatGPT Analysis (2025-11-16):
// - hak_tiny_init() page faults: 94.94% of all page faults
// - Cause: Eager init of all 8 classes even if only C2/C3 used
// - Solution: Lazy init per class on first use
//
// Expected Impact:
// - Page faults: -90% (only touch C2/C3 for 256B workload)
// - Cold start: +30-40% performance (16.2M → 22-25M ops/s)

#ifndef HAKMEM_TINY_LAZY_INIT_INC_H
#define HAKMEM_TINY_LAZY_INIT_INC_H

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>                     // For fprintf() in debug builds
#include "superslab/superslab_types.h" // For SuperSlabACEState

// ============================================================================
// Phase 22-1: Per-Class Initialization State
// ============================================================================

// Track which classes are initialized (per-thread)
__thread uint8_t g_class_initialized[TINY_NUM_CLASSES] = {0};

// Global one-time init flag (for shared resources)
static int g_tiny_global_initialized = 0;
static pthread_mutex_t g_lazy_init_lock = PTHREAD_MUTEX_INITIALIZER;

// ============================================================================
// Phase 22-2: Lazy Init Implementation
// ============================================================================

// Initialize one class lazily (called on first use)
static inline void lazy_init_class(int class_idx) {
    // Fast path: already initialized
    if (__builtin_expect(g_class_initialized[class_idx], 1)) {
        return;
    }

    // Slow path: need to initialize this class
    pthread_mutex_lock(&g_lazy_init_lock);

    // Double-check after acquiring lock
    if (g_class_initialized[class_idx]) {
        pthread_mutex_unlock(&g_lazy_init_lock);
        return;
    }

    // Extract from hak_tiny_init.inc lines 84-103: TLS List Init
    {
        TinyTLSList* tls = &g_tls_lists[class_idx];
        tls->head = NULL;
        tls->count = 0;
        uint32_t base_cap = (uint32_t)tiny_default_cap(class_idx);
        uint32_t class_max = (uint32_t)tiny_cap_max_for_class(class_idx);
        if (base_cap > class_max) base_cap = class_max;

        // Apply global cap limit if set
        extern int g_mag_cap_limit;
        extern int g_mag_cap_override[TINY_NUM_CLASSES];
        if ((uint32_t)g_mag_cap_limit < base_cap) base_cap = (uint32_t)g_mag_cap_limit;
        if (g_mag_cap_override[class_idx] > 0) {
            uint32_t ov = (uint32_t)g_mag_cap_override[class_idx];
            if (ov > class_max) ov = class_max;
            if (ov > (uint32_t)g_mag_cap_limit) ov = (uint32_t)g_mag_cap_limit;
            if (ov != 0u) base_cap = ov;
        }
        if (base_cap == 0u) base_cap = 32u;

        tls->cap = base_cap;
        tls->refill_low = tiny_tls_default_refill(base_cap);
        tls->spill_high = tiny_tls_default_spill(base_cap);
        tiny_tls_publish_targets(class_idx, base_cap);
    }

    // Extract from hak_tiny_init.inc lines 623-625: Per-class lock
    pthread_mutex_init(&g_tiny_class_locks[class_idx].m, NULL);

    // Extract from hak_tiny_init.inc lines 628-637: ACE state
    {
        extern SuperSlabACEState g_ss_ace[TINY_NUM_CLASSES];
        g_ss_ace[class_idx].current_lg = 20; // Start with 1MB SuperSlabs
        g_ss_ace[class_idx].target_lg = 20;
        g_ss_ace[class_idx].hot_score = 0;
        g_ss_ace[class_idx].alloc_count = 0;
        g_ss_ace[class_idx].refill_count = 0;
        g_ss_ace[class_idx].spill_count = 0;
        g_ss_ace[class_idx].live_blocks = 0;
        g_ss_ace[class_idx].last_tick_ns = 0;
    }

    // Mark as initialized
    g_class_initialized[class_idx] = 1;

    pthread_mutex_unlock(&g_lazy_init_lock);

#if !HAKMEM_BUILD_RELEASE
    fprintf(stderr, "[LAZY_INIT] Class %d initialized\n", class_idx);
#endif
}

// Global initialization (called once, for non-class resources)
static inline void lazy_init_global(void) {
    if (__builtin_expect(g_tiny_global_initialized, 1)) {
        return;
    }

    pthread_mutex_lock(&g_lazy_init_lock);

    if (g_tiny_global_initialized) {
        pthread_mutex_unlock(&g_lazy_init_lock);
        return;
    }

    // Initialize SuperSlab subsystem (only once)
    extern int g_use_superslab;
    if (g_use_superslab) {
        extern void hak_super_registry_init(void);
        extern void hak_ss_lru_init(void);
        extern void hak_ss_prewarm_init(void);

        hak_super_registry_init();
        hak_ss_lru_init();
        hak_ss_prewarm_init();
    }

    // Mark global resources as initialized
    g_tiny_global_initialized = 1;

    pthread_mutex_unlock(&g_lazy_init_lock);

#if !HAKMEM_BUILD_RELEASE
    fprintf(stderr, "[LAZY_INIT] Global resources initialized\n");
#endif
}

#endif // HAKMEM_TINY_LAZY_INIT_INC_H
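
A minimal usage sketch for the hooks above, assuming they are wired into the tiny allocation entry point of a translation unit that includes this header; hak_tiny_alloc_example(), tiny_size_to_class() and tiny_class_alloc() are hypothetical names, not the real fast-path functions:

// Sketch only: first-use wiring for lazy_init_global()/lazy_init_class().
// The tiny_* / hak_tiny_* names below are hypothetical.
#include <stddef.h>

int   tiny_size_to_class(size_t size);   // e.g. 256 B maps into the C2/C3 range
void* tiny_class_alloc(int cls);         // hypothetical per-class allocation path

static inline void* hak_tiny_alloc_example(size_t size) {
    int cls = tiny_size_to_class(size);
    lazy_init_global();                  // shared SuperSlab resources, once per process
    lazy_init_class(cls);                // touch only the class that is actually used
    return tiny_class_alloc(cls);
}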