diff --git a/core/box/ultra_slim_alloc_box.h b/core/box/ultra_slim_alloc_box.h
index 952954e5..dd1c3b1a 100644
--- a/core/box/ultra_slim_alloc_box.h
+++ b/core/box/ultra_slim_alloc_box.h
@@ -60,6 +60,24 @@ extern void* tiny_region_id_write_header(void* base, int class_idx);
 
 // ========== Box: Ultra SLIM Allocation (4-Layer Fast Path) ==========
 
+// ========== Statistics & Diagnostics ==========
+
+// Ultra SLIM hit/miss counters (per-class, TLS)
+static __thread uint64_t g_ultra_slim_hits[TINY_NUM_CLASSES] = {0};
+static __thread uint64_t g_ultra_slim_misses[TINY_NUM_CLASSES] = {0};
+
+static inline void ultra_slim_track_hit(int class_idx) {
+    if (class_idx >= 0 && class_idx < TINY_NUM_CLASSES) {
+        g_ultra_slim_hits[class_idx]++;
+    }
+}
+
+static inline void ultra_slim_track_miss(int class_idx) {
+    if (class_idx >= 0 && class_idx < TINY_NUM_CLASSES) {
+        g_ultra_slim_misses[class_idx]++;
+    }
+}
+
 // Ultra SLIM mode detection (TLS cached, checked once per thread)
 static inline int ultra_slim_mode_enabled(void) {
     static __thread int g_ultra_slim_checked = 0;
@@ -104,11 +122,13 @@ static inline void* ultra_slim_alloc_4layer_base(size_t size, int* out_class_idx
     void* base = NULL;
     if (tls_sll_pop(class_idx, &base)) {
         // HIT: Fast path success (total: 7-12 cycles)
+        ultra_slim_track_hit(class_idx);
         *out_class_idx = class_idx;
         return base;  // Return BASE (caller converts to USER)
     }
 
     // MISS: Return NULL (caller handles refill)
+    ultra_slim_track_miss(class_idx);
     return NULL;
 }
 
@@ -134,6 +154,10 @@ static inline void* ultra_slim_alloc_4layer(size_t size) {
 // Returns: USER pointer (ready to use) or NULL on OOM
 // This is the main entry point for Ultra SLIM mode
 static inline void* ultra_slim_alloc_with_refill(size_t size) {
+    // Debug: Track that Ultra SLIM path is being called
+    static __thread uint64_t g_ultra_slim_call_count = 0;
+    g_ultra_slim_call_count++;
+
     // Fast path: Try 4-layer direct allocation (returns USER pointer)
     void* user_ptr = ultra_slim_alloc_4layer(size);
     if (__builtin_expect(user_ptr != NULL, 1)) {
@@ -195,24 +219,6 @@ static inline void* ultra_slim_alloc_with_refill(size_t size) {
     return NULL;  // OOM
 }
 
-// ========== Statistics & Diagnostics ==========
-
-// Ultra SLIM hit/miss counters (per-class, TLS)
-static __thread uint64_t g_ultra_slim_hits[TINY_NUM_CLASSES] = {0};
-static __thread uint64_t g_ultra_slim_misses[TINY_NUM_CLASSES] = {0};
-
-static inline void ultra_slim_track_hit(int class_idx) {
-    if (class_idx >= 0 && class_idx < TINY_NUM_CLASSES) {
-        g_ultra_slim_hits[class_idx]++;
-    }
-}
-
-static inline void ultra_slim_track_miss(int class_idx) {
-    if (class_idx >= 0 && class_idx < TINY_NUM_CLASSES) {
-        g_ultra_slim_misses[class_idx]++;
-    }
-}
-
 // Print Ultra SLIM statistics (env: HAKMEM_ULTRA_SLIM_STATS=1)
 static inline int ultra_slim_stats_enabled(void) {
     static int enabled = -1;
@@ -234,7 +240,18 @@ static void ultra_slim_print_stats(void) {
         total_misses += g_ultra_slim_misses[i];
     }
 
-    if (total_hits + total_misses == 0) return;
+    // Always print stats to debug if Ultra SLIM is actually being called
+    fprintf(stderr, "\n========== Ultra SLIM 4-Layer Stats (DEBUG) ==========\n");
+    fprintf(stderr, "Total Hits: %lu\n", (unsigned long)total_hits);
+    fprintf(stderr, "Total Misses: %lu\n", (unsigned long)total_misses);
+    fprintf(stderr, "Total Calls: %lu\n", (unsigned long)(total_hits + total_misses));
+
+    if (total_hits + total_misses == 0) {
+        fprintf(stderr, "⚠️ WARNING: Ultra SLIM mode enabled but no allocations tracked!\n");
+        fprintf(stderr, "This suggests the Ultra SLIM path is not being called.\n");
+        fprintf(stderr, "=====================================================\n\n");
+        return;
+    }
 
     fprintf(stderr, "\n========== Ultra SLIM 4-Layer Stats ==========\n");
     fprintf(stderr, "Total Hits: %lu\n", (unsigned long)total_hits);
diff --git a/core/tiny_alloc_fast.inc.h b/core/tiny_alloc_fast.inc.h
index ac16766d..915c8b3f 100644
--- a/core/tiny_alloc_fast.inc.h
+++ b/core/tiny_alloc_fast.inc.h
@@ -696,6 +696,19 @@ static inline void* tiny_alloc_fast(size_t size) {
     // Expected: 90-110M ops/s (mimalloc parity)
     // Architecture: Init Safety + Size-to-Class + ACE Learning + TLS SLL Direct
     // Note: ACE learning preserved (HAKMEM's differentiator vs mimalloc)
+
+    // Debug: Check if Ultra SLIM is enabled (first call only)
+    static __thread int debug_checked = 0;
+    if (!debug_checked) {
+        int enabled = ultra_slim_mode_enabled();
+        if (enabled) {
+            fprintf(stderr, "[TINY_ALLOC_FAST] Ultra SLIM gate: ENABLED (will use 4-layer path)\n");
+        } else {
+            fprintf(stderr, "[TINY_ALLOC_FAST] Ultra SLIM gate: DISABLED (will use standard path)\n");
+        }
+        debug_checked = 1;
+    }
+
     if (__builtin_expect(ultra_slim_mode_enabled(), 0)) {
         return ultra_slim_alloc_with_refill(size);
    }
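
Testing note (not part of the patch): a minimal, hypothetical harness sketching how the new counters and stats printer could be exercised. It assumes the box header can be included directly and that the binary is linked against the rest of the allocator (tls_sll_pop, the refill path, TINY_NUM_CLASSES); the file name, size, and loop count are illustrative only. Per the header comment, HAKMEM_ULTRA_SLim_STATS is spelled HAKMEM_ULTRA_SLIM_STATS=1 and gates the printout; the variable that enables Ultra SLIM mode itself is not shown in this diff, so it is left to the shell.

/* ultra_slim_stats_harness.c (hypothetical, single-threaded) */
#include <stdio.h>
#include "core/box/ultra_slim_alloc_box.h"

int main(void) {
    /* Drive the Ultra SLIM entry point so the per-class TLS hit/miss
     * counters accumulate; 64 bytes is assumed to map to a tiny class. */
    for (int i = 0; i < 1000; i++) {
        void* p = ultra_slim_alloc_with_refill(64);
        if (p == NULL) {
            fprintf(stderr, "OOM at iteration %d\n", i);
            break;
        }
        /* Freeing is omitted here; a real harness would return blocks
         * through the allocator's free path. */
    }

    /* The counters are __thread, so this reports only the calling thread.
     * Output is gated by HAKMEM_ULTRA_SLIM_STATS=1. */
    if (ultra_slim_stats_enabled()) {
        ultra_slim_print_stats();
    }
    return 0;
}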