// hakmem_tiny_superslab.c - SuperSlab allocator implementation (Phase 6.22)
// Purpose: 2MB aligned slab allocation with fast pointer→slab lookup
// License: MIT
// Date: 2025-10-24

#include "hakmem_tiny_superslab.h"
#include "hakmem_super_registry.h"  // Phase 1: Registry integration

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>        // getenv, atoi
#include <sys/mman.h>
#include <errno.h>
#include <sys/resource.h>  // getrlimit for OOM diagnostics
#include <unistd.h>

static int g_ss_force_lg = -1;
static _Atomic int g_ss_populate_once = 0;

// ============================================================================
// Global Statistics
// ============================================================================

static pthread_mutex_t g_superslab_lock = PTHREAD_MUTEX_INITIALIZER;
uint64_t g_superslabs_allocated = 0;  // Non-static for debugging
uint64_t g_superslabs_freed = 0;      // Phase 7.6: Non-static for test access
uint64_t g_bytes_allocated = 0;       // Non-static for debugging

// Per-class counters for gating/metrics (Tiny classes = 8)
uint64_t g_ss_alloc_by_class[8] = {0};
uint64_t g_ss_freed_by_class[8] = {0};

typedef struct SuperslabCacheEntry {
    struct SuperslabCacheEntry* next;
} SuperslabCacheEntry;

static SuperslabCacheEntry* g_ss_cache_head[8] = {0};
static size_t g_ss_cache_count[8] = {0};
static size_t g_ss_cache_cap[8] = {0};
static size_t g_ss_precharge_target[8] = {0};
static _Atomic int g_ss_precharge_done[8] = {0};
static int g_ss_cache_enabled = 0;
static pthread_once_t g_ss_cache_once = PTHREAD_ONCE_INIT;
static pthread_mutex_t g_ss_cache_lock[8];

uint64_t g_ss_cache_hits[8] = {0};
uint64_t g_ss_cache_misses[8] = {0};
uint64_t g_ss_cache_puts[8] = {0};
uint64_t g_ss_cache_drops[8] = {0};
uint64_t g_ss_cache_precharged[8] = {0};
uint64_t g_superslabs_reused = 0;
uint64_t g_superslabs_cached = 0;

static void ss_cache_global_init(void) {
    for (int i = 0; i < 8; i++) {
        pthread_mutex_init(&g_ss_cache_lock[i], NULL);
    }
}

static inline void ss_cache_ensure_init(void) {
    pthread_once(&g_ss_cache_once, ss_cache_global_init);
}

static void* ss_os_acquire(uint8_t size_class, size_t ss_size, uintptr_t ss_mask, int populate);
static void ss_cache_precharge(uint8_t size_class, size_t ss_size, uintptr_t ss_mask);
static SuperslabCacheEntry* ss_cache_pop(uint8_t size_class);
static int ss_cache_push(uint8_t size_class, SuperSlab* ss);

static inline void ss_stats_os_alloc(uint8_t size_class, size_t ss_size) {
    pthread_mutex_lock(&g_superslab_lock);
    g_superslabs_allocated++;
    if (size_class < 8) {
        g_ss_alloc_by_class[size_class]++;
    }
    g_bytes_allocated += ss_size;
    pthread_mutex_unlock(&g_superslab_lock);
}

static inline void ss_stats_cache_reuse(void) {
    pthread_mutex_lock(&g_superslab_lock);
    g_superslabs_reused++;
    pthread_mutex_unlock(&g_superslab_lock);
}

static inline void ss_stats_cache_store(void) {
    pthread_mutex_lock(&g_superslab_lock);
    g_superslabs_cached++;
    pthread_mutex_unlock(&g_superslab_lock);
}

// ============================================================================
// Phase 8.3: ACE (Adaptive Cache Engine) State
// ============================================================================

SuperSlabACEState g_ss_ace[TINY_NUM_CLASSES_SS] = {{0}};

// Phase 8.3: hak_now_ns() is now defined in hakmem_tiny_superslab.h as static inline

// ============================================================================
// Diagnostics
// ============================================================================
static void log_superslab_oom_once(size_t ss_size, size_t alloc_size, int err) {
    static int logged = 0;
    if (logged) return;
    logged = 1;

    struct rlimit rl = {0};
    if (getrlimit(RLIMIT_AS, &rl) != 0) {
        rl.rlim_cur = RLIM_INFINITY;
        rl.rlim_max = RLIM_INFINITY;
    }

    unsigned long vm_size_kb = 0;
    unsigned long vm_rss_kb = 0;
    FILE* status = fopen("/proc/self/status", "r");
    if (status) {
        char line[256];
        while (fgets(line, sizeof(line), status)) {
            if (strncmp(line, "VmSize:", 7) == 0) {
                (void)sscanf(line + 7, "%lu", &vm_size_kb);
            } else if (strncmp(line, "VmRSS:", 6) == 0) {
                (void)sscanf(line + 6, "%lu", &vm_rss_kb);
            }
        }
        fclose(status);
    }

    char rl_cur_buf[32];
    char rl_max_buf[32];
    if (rl.rlim_cur == RLIM_INFINITY) {
        strcpy(rl_cur_buf, "inf");
    } else {
        snprintf(rl_cur_buf, sizeof(rl_cur_buf), "%llu", (unsigned long long)rl.rlim_cur);
    }
    if (rl.rlim_max == RLIM_INFINITY) {
        strcpy(rl_max_buf, "inf");
    } else {
        snprintf(rl_max_buf, sizeof(rl_max_buf), "%llu", (unsigned long long)rl.rlim_max);
    }

    fprintf(stderr,
            "[SS OOM] mmap failed: err=%d ss_size=%zu alloc_size=%zu "
            "alloc=%llu freed=%llu bytes=%llu "
            "RLIMIT_AS(cur=%s max=%s) VmSize=%lu kB VmRSS=%lu kB\n",
            err, ss_size, alloc_size,
            (unsigned long long)g_superslabs_allocated,
            (unsigned long long)g_superslabs_freed,
            (unsigned long long)g_bytes_allocated,
            rl_cur_buf, rl_max_buf, vm_size_kb, vm_rss_kb);
}

static void* ss_os_acquire(uint8_t size_class, size_t ss_size, uintptr_t ss_mask, int populate) {
    void* ptr = NULL;
#ifdef MAP_ALIGNED_SUPER
    int map_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_ALIGNED_SUPER;
#ifdef MAP_POPULATE
    if (populate) {
        map_flags |= MAP_POPULATE;
    }
#endif
    ptr = mmap(NULL, ss_size, PROT_READ | PROT_WRITE, map_flags, -1, 0);
    if (ptr != MAP_FAILED) {
        if (((uintptr_t)ptr & ss_mask) == 0) {
            ss_stats_os_alloc(size_class, ss_size);
            return ptr;
        }
        munmap(ptr, ss_size);
        ptr = NULL;
    } else {
        log_superslab_oom_once(ss_size, ss_size, errno);
    }
#endif
    size_t alloc_size = ss_size * 2;
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef MAP_POPULATE
    if (populate) {
        flags |= MAP_POPULATE;
    }
#endif
    void* raw = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE, flags, -1, 0);
    if (raw == MAP_FAILED) {
        log_superslab_oom_once(ss_size, alloc_size, errno);
        return NULL;
    }
    uintptr_t raw_addr = (uintptr_t)raw;
    uintptr_t aligned_addr = (raw_addr + ss_mask) & ~ss_mask;
    ptr = (void*)aligned_addr;
    size_t prefix_size = aligned_addr - raw_addr;
    if (prefix_size > 0) {
        munmap(raw, prefix_size);
    }
    size_t suffix_size = alloc_size - prefix_size - ss_size;
    if (suffix_size > 0) {
        if (populate) {
#ifdef MADV_DONTNEED
            madvise((char*)ptr + ss_size, suffix_size, MADV_DONTNEED);
#endif
        } else {
            munmap((char*)ptr + ss_size, suffix_size);
        }
    }
    ss_stats_os_alloc(size_class, ss_size);
    return ptr;
}
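// ---------------------------------------------------------------------------
// Alignment note (illustrative, not part of the allocation logic above): the
// fallback path over-allocates 2x and trims, because plain mmap() only
// guarantees page alignment. With ss_size = 2MB (mask = 0x1FFFFF) and a
// hypothetical raw mapping at 0x7f1234567000:
//
//   aligned = (0x7f1234567000 + 0x1FFFFF) & ~0x1FFFFF = 0x7f1234600000
//   prefix  = 0x99000  (unmapped immediately)
//   suffix  = 0x167000 (unmapped, or kept mapped but dropped via MADV_DONTNEED
//                       when the mapping was pre-populated)
//
// prefix + suffix add up to exactly ss_size, so at most one extra SuperSlab
// worth of address space is consumed per acquisition.
// ---------------------------------------------------------------------------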
static void ss_cache_precharge(uint8_t size_class, size_t ss_size, uintptr_t ss_mask) {
    if (!g_ss_cache_enabled) return;
    if (size_class >= 8) return;
    if (g_ss_precharge_target[size_class] == 0) return;
    if (atomic_load_explicit(&g_ss_precharge_done[size_class], memory_order_acquire)) return;
    ss_cache_ensure_init();
    pthread_mutex_lock(&g_ss_cache_lock[size_class]);
    size_t target = g_ss_precharge_target[size_class];
    size_t cap = g_ss_cache_cap[size_class];
    size_t desired = target;
    if (cap != 0 && desired > cap) {
        desired = cap;
    }
    while (g_ss_cache_count[size_class] < desired) {
        void* raw = ss_os_acquire(size_class, ss_size, ss_mask, 1);
        if (!raw) {
            break;
        }
        SuperslabCacheEntry* entry = (SuperslabCacheEntry*)raw;
        entry->next = g_ss_cache_head[size_class];
        g_ss_cache_head[size_class] = entry;
        g_ss_cache_count[size_class]++;
        g_ss_cache_precharged[size_class]++;
    }
    atomic_store_explicit(&g_ss_precharge_done[size_class], 1, memory_order_release);
    pthread_mutex_unlock(&g_ss_cache_lock[size_class]);
}

static SuperslabCacheEntry* ss_cache_pop(uint8_t size_class) {
    if (!g_ss_cache_enabled) return NULL;
    if (size_class >= 8) return NULL;
    ss_cache_ensure_init();
    pthread_mutex_lock(&g_ss_cache_lock[size_class]);
    SuperslabCacheEntry* entry = g_ss_cache_head[size_class];
    if (entry) {
        g_ss_cache_head[size_class] = entry->next;
        if (g_ss_cache_count[size_class] > 0) {
            g_ss_cache_count[size_class]--;
        }
        entry->next = NULL;
        g_ss_cache_hits[size_class]++;
    } else {
        g_ss_cache_misses[size_class]++;
    }
    pthread_mutex_unlock(&g_ss_cache_lock[size_class]);
    return entry;
}

static int ss_cache_push(uint8_t size_class, SuperSlab* ss) {
    if (!g_ss_cache_enabled) return 0;
    if (size_class >= 8) return 0;
    ss_cache_ensure_init();
    pthread_mutex_lock(&g_ss_cache_lock[size_class]);
    size_t cap = g_ss_cache_cap[size_class];
    if (cap != 0 && g_ss_cache_count[size_class] >= cap) {
        g_ss_cache_drops[size_class]++;
        pthread_mutex_unlock(&g_ss_cache_lock[size_class]);
        return 0;
    }
    SuperslabCacheEntry* entry = (SuperslabCacheEntry*)ss;
    entry->next = g_ss_cache_head[size_class];
    g_ss_cache_head[size_class] = entry;
    g_ss_cache_count[size_class]++;
    g_ss_cache_puts[size_class]++;
    pthread_mutex_unlock(&g_ss_cache_lock[size_class]);
    return 1;
}

// ============================================================================
// SuperSlab Allocation (2MB aligned)
// ============================================================================

// Environment knobs parsed on the first call to superslab_allocate():
//   HAKMEM_TINY_SS_FAULT_RATE=N      fault injection (fail 1 in N allocations)
//   HAKMEM_TINY_SS_MIN_MB / _MAX_MB  clamp SuperSlab size to 1MB or 2MB
//   HAKMEM_TINY_SS_FORCE_LG=N        force lg(size), overriding the clamp
//   HAKMEM_TINY_SS_CACHE=N           per-class cache cap (HAKMEM_TINY_SS_CACHE_C<i> per class)
//   HAKMEM_TINY_SS_PRECHARGE=N       cache precharge target (HAKMEM_TINY_SS_PRECHARGE_C<i> per class)
//   HAKMEM_TINY_SS_POPULATE_ONCE=1   MAP_POPULATE the next OS acquisition

SuperSlab* superslab_allocate(uint8_t size_class) {
    // Optional fault injection for testing: HAKMEM_TINY_SS_FAULT_RATE=N → fail 1 in N allocations
    static int fault_rate = -1;  // -1=unparsed, 0=disabled, >0=rate
    static __thread unsigned long fault_tick = 0;
    if (__builtin_expect(fault_rate == -1, 0)) {
        const char* e = getenv("HAKMEM_TINY_SS_FAULT_RATE");
        if (e && *e) {
            int v = atoi(e);
            if (v < 0) v = 0;
            fault_rate = v;
        } else {
            fault_rate = 0;
        }
    }
    if (fault_rate > 0) {
        unsigned long t = ++fault_tick;
        if ((t % (unsigned long)fault_rate) == 0ul) {
            return NULL;  // simulate OOM
        }
    }

    // Optional env clamp for SuperSlab size
    static int env_parsed = 0;
    static uint8_t g_ss_min_lg_env = SUPERSLAB_LG_MIN;
    static uint8_t g_ss_max_lg_env = SUPERSLAB_LG_MAX;
    if (!env_parsed) {
        char* maxmb = getenv("HAKMEM_TINY_SS_MAX_MB");
        if (maxmb) {
            int m = atoi(maxmb);
            if (m == 1) g_ss_max_lg_env = 20;
            else if (m == 2) g_ss_max_lg_env = 21;
        }
        char* minmb = getenv("HAKMEM_TINY_SS_MIN_MB");
        if (minmb) {
            int m = atoi(minmb);
            if (m == 1) g_ss_min_lg_env = 20;
            else if (m == 2) g_ss_min_lg_env = 21;
        }
        if (g_ss_min_lg_env > g_ss_max_lg_env) g_ss_min_lg_env = g_ss_max_lg_env;
        const char* force_lg_env = getenv("HAKMEM_TINY_SS_FORCE_LG");
        if (force_lg_env && *force_lg_env) {
            int v = atoi(force_lg_env);
            if (v >= SUPERSLAB_LG_MIN && v <= SUPERSLAB_LG_MAX) {
                g_ss_force_lg = v;
                g_ss_min_lg_env = g_ss_max_lg_env = v;
            }
        }
        size_t precharge_default = 0;
        const char* precharge_env = getenv("HAKMEM_TINY_SS_PRECHARGE");
        if (precharge_env && *precharge_env) {
            long v = atol(precharge_env);
            if (v < 0) v = 0;
            precharge_default = (size_t)v;
            if (v > 0) {
                atomic_store_explicit(&g_ss_populate_once, 1, memory_order_relaxed);
            }
        }
        size_t cache_default = 0;
        const char* cache_env = getenv("HAKMEM_TINY_SS_CACHE");
        if (cache_env && *cache_env) {
            long v = atol(cache_env);
            if (v < 0) v = 0;
            cache_default = (size_t)v;
        }
        for (int i = 0; i < 8; i++) {
            g_ss_cache_cap[i] = cache_default;
            g_ss_precharge_target[i] = precharge_default;
        }
        for (int i = 0; i < 8; i++) {
            char name[64];
            snprintf(name, sizeof(name), "HAKMEM_TINY_SS_CACHE_C%d", i);
            char* cap_env = getenv(name);
            if (cap_env && *cap_env) {
                long v = atol(cap_env);
                if (v < 0) v = 0;
                g_ss_cache_cap[i] = (size_t)v;
            }
            snprintf(name, sizeof(name), "HAKMEM_TINY_SS_PRECHARGE_C%d", i);
            char* pre_env = getenv(name);
            if (pre_env && *pre_env) {
                long v = atol(pre_env);
                if (v < 0) v = 0;
                g_ss_precharge_target[i] = (size_t)v;
                if (v > 0) {
                    atomic_store_explicit(&g_ss_populate_once, 1, memory_order_relaxed);
                }
            }
            if (g_ss_cache_cap[i] > 0 || g_ss_precharge_target[i] > 0) {
                g_ss_cache_enabled = 1;
            }
        }
        const char* populate_env = getenv("HAKMEM_TINY_SS_POPULATE_ONCE");
        if (populate_env && atoi(populate_env) != 0) {
            atomic_store_explicit(&g_ss_populate_once, 1, memory_order_relaxed);
        }
        env_parsed = 1;
    }

    uint8_t lg = (g_ss_force_lg >= 0) ? (uint8_t)g_ss_force_lg
                                      : hak_tiny_superslab_next_lg(size_class);
    if (lg < g_ss_min_lg_env) lg = g_ss_min_lg_env;
    if (lg > g_ss_max_lg_env) lg = g_ss_max_lg_env;
    size_t ss_size = (size_t)1 << lg;  // 2^20 = 1MB, 2^21 = 2MB
    uintptr_t ss_mask = ss_size - 1;

    int from_cache = 0;
    void* ptr = NULL;
    if (g_ss_cache_enabled && size_class < 8) {
        ss_cache_precharge(size_class, ss_size, ss_mask);
        SuperslabCacheEntry* cached = ss_cache_pop(size_class);
        if (cached) {
            ptr = (void*)cached;
            from_cache = 1;
        }
    }
    if (!ptr) {
        int populate = atomic_exchange_explicit(&g_ss_populate_once, 0, memory_order_acq_rel);
        ptr = ss_os_acquire(size_class, ss_size, ss_mask, populate);
        if (!ptr) {
            return NULL;
        }
    }

    // Initialize SuperSlab header (Phase 1 Quick Win: removed memset for lazy init)
    SuperSlab* ss = (SuperSlab*)ptr;
    ss->magic = SUPERSLAB_MAGIC;
    ss->size_class = size_class;
    ss->active_slabs = 0;
    ss->lg_size = lg;  // Phase 8.3: Use ACE-determined lg_size (20=1MB, 21=2MB)
    ss->slab_bitmap = 0;
    ss->nonempty_mask = 0;  // Phase 6-2.1: ChatGPT Pro P0 - init nonempty mask
    ss->partial_epoch = 0;
    ss->publish_hint = 0xFF;
    // Initialize atomics explicitly
    atomic_store_explicit(&ss->total_active_blocks, 0, memory_order_relaxed);
    atomic_store_explicit(&ss->refcount, 0, memory_order_relaxed);
    atomic_store_explicit(&ss->listed, 0, memory_order_relaxed);
    ss->partial_next = NULL;

    // Initialize all slab metadata (only up to max slabs for this size)
    int max_slabs = (int)(ss_size / SLAB_SIZE);
    for (int i = 0; i < max_slabs; i++) {
        ss->slabs[i].freelist = NULL;
        ss->slabs[i].used = 0;
        ss->slabs[i].capacity = 0;
        ss->slabs[i].owner_tid = 0;
        // Initialize remote queue atomics
        atomic_store_explicit(&ss->remote_heads[i], 0, memory_order_relaxed);
        atomic_store_explicit(&ss->remote_counts[i], 0, memory_order_relaxed);
        atomic_store_explicit(&ss->slab_listed[i], 0, memory_order_relaxed);
    }

    if (from_cache) {
        ss_stats_cache_reuse();
    }

    // Phase 8.3: Update ACE current_lg to match allocated size
    g_ss_ace[size_class].current_lg = lg;

    // Phase 1: Register SuperSlab in global registry for fast lookup
    // CRITICAL: Register AFTER full initialization (ss structure is ready)
    uintptr_t base = (uintptr_t)ss;
    if (!hak_super_register(base, ss)) {
        // Registry full - this is a fatal error
        fprintf(stderr, "HAKMEM FATAL: SuperSlab registry full, cannot register %p\n", (void*)ss);
        // Still return ss to avoid memory leak, but lookups may fail
    }
    return ss;
}
// ============================================================================
// SuperSlab Deallocation
// ============================================================================

void superslab_free(SuperSlab* ss) {
    if (!ss || ss->magic != SUPERSLAB_MAGIC) {
        return;  // Invalid SuperSlab
    }

    // Phase 1: Unregister SuperSlab from registry FIRST
    // CRITICAL ORDER: unregister → clear magic → munmap
    // This prevents new lookups from finding this SuperSlab
    uintptr_t base = (uintptr_t)ss;
    hak_super_unregister(base);

    // Memory fence to ensure unregister is visible before magic clear
    atomic_thread_fence(memory_order_release);

    // Capture header fields before the block is handed to the cache or unmapped
    uint8_t size_class = ss->size_class;
    size_t ss_size = (size_t)1 << ss->lg_size;  // Actual size (1MB or 2MB)

    // Clear magic to prevent use-after-free (after unregister)
    ss->magic = 0;

    int cached = ss_cache_push(size_class, ss);
    if (cached) {
        ss_stats_cache_store();
        return;
    }

    // Unmap entire SuperSlab using its actual size
    munmap(ss, ss_size);

    // Update statistics for actual release to OS
    pthread_mutex_lock(&g_superslab_lock);
    g_superslabs_freed++;
    if (size_class < 8) {
        g_ss_freed_by_class[size_class]++;
    }
    g_bytes_allocated -= ss_size;
    pthread_mutex_unlock(&g_superslab_lock);
}

// ============================================================================
// Slab Initialization within SuperSlab
// ============================================================================

void superslab_init_slab(SuperSlab* ss, int slab_idx, size_t block_size, uint32_t owner_tid) {
    if (!ss || slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) {
        return;
    }

    // Get slab data region (skip header in first slab)
    void* slab_start = slab_data_start(ss, slab_idx);
    if (slab_idx == 0) {
        // First slab: skip SuperSlab header (64B) + metadata (512B) = 576B
        slab_start = (char*)slab_start + 1024;  // Align to 1KB for safety
    }

    // Calculate capacity
    size_t usable_size = (slab_idx == 0) ? (SLAB_SIZE - 1024) : SLAB_SIZE;
    int capacity = (int)(usable_size / block_size);

    // Phase 6.24: Lazy freelist initialization
    // NO freelist build here! (saves 4000-8000 cycles per slab init)
    // freelist will be built on-demand when first free() is called
    // Linear allocation is used until then (sequential memory access)

    // Initialize slab metadata
    TinySlabMeta* meta = &ss->slabs[slab_idx];
    meta->freelist = NULL;  // NULL = linear allocation mode
    meta->used = 0;
    meta->capacity = (uint16_t)capacity;
    meta->owner_tid = owner_tid;

    // Store slab_start in SuperSlab for later use
    // (We need this for linear allocation)
    // Note: We'll calculate this in superslab_alloc_from_slab() instead

    // Mark slab as active
    superslab_activate_slab(ss, slab_idx);
}

// ============================================================================
// Slab Bitmap Management
// ============================================================================

void superslab_activate_slab(SuperSlab* ss, int slab_idx) {
    if (!ss || slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) {
        return;
    }
    uint32_t mask = 1u << slab_idx;
    if ((ss->slab_bitmap & mask) == 0) {
        ss->slab_bitmap |= mask;
        ss->active_slabs++;
    }
}

void superslab_deactivate_slab(SuperSlab* ss, int slab_idx) {
    if (!ss || slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) {
        return;
    }
    uint32_t mask = 1u << slab_idx;
    if (ss->slab_bitmap & mask) {
        ss->slab_bitmap &= ~mask;
        ss->active_slabs--;
    }
}

int superslab_find_free_slab(SuperSlab* ss) {
    if (!ss) return -1;
    if ((int)ss->active_slabs >= ss_slabs_capacity(ss)) {
        return -1;  // No free slabs
    }
    // Find first 0 bit in bitmap
    int cap = ss_slabs_capacity(ss);
    for (int i = 0; i < cap; i++) {
        if ((ss->slab_bitmap & (1u << i)) == 0) {
            return i;
        }
    }
    return -1;
}
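// ---------------------------------------------------------------------------
// Worked example for the bitmap helpers above (values are illustrative):
// with slab_bitmap = 0x00000007, slabs 0-2 are active and
// superslab_find_free_slab() returns 3 (first zero bit). After
// superslab_deactivate_slab(ss, 1) the bitmap becomes 0x00000005 and the next
// search returns 1, so freed slots are reused before new ones are opened.
// ---------------------------------------------------------------------------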
// ============================================================================
// Statistics / Debugging
// ============================================================================

void superslab_print_stats(SuperSlab* ss) {
    if (!ss || ss->magic != SUPERSLAB_MAGIC) {
        printf("Invalid SuperSlab\n");
        return;
    }
    printf("=== SuperSlab Stats ===\n");
    printf("Address: %p\n", (void*)ss);
    printf("Size class: %u\n", ss->size_class);
    printf("Active slabs: %u / %d\n", ss->active_slabs, ss_slabs_capacity(ss));
    printf("Bitmap: 0x%08X\n", ss->slab_bitmap);
    printf("\nPer-slab details:\n");
    for (int i = 0; i < ss_slabs_capacity(ss); i++) {
        if (ss->slab_bitmap & (1u << i)) {
            TinySlabMeta* meta = &ss->slabs[i];
            printf("  Slab %2d: used=%u/%u freelist=%p owner=%u\n",
                   i, meta->used, meta->capacity, meta->freelist, meta->owner_tid);
        }
    }
    printf("\n");
}

// Global statistics
void superslab_print_global_stats(void) {
    pthread_mutex_lock(&g_superslab_lock);
    printf("=== Global SuperSlab Stats ===\n");
    printf("SuperSlabs allocated: %llu\n", (unsigned long long)g_superslabs_allocated);
    printf("SuperSlabs freed: %llu\n", (unsigned long long)g_superslabs_freed);
    printf("SuperSlabs active: %llu\n",
           (unsigned long long)(g_superslabs_allocated - g_superslabs_freed));
    printf("Total bytes allocated: %llu MB\n",
           (unsigned long long)(g_bytes_allocated / (1024 * 1024)));
    pthread_mutex_unlock(&g_superslab_lock);
}

// ============================================================================
// Phase 8.3: ACE Statistics / Debugging
// ============================================================================

void superslab_ace_print_stats(void) {
    printf("=== ACE (Adaptive Cache Engine) Stats ===\n");
    const char* class_names[8] = {"8B", "16B", "24B", "32B", "40B", "48B", "56B", "64B"};
    printf("Class  Curr  Targ  Hot  Allocs  Refills  Spills  LiveBlks\n");
    printf("--------------------------------------------------------------\n");
    for (int i = 0; i < TINY_NUM_CLASSES_SS; i++) {
        SuperSlabACEState* c = &g_ss_ace[i];
        printf("%-6s %2uMB %2uMB %4u %7u %8u %7u %9u\n",
               class_names[i],
               (1u << c->current_lg) / (1024 * 1024),
               (1u << c->target_lg) / (1024 * 1024),
               c->hot_score,
               c->alloc_count,
               c->refill_count,
               c->spill_count,
               c->live_blocks);
    }
    printf("\n");
}

// ============================================================================
// Phase 8.3: ACE Tick Function (Promotion/Demotion Logic)
// ============================================================================

#define ACE_TICK_NS     (150ULL * 1000 * 1000)  // 150ms tick interval
#define ACE_COOLDOWN_NS (800ULL * 1000 * 1000)  // 0.8s cooldown (anti-oscillation)

// Simplified thresholds for refill activity
#define HI_REFILL(k)  (g_ss_ace[k].refill_count > 64)  // High refill rate
#define MID_REFILL(k) (g_ss_ace[k].refill_count > 16)  // Medium refill rate

// Object sizes per class (for capacity calculation)
// Must match TINY size classes: 8, 16, 24, 32, 40, 48, 56, 64 bytes
static const int g_tiny_obj_sizes[TINY_NUM_CLASSES_SS] = {8, 16, 24, 32, 40, 48, 56, 64};
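// ---------------------------------------------------------------------------
// Worked numbers for the tick below (derived from the constants above, shown
// for the 64B class): a 1MB SuperSlab holds (1<<20)/64 = 16384 blocks and a
// 2MB one holds 32768. Promotion to 2MB therefore needs
// live_blocks > 0.75 * 16384 = 12288 together with refill_count > 64;
// demotion back to 1MB needs live_blocks < 0.35 * 32768 = 11468.8 with
// refill_count <= 16, and both transitions are rate-limited by the 0.8s
// cooldown.
// ---------------------------------------------------------------------------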
void hak_tiny_superslab_ace_tick(int k, uint64_t now) {
    if (k < 0 || k >= TINY_NUM_CLASSES_SS) return;
    SuperSlabACEState* c = &g_ss_ace[k];

    // Rate limiting: only tick every ACE_TICK_NS (~150ms)
    if (now - c->last_tick_ns < ACE_TICK_NS) return;

    // Calculate capacity for 1MB and 2MB SuperSlabs
    int obj_size = g_tiny_obj_sizes[k];
    double cap1MB = (double)((1U << 20) / obj_size);  // 1MB capacity
    double cap2MB = (double)((1U << 21) / obj_size);  // 2MB capacity

    // Calculate hotness score (weighted: 60% live blocks, 40% refill rate)
    double hot = 0.6 * (double)c->live_blocks + 0.4 * (double)c->refill_count;
    if (hot < 0) hot = 0;
    if (hot > 1000) hot = 1000;
    c->hot_score = (uint16_t)hot;

    // Cooldown mechanism: prevent size changes within 0.8s of last change
    static uint64_t last_switch_ns[TINY_NUM_CLASSES_SS] = {0};
    if (now - last_switch_ns[k] >= ACE_COOLDOWN_NS) {
        if (c->current_lg <= 20) {
            // Promotion condition: 1MB → 2MB
            // High demand (live > 75% capacity) AND high refill rate
            if (c->live_blocks > 0.75 * cap1MB && HI_REFILL(k)) {
                c->target_lg = 21;  // Promote to 2MB
                last_switch_ns[k] = now;
            }
        } else {
            // Demotion condition: 2MB → 1MB
            // Low demand (live < 35% capacity) AND low refill rate
            if (c->live_blocks < 0.35 * cap2MB && !MID_REFILL(k)) {
                c->target_lg = 20;  // Demote to 1MB
                last_switch_ns[k] = now;
            }
        }
    }

    // EMA-style decay for counters (reduce by 75% each tick)
    c->alloc_count = c->alloc_count / 4;
    c->refill_count = c->refill_count / 4;
    c->spill_count = c->spill_count / 4;
    // live_blocks is updated incrementally by alloc/free, not decayed here

    c->last_tick_ns = now;
}

// ============================================================================
// Phase 8.4: ACE Observer (Registry-based, zero hot-path overhead)
// ============================================================================

// Global debug flag (set once at initialization)
static int g_ace_debug = 0;

// Registry-based observation: scan all SuperSlabs for usage stats
static void ace_observe_and_decide(int k) {
    if (k < 0 || k >= TINY_NUM_CLASSES_SS) return;
    SuperSlabACEState* c = &g_ss_ace[k];

    // Scan Registry to count SuperSlabs and total live blocks
    int ss_count = 0;
    uint32_t total_live = 0;
    for (int i = 0; i < SUPER_REG_SIZE; i++) {
        SuperRegEntry* e = &g_super_reg[i];
        // Atomic read (thread-safe)
        uintptr_t base = atomic_load_explicit((_Atomic uintptr_t*)&e->base, memory_order_acquire);
        if (base == 0) continue;  // Empty slot
        // Phase 8.4: Safety check - skip if ss pointer is invalid
        if (!e->ss) continue;
        if (e->ss->size_class != k) continue;  // Wrong class
        ss_count++;
        // Phase 8.4: Scan all slabs to count used blocks (zero hot-path overhead)
        uint32_t ss_live = 0;
        int cap_scan = ss_slabs_capacity(e->ss);
        for (int slab_idx = 0; slab_idx < cap_scan; slab_idx++) {
            TinySlabMeta* meta = &e->ss->slabs[slab_idx];
            // Relaxed read is OK (stats only, no hot-path impact)
            ss_live += meta->used;
        }
        total_live += ss_live;
    }

    // Calculate utilization
    int obj_size = g_tiny_obj_sizes[k];
    uint8_t current_lg = atomic_load_explicit((_Atomic uint8_t*)&c->current_lg, memory_order_relaxed);
    uint32_t capacity = (ss_count > 0) ? ss_count * ((1U << current_lg) / obj_size) : 1;
    double util = (double)total_live / capacity;

    // Update hot_score (for debugging/visualization)
    c->hot_score = (uint16_t)(util * 1000);
    if (c->hot_score > 1000) c->hot_score = 1000;

    // Promotion/Demotion decision
    uint8_t new_target = current_lg;
    if (current_lg <= 20) {
        // Promotion: 1MB → 2MB
        if (util > 0.75) {
            new_target = 21;
        }
    } else {
        // Demotion: 2MB → 1MB
        if (util < 0.35) {
            new_target = 20;
        }
    }

    // Debug output (if enabled)
    if (g_ace_debug && ss_count > 0) {
        fprintf(stderr, "[ACE] Class %d (%dB): ss=%d live=%u cap=%u util=%.2f%% lg=%d->%d hot=%d\n",
                k, obj_size, ss_count, total_live, capacity, util * 100.0,
                current_lg, new_target, c->hot_score);
    }

    // Atomic write (thread-safe)
    if (new_target != current_lg) {
        atomic_store_explicit((_Atomic uint8_t*)&c->target_lg, new_target, memory_order_release);
        if (g_ace_debug) {
            fprintf(stderr, "[ACE] *** Class %d: SIZE CHANGE %dMB -> %dMB (util=%.2f%%)\n",
                    k, 1 << (current_lg - 20), 1 << (new_target - 20), util * 100.0);
        }
    }
}
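// Note: the entry point below is intended to be driven periodically from a
// background ("Learner") thread; the call rate is up to the caller, since
// ace_observe_and_decide() does no rate limiting of its own (unlike
// hak_tiny_superslab_ace_tick(), which is gated by ACE_TICK_NS).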
// Called from Learner thread (background observation)
void hak_tiny_superslab_ace_observe_all(void) {
    // Initialize debug flag once
    static int initialized = 0;
    if (!initialized) {
        const char* ace_debug = getenv("HAKMEM_ACE_DEBUG");
        g_ace_debug = (ace_debug && atoi(ace_debug) != 0) ? 1 : 0;
        initialized = 1;
    }
    for (int k = 0; k < TINY_NUM_CLASSES_SS; k++) {
        ace_observe_and_decide(k);
    }
}
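// ---------------------------------------------------------------------------
// Usage sketch (not compiled by default). A minimal illustration of the
// lifecycle exposed by this file: allocate a SuperSlab, carve one slab for a
// size class, dump stats, and release it. The guard macro and function name
// are hypothetical and exist only for illustration; the block size and
// owner_tid are placeholder values.
// ---------------------------------------------------------------------------
#ifdef HAKMEM_SS_USAGE_EXAMPLE
static void hakmem_ss_usage_example(void) {
    // Class 7 corresponds to 64B objects in g_tiny_obj_sizes above.
    SuperSlab* ss = superslab_allocate(7);
    if (!ss) return;  // OOM or fault injection

    // Pick an unused slab slot and set it up for 64-byte blocks.
    int idx = superslab_find_free_slab(ss);
    if (idx >= 0) {
        superslab_init_slab(ss, idx, 64, /*owner_tid=*/0);
    }

    superslab_print_stats(ss);  // Per-slab usage dump for debugging

    // Hand the SuperSlab back: cached for reuse or munmap'd to the OS.
    superslab_free(ss);
}
#endif  // HAKMEM_SS_USAGE_EXAMPLE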