// Box: Legacy Backend (Phase 12)
// Purpose: Per-class SuperSlabHead backend (legacy implementation)

#include "ss_legacy_backend_box.h"
#include "ss_allocation_box.h"
#include "hakmem_tiny_config.h"
#include "hakmem_tiny.h"  // For tiny_self_u32
#include <stdio.h>        // fprintf
#include <stdlib.h>       // calloc, free, getenv
#include <stdatomic.h>    // atomic_* operations

// ============================================================================
// Global State
// ============================================================================

// Phase 2a: Dynamic Expansion - Global per-class SuperSlabHeads
SuperSlabHead* g_superslab_heads[TINY_NUM_CLASSES_SS] = {NULL};

// Legacy fallback hint box (per-thread, per-class)
static __thread SuperSlab* g_ss_legacy_hint_ss[TINY_NUM_CLASSES_SS];
static __thread uint8_t    g_ss_legacy_hint_slab[TINY_NUM_CLASSES_SS];

// ============================================================================
// Hint Box (Optional Optimization)
// ============================================================================

void hak_tiny_ss_hint_record(int class_idx, SuperSlab* ss, int slab_idx) {
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) return;
    if (!ss || slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) return;
    g_ss_legacy_hint_ss[class_idx]   = ss;
    g_ss_legacy_hint_slab[class_idx] = (uint8_t)slab_idx;
}

void* hak_tiny_alloc_superslab_backend_hint(int class_idx) {
    static int g_hint_enabled = -1;
    if (__builtin_expect(g_hint_enabled == -1, 0)) {
        const char* e = getenv("HAKMEM_TINY_SS_LEGACY_HINT");
        g_hint_enabled = (e && *e && *e != '0') ? 1 : 0;
    }
    if (!g_hint_enabled) {
        return NULL;
    }
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }

    SuperSlab* ss = g_ss_legacy_hint_ss[class_idx];
    int slab_idx  = (int)g_ss_legacy_hint_slab[class_idx];
    if (!ss) {
        return NULL;
    }

    // Basic sanity: SuperSlab still alive?
    if (ss->magic != SUPERSLAB_MAGIC) {
        g_ss_legacy_hint_ss[class_idx] = NULL;
        return NULL;
    }
    if (slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) {
        g_ss_legacy_hint_ss[class_idx] = NULL;
        return NULL;
    }

    TinySlabMeta* meta = &ss->slabs[slab_idx];
    if (meta->capacity == 0 || meta->used >= meta->capacity) {
        // Hint slab exhausted; clear and fall back.
        g_ss_legacy_hint_ss[class_idx] = NULL;
        return NULL;
    }
    if (meta->class_idx != (uint8_t)class_idx && meta->class_idx != 255) {
        // Bound to a different class; hint no longer valid.
        g_ss_legacy_hint_ss[class_idx] = NULL;
        return NULL;
    }

    size_t stride = tiny_block_stride_for_class(class_idx);
    size_t offset = (size_t)meta->used * stride;
    size_t slab_base_off = SUPERSLAB_SLAB0_DATA_OFFSET +
                           (size_t)slab_idx * SUPERSLAB_SLAB_USABLE_SIZE;
    uint8_t* base = (uint8_t*)ss + slab_base_off + offset;

    meta->used++;
    atomic_fetch_add_explicit(&ss->total_active_blocks, 1, memory_order_relaxed);

    // Keep the hint as long as there is remaining capacity.
    if (meta->used >= meta->capacity) {
        g_ss_legacy_hint_ss[class_idx] = NULL;
    }
    return (void*)base;
}

// ============================================================================
// Legacy Backend Implementation
// ============================================================================

/*
 * Legacy backend for hak_tiny_alloc_superslab_box().
 *
 * Phase 12 Stage A/B:
 * - Uses per-class SuperSlabHead (g_superslab_heads) as the implementation.
 * - Callers MUST use hak_tiny_alloc_superslab_box() and never touch this directly.
 * - Later Stage C: this function will be replaced by a shared_pool backend.
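 *
 * Call pattern (a sketch; the real dispatch lives in hak_tiny_alloc_superslab_box(),
 * which is not shown in this file):
 *
 *     void* p = hak_tiny_alloc_superslab_backend_hint(class_idx);     // O(1) hint path (opt-in)
 *     if (!p) p = hak_tiny_alloc_superslab_backend_legacy(class_idx); // full chunk scan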
 */
void* hak_tiny_alloc_superslab_backend_legacy(int class_idx) {
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }

    SuperSlabHead* head = g_superslab_heads[class_idx];
    if (!head) {
        head = init_superslab_head(class_idx);
        if (!head) {
            return NULL;
        }
        g_superslab_heads[class_idx] = head;
    }

    // Scan existing chunks (starting from the current chunk) for a slab with
    // remaining capacity.
    SuperSlab* chunk = head->current_chunk ? head->current_chunk : head->first_chunk;
    while (chunk) {
        int cap = ss_slabs_capacity(chunk);
        for (int slab_idx = 0; slab_idx < cap; slab_idx++) {
            TinySlabMeta* meta = &chunk->slabs[slab_idx];
            if (meta->capacity == 0) {
                continue;
            }
            if (meta->used < meta->capacity) {
                size_t stride = tiny_block_stride_for_class(class_idx);
                size_t offset = (size_t)meta->used * stride;
                uint8_t* base = (uint8_t*)chunk + SUPERSLAB_SLAB0_DATA_OFFSET +
                                (size_t)slab_idx * SUPERSLAB_SLAB_USABLE_SIZE + offset;
                hak_tiny_ss_hint_record(class_idx, chunk, slab_idx);
                meta->used++;
                atomic_fetch_add_explicit(&chunk->total_active_blocks, 1, memory_order_relaxed);
                return (void*)base;
            }
        }
        chunk = chunk->next_chunk;
    }

    // All scanned chunks are exhausted: expand the head and allocate from the
    // freshly linked chunk.
    if (expand_superslab_head(head) < 0) {
        return NULL;
    }
    SuperSlab* new_chunk = head->current_chunk;
    if (!new_chunk) {
        return NULL;
    }
    int cap2 = ss_slabs_capacity(new_chunk);
    for (int slab_idx = 0; slab_idx < cap2; slab_idx++) {
        TinySlabMeta* meta = &new_chunk->slabs[slab_idx];
        if (meta->capacity == 0) continue;
        if (meta->used < meta->capacity) {
            size_t stride = tiny_block_stride_for_class(class_idx);
            size_t offset = (size_t)meta->used * stride;
            uint8_t* base = (uint8_t*)new_chunk + SUPERSLAB_SLAB0_DATA_OFFSET +
                            (size_t)slab_idx * SUPERSLAB_SLAB_USABLE_SIZE + offset;
            hak_tiny_ss_hint_record(class_idx, new_chunk, slab_idx);
            meta->used++;
            atomic_fetch_add_explicit(&new_chunk->total_active_blocks, 1, memory_order_relaxed);
            return (void*)base;
        }
    }
    return NULL;
}
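// ----------------------------------------------------------------------------
// Block address computation (informational sketch, restating the expressions
// used by the hint path and the scan above; all names are the real ones):
//
//   block = (uint8_t*)chunk
//         + SUPERSLAB_SLAB0_DATA_OFFSET                      // offset to slab 0 data
//         + (size_t)slab_idx * SUPERSLAB_SLAB_USABLE_SIZE    // per-slab data region
//         + (size_t)meta->used * tiny_block_stride_for_class(class_idx);
//
// meta->used acts as a bump index: blocks are handed out in address order and
// a slab is considered full once used == capacity.
// ----------------------------------------------------------------------------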
// ============================================================================
// SuperSlabHead Management
// ============================================================================

// Initialize SuperSlabHead for a class
SuperSlabHead* init_superslab_head(int class_idx) {
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }

    // Allocate SuperSlabHead structure
    SuperSlabHead* head = (SuperSlabHead*)calloc(1, sizeof(SuperSlabHead));
    if (!head) {
        extern __thread int g_hakmem_lock_depth;
        g_hakmem_lock_depth++;
        fprintf(stderr, "[HAKMEM] CRITICAL: Failed to allocate SuperSlabHead for class %d\n",
                class_idx);
        g_hakmem_lock_depth--;
        return NULL;
    }

    head->class_idx = (uint8_t)class_idx;
    atomic_store_explicit(&head->total_chunks, 0, memory_order_relaxed);
    head->first_chunk   = NULL;
    head->current_chunk = NULL;
    pthread_mutex_init(&head->expansion_lock, NULL);

    // Allocate initial chunk(s).
    // Phase 2a: every class starts with a single chunk (hot classes 1, 4, 6 no
    // longer get 2 upfront); dynamic expansion handles growth, which keeps
    // startup memory overhead low while still allowing unlimited growth.
    int initial_chunks = 1;

    for (int i = 0; i < initial_chunks; i++) {
        if (expand_superslab_head(head) < 0) {
            extern __thread int g_hakmem_lock_depth;
            g_hakmem_lock_depth++;
            fprintf(stderr, "[HAKMEM] CRITICAL: Failed to allocate initial chunk %d for class %d\n",
                    i, class_idx);
            g_hakmem_lock_depth--;

            // Cleanup on failure
            SuperSlab* chunk = head->first_chunk;
            while (chunk) {
                SuperSlab* next = chunk->next_chunk;
                superslab_free(chunk);
                chunk = next;
            }
            pthread_mutex_destroy(&head->expansion_lock);
            free(head);
            return NULL;
        }
    }

    extern __thread int g_hakmem_lock_depth;
    g_hakmem_lock_depth++;
#if !HAKMEM_BUILD_RELEASE
    fprintf(stderr, "[HAKMEM] Initialized SuperSlabHead for class %d: %zu initial chunks\n",
            class_idx, atomic_load_explicit(&head->total_chunks, memory_order_relaxed));
#endif
    g_hakmem_lock_depth--;

    return head;
}

// Expand SuperSlabHead by allocating and linking a new chunk
int expand_superslab_head(SuperSlabHead* head) {
    if (!head) {
        return -1;
    }

    // Allocate a new chunk via the existing superslab_allocate
    SuperSlab* new_chunk = superslab_allocate(head->class_idx);
    if (!new_chunk) {
#if !defined(NDEBUG) || defined(HAKMEM_SUPERSLAB_VERBOSE)
        extern __thread int g_hakmem_lock_depth;
        g_hakmem_lock_depth++;
        fprintf(stderr, "[HAKMEM] CRITICAL: Failed to allocate new chunk for class %d (system OOM)\n",
                head->class_idx);
        g_hakmem_lock_depth--;
#endif
        return -1;  // True OOM (system out of memory)
    }

    // CRITICAL FIX: Initialize slab 0 so the bitmap != 0x00000000.
    // Phase 2a chunks must have at least one usable slab after allocation.
    size_t block_size = g_tiny_class_sizes[head->class_idx];
    // Use pthread_self() directly since tiny_self_u32() is static inline in hakmem_tiny.c.
    uint32_t owner_tid = (uint32_t)(uintptr_t)pthread_self();
    superslab_init_slab(new_chunk, 0, block_size, owner_tid);

    // Initialize the next_chunk link to NULL
    new_chunk->next_chunk = NULL;

    // Thread-safe linking
    pthread_mutex_lock(&head->expansion_lock);

    if (head->current_chunk) {
        // Find the tail of the list (optimization: could cache a tail pointer)
        SuperSlab* tail = head->current_chunk;
        while (tail->next_chunk) {
            tail = tail->next_chunk;
        }
        tail->next_chunk = new_chunk;
    } else {
        // First chunk
        head->first_chunk = new_chunk;
    }

    // Update the current chunk to the new chunk (for fast allocation)
    head->current_chunk = new_chunk;

    // Increment total chunks atomically
    size_t old_count = atomic_fetch_add_explicit(&head->total_chunks, 1, memory_order_relaxed);
    size_t new_count = old_count + 1;

    pthread_mutex_unlock(&head->expansion_lock);

#if !defined(NDEBUG) || defined(HAKMEM_SUPERSLAB_VERBOSE)
    extern __thread int g_hakmem_lock_depth;
    g_hakmem_lock_depth++;
    fprintf(stderr, "[HAKMEM] Expanded SuperSlabHead for class %d: %zu chunks now (bitmap=0x%08x)\n",
            head->class_idx, new_count, new_chunk->slab_bitmap);
    g_hakmem_lock_depth--;
#endif

    return 0;
}

// Find which chunk a pointer belongs to
SuperSlab* find_chunk_for_ptr(void* ptr, int class_idx) {
    if (!ptr || class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }

    SuperSlabHead* head = g_superslab_heads[class_idx];
    if (!head) {
        return NULL;
    }

    uintptr_t ptr_addr = (uintptr_t)ptr;

    // Walk the chunk list
    SuperSlab* chunk = head->first_chunk;
    while (chunk) {
        // Check if ptr is within this chunk's memory range.
        // Each chunk is aligned to SUPERSLAB_SIZE (1MB or 2MB).
        uintptr_t chunk_start = (uintptr_t)chunk;
        size_t    chunk_size  = (size_t)1 << chunk->lg_size;  // Use the actual chunk size
        uintptr_t chunk_end   = chunk_start + chunk_size;

        if (ptr_addr >= chunk_start && ptr_addr < chunk_end) {
            // Found the chunk
            return chunk;
        }

        chunk = chunk->next_chunk;
    }

    return NULL;  // Not found in any chunk
}
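// ----------------------------------------------------------------------------
// Example (sketch, not part of the backend API): how a free path could map a
// block pointer back to its owning chunk and slab index. Only
// find_chunk_for_ptr() and the layout constants are real; the surrounding code
// is hypothetical and assumes each slab's blocks stay within its
// SUPERSLAB_SLAB_USABLE_SIZE window.
//
//   SuperSlab* chunk = find_chunk_for_ptr(ptr, class_idx);
//   if (chunk) {
//       size_t off      = (uintptr_t)ptr - (uintptr_t)chunk - SUPERSLAB_SLAB0_DATA_OFFSET;
//       int    slab_idx = (int)(off / SUPERSLAB_SLAB_USABLE_SIZE);
//       TinySlabMeta* meta = &chunk->slabs[slab_idx];
//       // ... return the block to meta / decrement chunk->total_active_blocks ...
//   }
// ----------------------------------------------------------------------------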