hakmem/core/box/ss_legacy_backend_box.c

// Box: Legacy Backend (Phase 12)
// Purpose: Per-class SuperSlabHead backend (legacy implementation)
#include "ss_legacy_backend_box.h"
#include "ss_allocation_box.h"
#include "hakmem_tiny_config.h"
#include "hakmem_tiny.h" // For tiny_self_u32
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <stdint.h>    // uint8_t / uint32_t / uintptr_t (likely also pulled in via the box headers)
#include <stdatomic.h> // atomic_*_explicit on SuperSlab counters (likely also via the box headers)
// ============================================================================
// Global State
// ============================================================================
// Phase 2a: Dynamic Expansion - Global per-class SuperSlabHeads
SuperSlabHead* g_superslab_heads[TINY_NUM_CLASSES_SS] = {NULL};
// Legacy fallback hint box (per-thread, per-class)
static __thread SuperSlab* g_ss_legacy_hint_ss[TINY_NUM_CLASSES_SS];
static __thread uint8_t g_ss_legacy_hint_slab[TINY_NUM_CLASSES_SS];
// ============================================================================
// Hint Box (Optional Optimization)
// ============================================================================
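// The hint box caches, per thread and per class, the last (SuperSlab, slab) pair
// that served a carve. hak_tiny_ss_hint_record() stores the pair after a successful
// allocation; hak_tiny_alloc_superslab_backend_hint() revalidates it (magic, slab
// bounds, class binding, remaining capacity) before bump-allocating from it, and
// clears the hint as soon as any check fails or the slab fills up.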
void hak_tiny_ss_hint_record(int class_idx, SuperSlab* ss, int slab_idx)
{
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) return;
    if (!ss || slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) return;
    g_ss_legacy_hint_ss[class_idx] = ss;
    g_ss_legacy_hint_slab[class_idx] = (uint8_t)slab_idx;
}
void* hak_tiny_alloc_superslab_backend_hint(int class_idx)
{
    static int g_hint_enabled = -1;
    if (__builtin_expect(g_hint_enabled == -1, 0)) {
        const char* e = getenv("HAKMEM_TINY_SS_LEGACY_HINT");
        g_hint_enabled = (e && *e && *e != '0') ? 1 : 0;
    }
    if (!g_hint_enabled) {
        return NULL;
    }
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }
    SuperSlab* ss = g_ss_legacy_hint_ss[class_idx];
    int slab_idx = (int)g_ss_legacy_hint_slab[class_idx];
    if (!ss) {
        return NULL;
    }
    // Basic sanity: Superslab still alive?
    if (ss->magic != SUPERSLAB_MAGIC) {
        g_ss_legacy_hint_ss[class_idx] = NULL;
        return NULL;
    }
    if (slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) {
        g_ss_legacy_hint_ss[class_idx] = NULL;
        return NULL;
    }
    TinySlabMeta* meta = &ss->slabs[slab_idx];
    if (meta->capacity == 0 || meta->used >= meta->capacity) {
        // Hint slab exhausted; clear and fall back.
        g_ss_legacy_hint_ss[class_idx] = NULL;
        return NULL;
    }
    if (meta->class_idx != (uint8_t)class_idx && meta->class_idx != 255) {
        // Different class bound; hint no longer valid.
        g_ss_legacy_hint_ss[class_idx] = NULL;
        return NULL;
    }
    size_t stride = tiny_block_stride_for_class(class_idx);
    size_t offset = (size_t)meta->used * stride;
    size_t slab_base_off = SUPERSLAB_SLAB0_DATA_OFFSET
                         + (size_t)slab_idx * SUPERSLAB_SLAB_USABLE_SIZE;
    uint8_t* base = (uint8_t*)ss + slab_base_off + offset;
    meta->used++;
    atomic_fetch_add_explicit(&ss->total_active_blocks, 1, memory_order_relaxed);
    // Keep hint as long as there is remaining capacity.
    if (meta->used >= meta->capacity) {
        g_ss_legacy_hint_ss[class_idx] = NULL;
    }
    return (void*)base;
}
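/*
 * Usage sketch (illustrative only, not part of the build): the hint path is an
 * optional O(1) fast path that a dispatcher such as hak_tiny_alloc_superslab_box()
 * is assumed to try before the full legacy scan; the actual dispatch order lives
 * in ss_allocation_box, not here.
 *
 *   void* p = hak_tiny_alloc_superslab_backend_hint(class_idx);   // hinted slab bump
 *   if (!p) {
 *       p = hak_tiny_alloc_superslab_backend_legacy(class_idx);   // full chunk/slab scan
 *   }
 */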
// ============================================================================
// Legacy Backend Implementation
// ============================================================================
/*
 * Legacy backend for hak_tiny_alloc_superslab_box().
 *
 * Phase 12 Stage A/B:
 * - Uses per-class SuperSlabHead (g_superslab_heads) as the implementation.
 * - Callers MUST use hak_tiny_alloc_superslab_box() and never touch this directly.
 * - Later Stage C: this function will be replaced by a shared_pool backend.
 */
void* hak_tiny_alloc_superslab_backend_legacy(int class_idx)
{
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }
    SuperSlabHead* head = g_superslab_heads[class_idx];
    if (!head) {
        head = init_superslab_head(class_idx);
        if (!head) {
            return NULL;
        }
        g_superslab_heads[class_idx] = head;
    }
    SuperSlab* chunk = head->current_chunk ? head->current_chunk : head->first_chunk;
    while (chunk) {
        int cap = ss_slabs_capacity(chunk);
        for (int slab_idx = 0; slab_idx < cap; slab_idx++) {
            TinySlabMeta* meta = &chunk->slabs[slab_idx];
            // Skip slabs that belong to a different class (or are uninitialized).
            if (meta->class_idx != (uint8_t)class_idx) {
                continue;
            }
            if (meta->capacity == 0) {
                continue;
            }
            if (meta->used < meta->capacity) {
                // CRITICAL FIX: Validate geometry matches current stride (handles C7 1024->2048 upgrade)
                size_t stride = tiny_block_stride_for_class(class_idx);
                size_t usable = (slab_idx == 0) ? SUPERSLAB_SLAB0_USABLE_SIZE : SUPERSLAB_SLAB_USABLE_SIZE;
                uint16_t expect_cap = (uint16_t)(usable / stride);
                if (meta->capacity != expect_cap) {
                    // Stale geometry detected - reinitialize slab with current stride
                    extern __thread int g_hakmem_lock_depth;
                    g_hakmem_lock_depth++;
                    fprintf(stderr, "[LEGACY_FIX_GEOMETRY] ss=%p slab=%d cls=%d: old_cap=%u -> new_cap=%u (stride=%zu)\n",
                            (void*)chunk, slab_idx, class_idx,
                            meta->capacity, expect_cap, stride);
                    g_hakmem_lock_depth--;
                    superslab_init_slab(chunk, slab_idx, stride, 0);
                    meta->class_idx = (uint8_t)class_idx;
                    meta = &chunk->slabs[slab_idx]; // Reload after reinit
                }
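                // Worked example of the stale-geometry case (numbers from the C7
                // stride-upgrade notes; actual sizes depend on build config): a slab
                // carved with the old 1024B stride reports capacity = usable/1024
                // (64 for a 64 KiB slab), while the current 2048B stride expects
                // usable/2048 (32), so the mismatch above forces a reinit before
                // any block is handed out.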
                size_t offset = (size_t)meta->used * stride;
                uint8_t* base = (uint8_t*)chunk
                              + SUPERSLAB_SLAB0_DATA_OFFSET
                              + (size_t)slab_idx * SUPERSLAB_SLAB_USABLE_SIZE
                              + offset;
                hak_tiny_ss_hint_record(class_idx, chunk, slab_idx);
                meta->used++;
                atomic_fetch_add_explicit(&chunk->total_active_blocks, 1, memory_order_relaxed);
                return (void*)base;
            }
        }
        chunk = chunk->next_chunk;
    }
    if (expand_superslab_head(head) < 0) {
        return NULL;
    }
    SuperSlab* new_chunk = head->current_chunk;
    if (!new_chunk) {
        return NULL;
    }
    int cap2 = ss_slabs_capacity(new_chunk);
    for (int slab_idx = 0; slab_idx < cap2; slab_idx++) {
        TinySlabMeta* meta = &new_chunk->slabs[slab_idx];
        if (meta->capacity == 0) continue;
        if (meta->used < meta->capacity) {
            size_t stride = tiny_block_stride_for_class(class_idx);
            size_t offset = (size_t)meta->used * stride;
            uint8_t* base = (uint8_t*)new_chunk
                          + SUPERSLAB_SLAB0_DATA_OFFSET
                          + (size_t)slab_idx * SUPERSLAB_SLAB_USABLE_SIZE
                          + offset;
            hak_tiny_ss_hint_record(class_idx, new_chunk, slab_idx);
            meta->used++;
            atomic_fetch_add_explicit(&new_chunk->total_active_blocks, 1, memory_order_relaxed);
            return (void*)base;
        }
    }
    return NULL;
}
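/*
 * Block-address geometry shared by the carve sites above (sketch; the constants are
 * assumed to describe the SuperSlab data layout as their names suggest):
 *
 *   base = (uint8_t*)chunk
 *        + SUPERSLAB_SLAB0_DATA_OFFSET               // start of slab 0's data area
 *        + slab_idx * SUPERSLAB_SLAB_USABLE_SIZE     // start of this slab's data
 *        + meta->used * stride;                      // next unused block (linear bump)
 *
 * Blocks are handed out by bumping meta->used; this backend detects exhaustion
 * purely via used >= capacity and never re-links freed blocks itself.
 */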
// ============================================================================
// SuperSlabHead Management
// ============================================================================
// Initialize SuperSlabHead for a class
SuperSlabHead* init_superslab_head(int class_idx) {
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }
    // Allocate SuperSlabHead structure
    SuperSlabHead* head = (SuperSlabHead*)calloc(1, sizeof(SuperSlabHead));
    if (!head) {
        extern __thread int g_hakmem_lock_depth;
        g_hakmem_lock_depth++;
        fprintf(stderr, "[HAKMEM] CRITICAL: Failed to allocate SuperSlabHead for class %d\n", class_idx);
        g_hakmem_lock_depth--;
        return NULL;
    }
    head->class_idx = (uint8_t)class_idx;
    atomic_store_explicit(&head->total_chunks, 0, memory_order_relaxed);
    head->first_chunk = NULL;
    head->current_chunk = NULL;
    pthread_mutex_init(&head->expansion_lock, NULL);
    // Allocate initial chunk(s).
    // Phase 2a: every class starts with a single chunk; on-demand expansion handles
    // growth, which keeps startup memory overhead low while still allowing unlimited
    // growth. (Hot classes 1, 4, 6 could be given 2 initial chunks to reduce
    // contention, but Phase 2a keeps them at 1.)
    int initial_chunks = 1;
    for (int i = 0; i < initial_chunks; i++) {
        if (expand_superslab_head(head) < 0) {
            extern __thread int g_hakmem_lock_depth;
            g_hakmem_lock_depth++;
            fprintf(stderr, "[HAKMEM] CRITICAL: Failed to allocate initial chunk %d for class %d\n",
                    i, class_idx);
            g_hakmem_lock_depth--;
            // Cleanup on failure
            SuperSlab* chunk = head->first_chunk;
            while (chunk) {
                SuperSlab* next = chunk->next_chunk;
                superslab_free(chunk);
                chunk = next;
            }
            pthread_mutex_destroy(&head->expansion_lock);
            free(head);
            return NULL;
        }
    }
    extern __thread int g_hakmem_lock_depth;
    g_hakmem_lock_depth++;
#if !HAKMEM_BUILD_RELEASE
    fprintf(stderr, "[HAKMEM] Initialized SuperSlabHead for class %d: %zu initial chunks\n",
            class_idx, atomic_load_explicit(&head->total_chunks, memory_order_relaxed));
#endif
    g_hakmem_lock_depth--;
    return head;
}
// Expand SuperSlabHead by allocating and linking a new chunk
int expand_superslab_head(SuperSlabHead* head) {
    if (!head) {
        return -1;
    }
    // Allocate new chunk via existing superslab_allocate
    SuperSlab* new_chunk = superslab_allocate(head->class_idx);
    if (!new_chunk) {
#if !defined(NDEBUG) || defined(HAKMEM_SUPERSLAB_VERBOSE)
        extern __thread int g_hakmem_lock_depth;
        g_hakmem_lock_depth++;
        fprintf(stderr, "[HAKMEM] CRITICAL: Failed to allocate new chunk for class %d (system OOM)\n",
                head->class_idx);
        g_hakmem_lock_depth--;
#endif
        return -1; // True OOM (system out of memory)
    }
    // CRITICAL FIX: Initialize slab 0 so bitmap != 0x00000000
    // Phase 2a chunks must have at least one usable slab after allocation
    size_t block_size = g_tiny_class_sizes[head->class_idx];
    // Use pthread_self() directly since tiny_self_u32() is static inline in hakmem_tiny.c
    uint32_t owner_tid = (uint32_t)(uintptr_t)pthread_self();
    superslab_init_slab(new_chunk, 0, block_size, owner_tid);
    // CRITICAL FIX: Explicitly set class_idx to avoid C0/C7 confusion.
    // New SuperSlabs start with meta->class_idx=0 (mmap zero-init).
    new_chunk->slabs[0].class_idx = (uint8_t)head->class_idx;
    // Initialize the next_chunk link to NULL
    new_chunk->next_chunk = NULL;
    // Thread-safe linking
    pthread_mutex_lock(&head->expansion_lock);
    if (head->current_chunk) {
        // Find the tail of the list (optimization: could cache tail pointer)
        SuperSlab* tail = head->current_chunk;
        while (tail->next_chunk) {
            tail = tail->next_chunk;
        }
        tail->next_chunk = new_chunk;
    } else {
        // First chunk
        head->first_chunk = new_chunk;
    }
    // Update current chunk to new chunk (for fast allocation)
    head->current_chunk = new_chunk;
    // Increment total chunks atomically
    size_t old_count = atomic_fetch_add_explicit(&head->total_chunks, 1, memory_order_relaxed);
    size_t new_count = old_count + 1;
    pthread_mutex_unlock(&head->expansion_lock);
#if !defined(NDEBUG) || defined(HAKMEM_SUPERSLAB_VERBOSE)
    extern __thread int g_hakmem_lock_depth;
    g_hakmem_lock_depth++;
    fprintf(stderr, "[HAKMEM] Expanded SuperSlabHead for class %d: %zu chunks now (bitmap=0x%08x)\n",
            head->class_idx, new_count, new_chunk->slab_bitmap);
    g_hakmem_lock_depth--;
#endif
    return 0;
}
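/*
 * Resulting list shape after N expansions (sketch):
 *
 *   head->first_chunk -> chunk_1 -> chunk_2 -> ... -> chunk_N  <- head->current_chunk
 *
 * current_chunk always points at the newest (tail) chunk, so the tail walk above is
 * short in practice; caching an explicit tail pointer (as noted in the comment above)
 * would make linking O(1).
 */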
// Find which chunk a pointer belongs to
SuperSlab* find_chunk_for_ptr(void* ptr, int class_idx) {
    if (!ptr || class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }
    SuperSlabHead* head = g_superslab_heads[class_idx];
    if (!head) {
        return NULL;
    }
    uintptr_t ptr_addr = (uintptr_t)ptr;
    // Walk the chunk list
    SuperSlab* chunk = head->first_chunk;
    while (chunk) {
        // Check if ptr is within this chunk's memory range
        // Each chunk is aligned to SUPERSLAB_SIZE (1MB or 2MB)
        uintptr_t chunk_start = (uintptr_t)chunk;
        size_t chunk_size = (size_t)1 << chunk->lg_size; // Use actual chunk size
        uintptr_t chunk_end = chunk_start + chunk_size;
        if (ptr_addr >= chunk_start && ptr_addr < chunk_end) {
            // Found the chunk
            return chunk;
        }
        chunk = chunk->next_chunk;
    }
    return NULL; // Not found in any chunk
}
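/*
 * Usage sketch (illustrative only): a free/introspection path can map a raw pointer
 * back to its owning chunk before touching slab metadata. The walk is O(#chunks);
 * hot paths are assumed to prefer cached hints or registry lookups where available.
 *
 *   SuperSlab* ss = find_chunk_for_ptr(ptr, class_idx);
 *   if (ss && ss->magic == SUPERSLAB_MAGIC) {
 *       // ptr lies inside [ss, ss + (1 << ss->lg_size)); ss->slabs[] may be consulted
 *   }
 */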