/*
 * Change log (C7 stride / class_idx fix):
 *
 * Root cause:
 *   1. C7 stride was 1024B, unable to serve 1024B user requests (need 1025B with header).
 *   2. New SuperSlabs start with meta->class_idx=0 (mmap zero-init).
 *   3. superslab_init_slab() only sets class_idx if meta->class_idx==255.
 *   4. Multiple code paths used conditional assignment (if class_idx==255),
 *      leaving C7 slabs with class_idx=0.
 *   5. This caused C7 blocks to be misidentified as C0, leading to
 *      HDR_META_MISMATCH errors.
 *
 * Changes:
 *   1. Upgrade C7 stride: 1024B -> 2048B (can now serve 1024B requests).
 *   2. Update blocks_per_slab[7]: 64 -> 32 (2048B stride / 64KB slab).
 *   3. Update size-to-class LUT: entries 513-2048 now map to C7.
 *   4. Fix superslab_init_slab() fail-safe: only reinitialize if class_idx==255 (not 0).
 *   5. Add explicit class_idx assignment in 6 initialization paths:
 *      - tiny_superslab_alloc.inc.h: superslab_refill() after init
 *      - hakmem_tiny_superslab.c:    backend_shared after init (main path)
 *      - ss_unified_backend_box.c:   unconditional assignment
 *      - ss_legacy_backend_box.c:    explicit assignment
 *      - superslab_expansion_box.c:  explicit assignment
 *      - ss_allocation_box.c:        fail-safe condition fix
 *
 * P0 refill bug (obsolete array access after Phase 3d-B TLS SLL unification):
 *   - g_tls_sll_head[cls]  -> g_tls_sll[cls].head
 *   - g_tls_sll_count[cls] -> g_tls_sll[cls].count
 *
 * Results:
 *   - HDR_META_MISMATCH: eliminated (0 errors in 100K iterations).
 *   - 1024B allocations now routed to C7 (Tiny fast path).
 *   - NXT_MISALIGN warnings remain (legacy 1024B SuperSlabs, separate issue).
 */
// superslab_expansion_box.c - Box E: SuperSlab Expansion Implementation
// Purpose: Safe SuperSlab expansion with TLS state guarantee
// Box Theory: Complete encapsulation of expansion logic
//
// License: MIT
// Date: 2025-11-12
#include "superslab_expansion_box.h"

#include "../hakmem_tiny_superslab.h"           // expand_superslab_head(), g_superslab_heads
#include "../hakmem_tiny_superslab_constants.h" // SUPERSLAB_SLAB0_DATA_OFFSET

#include <pthread.h>   // pthread_self()
#include <stdatomic.h> // _Atomic, atomic_fetch_add_explicit(), atomic_load_explicit()
#include <stdbool.h>   // bool
#include <stdint.h>    // uint8_t, uint32_t, uintptr_t
#include <stdio.h>
#include <string.h>
// External SuperSlabHead array (defined in hakmem_tiny_superslab.c)
|
|
extern SuperSlabHead* g_superslab_heads[TINY_NUM_CLASSES_SS];
|
|
|
|
// External lock depth for safe fprintf in malloc context
|
|
extern __thread int g_hakmem_lock_depth;
|
|
|
|
// ============================================================================
|
|
// Box E: Core API Implementation
|
|
// ============================================================================
|
|
|
|
// Note: We don't implement expansion_capture_tls_state here because it requires
|
|
// access to g_tls_slabs, which is static in hakmem_tiny.c. The caller should
|
|
// capture state directly from their local g_tls_slabs reference.
|
|
|
|
ExpansionResult expansion_expand_with_tls_guarantee(
|
|
SuperSlabHead* head,
|
|
uint8_t class_idx)
|
|
{
|
|
ExpansionResult result;
|
|
memset(&result, 0, sizeof(result));
|
|
result.success = false;
|
|
result.error_code = -2; // Invalid params
|
|
|
|
// Validate parameters
|
|
if (!head || class_idx >= TINY_NUM_CLASSES_SS) {
|
|
return result;
|
|
}
|
|
|
|
// CRITICAL: Call existing expand_superslab_head() with mutex protection
|
|
// This function already handles:
|
|
// 1. Mutex lock/unlock (head->expansion_lock)
|
|
// 2. Double-check pattern (re-verify after lock)
|
|
// 3. Chunk allocation and linking
|
|
// 4. current_chunk update
|
|
int expand_result = expand_superslab_head(head);
|
|
|
|
if (expand_result < 0) {
|
|
// Expansion failed (OOM)
|
|
result.success = false;
|
|
result.error_code = -1; // OOM
|
|
return result;
|
|
}
|
|
|
|
// Expansion succeeded
|
|
// CRITICAL FIX: Bind slab 0 immediately to prevent NULL meta SEGV
|
|
// The new chunk always has slab 0 available after expansion
|
|
SuperSlab* new_ss = head->current_chunk;
|
|
|
|
// Initialize slab 0 metadata (set capacity, mark as active in bitmap)
|
|
extern void superslab_init_slab(SuperSlab* ss, int slab_idx, size_t block_size, uint32_t owner_tid);
|
|
extern const size_t g_tiny_class_sizes[];
|
|
|
|
uint32_t my_tid = (uint32_t)(uintptr_t)pthread_self();
|
|
size_t block_size = g_tiny_class_sizes[class_idx];
|
|
superslab_init_slab(new_ss, 0, block_size, my_tid);
|
|
|
|
// CRITICAL FIX: Explicitly set class_idx to avoid C0/C7 confusion.
|
|
// New SuperSlabs start with meta->class_idx=0 (mmap zero-init).
|
|
new_ss->slabs[0].class_idx = (uint8_t)class_idx;
|
|
|
|
// Now bind slab 0 to TLS state
|
|
result.new_state.ss = new_ss;
|
|
result.new_state.class_idx = class_idx;
|
|
result.new_state.slab_idx = 0; // Always bind slab 0 after expansion
|
|
result.new_state.meta = &new_ss->slabs[0]; // Point to slab 0 metadata
|
|
|
|
// Calculate slab_base using tiny_slab_base_for_geometry logic
|
|
// Slab 0 has offset SUPERSLAB_SLAB0_DATA_OFFSET (2048 bytes)
|
|
// Formula: base = ss + (slab_idx * SLAB_SIZE) + (slab_idx == 0 ? SLAB0_OFFSET : 0)
|
|
result.new_state.slab_base = (uint8_t*)new_ss + SUPERSLAB_SLAB0_DATA_OFFSET;
|
|
|
|
// Debug: log backend used for expansion (first few only)
|
|
static _Atomic uint32_t g_ss_backend_log = 0;
|
|
uint32_t n = atomic_fetch_add_explicit(&g_ss_backend_log, 1, memory_order_relaxed);
|
|
if (n < 4) {
|
|
fprintf(stderr, "[SS_BACKEND] expand legacy cls=%d ss=%p slab_idx=0 base=%p\n",
|
|
class_idx, (void*)new_ss, result.new_state.slab_base);
|
|
}
|
|
|
|
result.success = true;
|
|
result.error_code = 0;
|
|
|
|
return result;
|
|
}
|
|
|
|
void expansion_apply_tls_state(
|
|
uint8_t class_idx,
|
|
const ExpansionTLSState* new_state,
|
|
TinyTLSSlab* tls_array)
|
|
{
|
|
if (!new_state || !tls_array || class_idx >= TINY_NUM_CLASSES_SS) {
|
|
return;
|
|
}
|
|
|
|
TinyTLSSlab* tls = &tls_array[class_idx];
|
|
|
|
// CRITICAL FIX: Apply complete TLS state from expansion
|
|
// This ensures meta and slab_base are NEVER NULL after expansion
|
|
tls->ss = new_state->ss;
|
|
tls->meta = new_state->meta; // ✅ Now points to slab 0!
|
|
tls->slab_base = new_state->slab_base; // ✅ Now points to slab 0 base!
|
|
tls->slab_idx = new_state->slab_idx; // ✅ Now 0 (slab 0)
|
|
}
|
|
|
|
// ============================================================================
|
|
// Box E: Debug & Validation Implementation
|
|
// ============================================================================
|
|
|
|
#if !defined(HAKMEM_BUILD_RELEASE) || defined(HAKMEM_EXPANSION_BOX_DEBUG)
|
|
|
|
bool expansion_validate_tls_state(
|
|
const ExpansionTLSState* state,
|
|
const char* context)
|
|
{
|
|
if (!state) {
|
|
return false;
|
|
}
|
|
|
|
// Allow NULL ss (initial state before any allocation)
|
|
if (!state->ss) {
|
|
return true;
|
|
}
|
|
|
|
// Validate SuperSlab magic
|
|
if (state->ss->magic != SUPERSLAB_MAGIC) {
|
|
g_hakmem_lock_depth++;
|
|
fprintf(stderr, "[EXPANSION_VAL] %s: Invalid SuperSlab magic: 0x%016llx (expected 0x%016llx)\n",
|
|
context, (unsigned long long)state->ss->magic, (unsigned long long)SUPERSLAB_MAGIC);
|
|
g_hakmem_lock_depth--;
|
|
return false;
|
|
}
|
|
|
|
// Validate class consistency
|
|
if (state->ss->size_class != state->class_idx) {
|
|
g_hakmem_lock_depth++;
|
|
fprintf(stderr, "[EXPANSION_VAL] %s: Class mismatch: ss->size_class=%u, state->class_idx=%u\n",
|
|
context, state->ss->size_class, state->class_idx);
|
|
g_hakmem_lock_depth--;
|
|
return false;
|
|
}
|
|
|
|
// Validate slab index bounds
|
|
int capacity = (state->ss->lg_size == 21) ? 32 : 16; // 2MB=32 slabs, 1MB=16 slabs
|
|
if (state->slab_idx >= capacity) {
|
|
g_hakmem_lock_depth++;
|
|
fprintf(stderr, "[EXPANSION_VAL] %s: slab_idx out of bounds: %u >= %d\n",
|
|
context, state->slab_idx, capacity);
|
|
g_hakmem_lock_depth--;
|
|
return false;
|
|
}
|
|
|
|
// Validate meta pointer alignment (should point into ss->slabs array)
|
|
if (state->meta) {
|
|
TinySlabMeta* expected_meta = &state->ss->slabs[state->slab_idx];
|
|
if (state->meta != expected_meta) {
|
|
g_hakmem_lock_depth++;
|
|
fprintf(stderr, "[EXPANSION_VAL] %s: meta pointer mismatch: %p (expected %p)\n",
|
|
context, (void*)state->meta, (void*)expected_meta);
|
|
g_hakmem_lock_depth--;
|
|
return false;
|
|
}
|
|
}
|
|
|
|
// Validate slab_base alignment (should be within SuperSlab memory range)
|
|
if (state->slab_base) {
|
|
uintptr_t ss_start = (uintptr_t)state->ss;
|
|
size_t ss_size = (size_t)1 << state->ss->lg_size;
|
|
uintptr_t ss_end = ss_start + ss_size;
|
|
uintptr_t base_addr = (uintptr_t)state->slab_base;
|
|
|
|
if (base_addr < ss_start || base_addr >= ss_end) {
|
|
g_hakmem_lock_depth++;
|
|
fprintf(stderr, "[EXPANSION_VAL] %s: slab_base out of range: %p (ss: %p - %p)\n",
|
|
context, (void*)state->slab_base, (void*)ss_start, (void*)ss_end);
|
|
g_hakmem_lock_depth--;
|
|
return false;
|
|
}
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
bool expansion_verify_expansion(
|
|
SuperSlabHead* head,
|
|
const ExpansionTLSState* old_state,
|
|
const ExpansionTLSState* new_state)
|
|
{
|
|
if (!head || !old_state || !new_state) {
|
|
return false;
|
|
}
|
|
|
|
// Verify new chunk is set
|
|
if (!new_state->ss) {
|
|
g_hakmem_lock_depth++;
|
|
fprintf(stderr, "[EXPANSION_VERIFY] New state has NULL SuperSlab\n");
|
|
g_hakmem_lock_depth--;
|
|
return false;
|
|
}
|
|
|
|
// Verify current_chunk was updated
|
|
if (head->current_chunk != new_state->ss) {
|
|
g_hakmem_lock_depth++;
|
|
fprintf(stderr, "[EXPANSION_VERIFY] current_chunk mismatch: head=%p, new_state=%p\n",
|
|
(void*)head->current_chunk, (void*)new_state->ss);
|
|
g_hakmem_lock_depth--;
|
|
return false;
|
|
}
|
|
|
|
// Verify new chunk has available capacity (bitmap should not be full)
|
|
int capacity = (new_state->ss->lg_size == 21) ? 32 : 16;
|
|
uint32_t full_mask = (capacity >= 32) ? 0xFFFFFFFF : ((1U << capacity) - 1);
|
|
|
|
if (new_state->ss->slab_bitmap == full_mask) {
|
|
g_hakmem_lock_depth++;
|
|
fprintf(stderr, "[EXPANSION_VERIFY] New chunk has no free slabs: bitmap=0x%08x\n",
|
|
new_state->ss->slab_bitmap);
|
|
g_hakmem_lock_depth--;
|
|
return false;
|
|
}
|
|
|
|
// Verify total_chunks was incremented (if we can check old value)
|
|
// Note: We can't reliably check this without capturing old value
|
|
// But we can verify it's at least 1
|
|
size_t total = atomic_load_explicit(&head->total_chunks, memory_order_relaxed);
|
|
if (total == 0) {
|
|
g_hakmem_lock_depth++;
|
|
fprintf(stderr, "[EXPANSION_VERIFY] total_chunks is 0 after expansion\n");
|
|
g_hakmem_lock_depth--;
|
|
return false;
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
void expansion_log_event(
|
|
const char* event,
|
|
uint8_t class_idx,
|
|
const ExpansionTLSState* state)
|
|
{
|
|
if (!event || !state) {
|
|
return;
|
|
}
|
|
|
|
g_hakmem_lock_depth++;
|
|
|
|
if (state->ss) {
|
|
fprintf(stderr, "[EXPANSION] class=%u %s: ss=%p, bitmap=0x%08x, active=%u, slab_idx=%u\n",
|
|
class_idx, event,
|
|
(void*)state->ss,
|
|
state->ss->slab_bitmap,
|
|
state->ss->active_slabs,
|
|
state->slab_idx);
|
|
} else {
|
|
fprintf(stderr, "[EXPANSION] class=%u %s: ss=NULL (initial state)\n",
|
|
class_idx, event);
|
|
}
|
|
|
|
g_hakmem_lock_depth--;
|
|
}
|
|
|
|
#endif // !HAKMEM_BUILD_RELEASE || HAKMEM_EXPANSION_BOX_DEBUG
|