// Source: hakmem/archive/ss_legacy_backend_box.c (389 lines, 14 KiB, C)
// Box: Legacy Backend (Phase 12)
// Purpose: Per-class SuperSlabHead backend (legacy implementation)
#include "ss_legacy_backend_box.h"
#include "ss_allocation_box.h"
#include "hakmem_tiny_config.h"
#include "hakmem_tiny.h" // For tiny_self_u32
#include "../tiny_region_id.h" // For tiny_region_id_write_header
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
// ============================================================================
// Global State
// ============================================================================
// Phase 2a: Dynamic Expansion - Global per-class SuperSlabHeads
SuperSlabHead* g_superslab_heads[TINY_NUM_CLASSES_SS] = {NULL};
// Legacy fallback hint box (per-thread, per-class)
static __thread SuperSlab* g_ss_legacy_hint_ss[TINY_NUM_CLASSES_SS];
static __thread uint8_t g_ss_legacy_hint_slab[TINY_NUM_CLASSES_SS];
// ============================================================================
// Hint Box (Optional Optimization)
// ============================================================================
/*
 * Record (ss, slab_idx) as this thread's allocation hint for class_idx.
 * Invalid arguments are silently ignored; the hint is best-effort only.
 */
void hak_tiny_ss_hint_record(int class_idx, SuperSlab* ss, int slab_idx)
{
    // Reject out-of-range class indices and null/invalid slab references.
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) return;
    if (ss == NULL) return;
    if (slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) return;
    g_ss_legacy_hint_ss[class_idx] = ss;
    g_ss_legacy_hint_slab[class_idx] = (uint8_t)slab_idx;
}
/*
 * Optional fast path: carve one block from the thread-local (SuperSlab, slab)
 * hint recorded by hak_tiny_ss_hint_record().
 *
 * Gated by the HAKMEM_TINY_SS_LEGACY_HINT environment variable, read once
 * lazily (value present and not starting with '0' enables the path).
 * Returns NULL whenever the feature is off or the hint is absent/stale;
 * callers then fall back to the regular legacy backend.
 */
void* hak_tiny_alloc_superslab_backend_hint(int class_idx)
{
    static int s_enabled = -1;
    if (__builtin_expect(s_enabled == -1, 0)) {
        const char* env = getenv("HAKMEM_TINY_SS_LEGACY_HINT");
        s_enabled = (env && *env && *env != '0') ? 1 : 0;
    }
    if (!s_enabled || class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }

    SuperSlab* ss = g_ss_legacy_hint_ss[class_idx];
    int slab_idx = (int)g_ss_legacy_hint_slab[class_idx];
    if (ss == NULL) {
        return NULL;
    }
    // Validate that the hinted SuperSlab is still alive and the slab index
    // is in range; on failure, drop the hint so it is not re-checked.
    if (ss->magic != SUPERSLAB_MAGIC ||
        slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) {
        g_ss_legacy_hint_ss[class_idx] = NULL;
        return NULL;
    }
    TinySlabMeta* meta = &ss->slabs[slab_idx];
    // Reject exhausted slabs and slabs bound to another class
    // (255 == unassigned, which is acceptable).
    if (meta->capacity == 0 || meta->used >= meta->capacity ||
        (meta->class_idx != (uint8_t)class_idx && meta->class_idx != 255)) {
        g_ss_legacy_hint_ss[class_idx] = NULL;
        return NULL;
    }

    // Blocks are laid out contiguously at `stride` intervals from the
    // slab's data base; `meta->used` is the bump-allocation cursor.
    size_t stride = tiny_block_stride_for_class(class_idx);
    uint8_t* block = (uint8_t*)ss
        + SUPERSLAB_SLAB0_DATA_OFFSET
        + (size_t)slab_idx * SUPERSLAB_SLAB_USABLE_SIZE
        + (size_t)meta->used * stride;
    meta->used++;
    atomic_fetch_add_explicit(&ss->total_active_blocks, 1, memory_order_relaxed);
    // Invalidate the hint once this slab becomes full.
    if (meta->used >= meta->capacity) {
        g_ss_legacy_hint_ss[class_idx] = NULL;
    }
#if HAKMEM_TINY_HEADER_CLASSIDX
    return tiny_region_id_write_header(block, class_idx);
#else
    return (void*)block;
#endif
}
// ============================================================================
// Legacy Backend Implementation
// ============================================================================
/*
* Legacy backend for hak_tiny_alloc_superslab_box().
*
* Phase 12 Stage A/B:
* - Uses per-class SuperSlabHead (g_superslab_heads) as the implementation.
* - Callers MUST use hak_tiny_alloc_superslab_box() and never touch this directly.
* - Later Stage C: this function will be replaced by a shared_pool backend.
*/
void* hak_tiny_alloc_superslab_backend_legacy(int class_idx)
{
if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
return NULL;
}
SuperSlabHead* head = g_superslab_heads[class_idx];
if (!head) {
head = init_superslab_head(class_idx);
if (!head) {
return NULL;
}
g_superslab_heads[class_idx] = head;
}
SuperSlab* chunk = head->current_chunk ? head->current_chunk : head->first_chunk;
while (chunk) {
int cap = ss_slabs_capacity(chunk);
for (int slab_idx = 0; slab_idx < cap; slab_idx++) {
TinySlabMeta* meta = &chunk->slabs[slab_idx];
Fix C0/C7 class confusion: Upgrade C7 stride to 2048B and fix meta->class_idx initialization Root Cause: 1. C7 stride was 1024B, unable to serve 1024B user requests (need 1025B with header) 2. New SuperSlabs start with meta->class_idx=0 (mmap zero-init) 3. superslab_init_slab() only sets class_idx if meta->class_idx==255 4. Multiple code paths used conditional assignment (if class_idx==255), leaving C7 slabs with class_idx=0 5. This caused C7 blocks to be misidentified as C0, leading to HDR_META_MISMATCH errors Changes: 1. Upgrade C7 stride: 1024B → 2048B (can now serve 1024B requests) 2. Update blocks_per_slab[7]: 64 → 32 (2048B stride / 64KB slab) 3. Update size-to-class LUT: entries 513-2048 now map to C7 4. Fix superslab_init_slab() fail-safe: only reinitialize if class_idx==255 (not 0) 5. Add explicit class_idx assignment in 6 initialization paths: - tiny_superslab_alloc.inc.h: superslab_refill() after init - hakmem_tiny_superslab.c: backend_shared after init (main path) - ss_unified_backend_box.c: unconditional assignment - ss_legacy_backend_box.c: explicit assignment - superslab_expansion_box.c: explicit assignment - ss_allocation_box.c: fail-safe condition fix Fix P0 refill bug: - Update obsolete array access after Phase 3d-B TLS SLL unification - g_tls_sll_head[cls] → g_tls_sll[cls].head - g_tls_sll_count[cls] → g_tls_sll[cls].count Results: - HDR_META_MISMATCH: eliminated (0 errors in 100K iterations) - 1024B allocations now routed to C7 (Tiny fast path) - NXT_MISALIGN warnings remain (legacy 1024B SuperSlabs, separate issue) 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-21 13:44:05 +09:00
// Skip slabs that belong to a different class (or are uninitialized).
Tiny Pool redesign: P0.1, P0.3, P1.1, P1.2 - Out-of-band class_idx lookup This commit implements the first phase of Tiny Pool redesign based on ChatGPT architecture review. The goal is to eliminate Header/Next pointer conflicts by moving class_idx lookup out-of-band (to SuperSlab metadata). ## P0.1: C0(8B) class upgraded to 16B - Size table changed: {16,32,64,128,256,512,1024,2048} (8 classes) - LUT updated: 1..16 → class 0, 17..32 → class 1, etc. - tiny_next_off: C0 now uses offset 1 (header preserved) - Eliminates edge cases for 8B allocations ## P0.3: Slab reuse guard Box (tls_slab_reuse_guard_box.h) - New Box for draining TLS SLL before slab reuse - ENV gate: HAKMEM_TINY_SLAB_REUSE_GUARD=1 - Prevents stale pointers when slabs are recycled - Follows Box theory: single responsibility, minimal API ## P1.1: SuperSlab class_map addition - Added uint8_t class_map[SLABS_PER_SUPERSLAB_MAX] to SuperSlab - Maps slab_idx → class_idx for out-of-band lookup - Initialized to 255 (UNASSIGNED) on SuperSlab creation - Set correctly on slab initialization in all backends ## P1.2: Free fast path uses class_map - ENV gate: HAKMEM_TINY_USE_CLASS_MAP=1 - Free path can now get class_idx from class_map instead of Header - Falls back to Header read if class_map returns invalid value - Fixed Legacy Backend dynamic slab initialization bug ## Documentation added - HAKMEM_ARCHITECTURE_OVERVIEW.md: 4-layer architecture analysis - TLS_SLL_ARCHITECTURE_INVESTIGATION.md: Root cause analysis - PTR_LIFECYCLE_TRACE_AND_ROOT_CAUSE_ANALYSIS.md: Pointer tracking - TINY_REDESIGN_CHECKLIST.md: Implementation roadmap (P0-P3) ## Test results - Baseline: 70% success rate (30% crash - pre-existing issue) - class_map enabled: 70% success rate (same as baseline) - Performance: ~30.5M ops/s (unchanged) ## Next steps (P1.3, P2, P3) - P1.3: Add meta->active for accurate TLS/freelist sync - P2: TLS SLL redesign with Box-based counting - P3: Complete Header out-of-band migration 🤖 Generated with [Claude 
Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-28 13:42:39 +09:00
if (meta->class_idx != (uint8_t)class_idx && meta->class_idx != 255) {
Fix C0/C7 class confusion: Upgrade C7 stride to 2048B and fix meta->class_idx initialization Root Cause: 1. C7 stride was 1024B, unable to serve 1024B user requests (need 1025B with header) 2. New SuperSlabs start with meta->class_idx=0 (mmap zero-init) 3. superslab_init_slab() only sets class_idx if meta->class_idx==255 4. Multiple code paths used conditional assignment (if class_idx==255), leaving C7 slabs with class_idx=0 5. This caused C7 blocks to be misidentified as C0, leading to HDR_META_MISMATCH errors Changes: 1. Upgrade C7 stride: 1024B → 2048B (can now serve 1024B requests) 2. Update blocks_per_slab[7]: 64 → 32 (2048B stride / 64KB slab) 3. Update size-to-class LUT: entries 513-2048 now map to C7 4. Fix superslab_init_slab() fail-safe: only reinitialize if class_idx==255 (not 0) 5. Add explicit class_idx assignment in 6 initialization paths: - tiny_superslab_alloc.inc.h: superslab_refill() after init - hakmem_tiny_superslab.c: backend_shared after init (main path) - ss_unified_backend_box.c: unconditional assignment - ss_legacy_backend_box.c: explicit assignment - superslab_expansion_box.c: explicit assignment - ss_allocation_box.c: fail-safe condition fix Fix P0 refill bug: - Update obsolete array access after Phase 3d-B TLS SLL unification - g_tls_sll_head[cls] → g_tls_sll[cls].head - g_tls_sll_count[cls] → g_tls_sll[cls].count Results: - HDR_META_MISMATCH: eliminated (0 errors in 100K iterations) - 1024B allocations now routed to C7 (Tiny fast path) - NXT_MISALIGN warnings remain (legacy 1024B SuperSlabs, separate issue) 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-21 13:44:05 +09:00
continue;
}
Tiny Pool redesign: P0.1, P0.3, P1.1, P1.2 - Out-of-band class_idx lookup This commit implements the first phase of Tiny Pool redesign based on ChatGPT architecture review. The goal is to eliminate Header/Next pointer conflicts by moving class_idx lookup out-of-band (to SuperSlab metadata). ## P0.1: C0(8B) class upgraded to 16B - Size table changed: {16,32,64,128,256,512,1024,2048} (8 classes) - LUT updated: 1..16 → class 0, 17..32 → class 1, etc. - tiny_next_off: C0 now uses offset 1 (header preserved) - Eliminates edge cases for 8B allocations ## P0.3: Slab reuse guard Box (tls_slab_reuse_guard_box.h) - New Box for draining TLS SLL before slab reuse - ENV gate: HAKMEM_TINY_SLAB_REUSE_GUARD=1 - Prevents stale pointers when slabs are recycled - Follows Box theory: single responsibility, minimal API ## P1.1: SuperSlab class_map addition - Added uint8_t class_map[SLABS_PER_SUPERSLAB_MAX] to SuperSlab - Maps slab_idx → class_idx for out-of-band lookup - Initialized to 255 (UNASSIGNED) on SuperSlab creation - Set correctly on slab initialization in all backends ## P1.2: Free fast path uses class_map - ENV gate: HAKMEM_TINY_USE_CLASS_MAP=1 - Free path can now get class_idx from class_map instead of Header - Falls back to Header read if class_map returns invalid value - Fixed Legacy Backend dynamic slab initialization bug ## Documentation added - HAKMEM_ARCHITECTURE_OVERVIEW.md: 4-layer architecture analysis - TLS_SLL_ARCHITECTURE_INVESTIGATION.md: Root cause analysis - PTR_LIFECYCLE_TRACE_AND_ROOT_CAUSE_ANALYSIS.md: Pointer tracking - TINY_REDESIGN_CHECKLIST.md: Implementation roadmap (P0-P3) ## Test results - Baseline: 70% success rate (30% crash - pre-existing issue) - class_map enabled: 70% success rate (same as baseline) - Performance: ~30.5M ops/s (unchanged) ## Next steps (P1.3, P2, P3) - P1.3: Add meta->active for accurate TLS/freelist sync - P2: TLS SLL redesign with Box-based counting - P3: Complete Header out-of-band migration 🤖 Generated with [Claude 
Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-28 13:42:39 +09:00
// P1.2 FIX: Initialize slab on first use (like shared backend does)
// This ensures class_map is populated for all slabs, not just slab 0
if (meta->capacity == 0) {
Tiny Pool redesign: P0.1, P0.3, P1.1, P1.2 - Out-of-band class_idx lookup This commit implements the first phase of Tiny Pool redesign based on ChatGPT architecture review. The goal is to eliminate Header/Next pointer conflicts by moving class_idx lookup out-of-band (to SuperSlab metadata). ## P0.1: C0(8B) class upgraded to 16B - Size table changed: {16,32,64,128,256,512,1024,2048} (8 classes) - LUT updated: 1..16 → class 0, 17..32 → class 1, etc. - tiny_next_off: C0 now uses offset 1 (header preserved) - Eliminates edge cases for 8B allocations ## P0.3: Slab reuse guard Box (tls_slab_reuse_guard_box.h) - New Box for draining TLS SLL before slab reuse - ENV gate: HAKMEM_TINY_SLAB_REUSE_GUARD=1 - Prevents stale pointers when slabs are recycled - Follows Box theory: single responsibility, minimal API ## P1.1: SuperSlab class_map addition - Added uint8_t class_map[SLABS_PER_SUPERSLAB_MAX] to SuperSlab - Maps slab_idx → class_idx for out-of-band lookup - Initialized to 255 (UNASSIGNED) on SuperSlab creation - Set correctly on slab initialization in all backends ## P1.2: Free fast path uses class_map - ENV gate: HAKMEM_TINY_USE_CLASS_MAP=1 - Free path can now get class_idx from class_map instead of Header - Falls back to Header read if class_map returns invalid value - Fixed Legacy Backend dynamic slab initialization bug ## Documentation added - HAKMEM_ARCHITECTURE_OVERVIEW.md: 4-layer architecture analysis - TLS_SLL_ARCHITECTURE_INVESTIGATION.md: Root cause analysis - PTR_LIFECYCLE_TRACE_AND_ROOT_CAUSE_ANALYSIS.md: Pointer tracking - TINY_REDESIGN_CHECKLIST.md: Implementation roadmap (P0-P3) ## Test results - Baseline: 70% success rate (30% crash - pre-existing issue) - class_map enabled: 70% success rate (same as baseline) - Performance: ~30.5M ops/s (unchanged) ## Next steps (P1.3, P2, P3) - P1.3: Add meta->active for accurate TLS/freelist sync - P2: TLS SLL redesign with Box-based counting - P3: Complete Header out-of-band migration 🤖 Generated with [Claude 
Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-28 13:42:39 +09:00
size_t block_size = g_tiny_class_sizes[class_idx];
uint32_t owner_tid = (uint32_t)(uintptr_t)pthread_self();
superslab_init_slab(chunk, slab_idx, block_size, owner_tid);
meta = &chunk->slabs[slab_idx]; // Refresh pointer after init
meta->class_idx = (uint8_t)class_idx;
// P1.2: Update class_map for dynamic slab initialization
chunk->class_map[slab_idx] = (uint8_t)class_idx;
}
if (meta->used < meta->capacity) {
Code Cleanup: Remove false positives, redundant validations, and reduce verbose logging Following the C7 stride upgrade fix (commit 23c0d9541), this commit performs comprehensive cleanup to improve code quality and reduce debug noise. ## Changes ### 1. Disable False Positive Checks (tiny_nextptr.h) - **Disabled**: NXT_MISALIGN validation block with `#if 0` - **Reason**: Produces false positives due to slab base offsets (2048, 65536) not being stride-aligned, causing all blocks to appear "misaligned" - **TODO**: Reimplement to check stride DISTANCE between consecutive blocks instead of absolute alignment to stride boundaries ### 2. Remove Redundant Geometry Validations **hakmem_tiny_refill_p0.inc.h (P0 batch refill)** - Removed 25-line CARVE_GEOMETRY_FIX validation block - Replaced with NOTE explaining redundancy - **Reason**: Stride table is now correct in tiny_block_stride_for_class(), defense-in-depth validation adds overhead without benefit **ss_legacy_backend_box.c (legacy backend)** - Removed 18-line LEGACY_FIX_GEOMETRY validation block - Replaced with NOTE explaining redundancy - **Reason**: Shared_pool validates geometry at acquisition time ### 3. Reduce Verbose Logging **hakmem_shared_pool.c (sp_fix_geometry_if_needed)** - Made SP_FIX_GEOMETRY logging conditional on `!HAKMEM_BUILD_RELEASE` - **Reason**: Geometry fixes are expected during stride upgrades, no need to log in release builds ### 4. 
Verification - Build: ✅ Successful (LTO warnings expected) - Test: ✅ 10K iterations (1.87M ops/s, no crashes) - NXT_MISALIGN false positives: ✅ Eliminated ## Files Modified - core/tiny_nextptr.h - Disabled false positive NXT_MISALIGN check - core/hakmem_tiny_refill_p0.inc.h - Removed redundant CARVE validation - core/box/ss_legacy_backend_box.c - Removed redundant LEGACY validation - core/hakmem_shared_pool.c - Made SP_FIX_GEOMETRY logging debug-only ## Impact - **Code clarity**: Removed 43 lines of redundant validation code - **Debug noise**: Reduced false positive diagnostics - **Performance**: Eliminated overhead from redundant geometry checks - **Maintainability**: Single source of truth for geometry validation 🧹 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-21 23:00:24 +09:00
// NOTE: Geometry validation removed (redundant)
// Stride table is now correct in tiny_block_stride_for_class(),
// and shared_pool validates geometry at acquisition time.
size_t stride = tiny_block_stride_for_class(class_idx);
size_t offset = (size_t)meta->used * stride;
uint8_t* base = (uint8_t*)chunk
+ SUPERSLAB_SLAB0_DATA_OFFSET
+ (size_t)slab_idx * SUPERSLAB_SLAB_USABLE_SIZE
+ offset;
hak_tiny_ss_hint_record(class_idx, chunk, slab_idx);
meta->used++;
atomic_fetch_add_explicit(&chunk->total_active_blocks, 1, memory_order_relaxed);
#if HAKMEM_TINY_HEADER_CLASSIDX
return tiny_region_id_write_header(base, class_idx);
#else
return (void*)base;
#endif
}
}
chunk = chunk->next_chunk;
}
if (expand_superslab_head(head) < 0) {
return NULL;
}
SuperSlab* new_chunk = head->current_chunk;
if (!new_chunk) {
return NULL;
}
int cap2 = ss_slabs_capacity(new_chunk);
for (int slab_idx = 0; slab_idx < cap2; slab_idx++) {
TinySlabMeta* meta = &new_chunk->slabs[slab_idx];
Tiny Pool redesign: P0.1, P0.3, P1.1, P1.2 - Out-of-band class_idx lookup This commit implements the first phase of Tiny Pool redesign based on ChatGPT architecture review. The goal is to eliminate Header/Next pointer conflicts by moving class_idx lookup out-of-band (to SuperSlab metadata). ## P0.1: C0(8B) class upgraded to 16B - Size table changed: {16,32,64,128,256,512,1024,2048} (8 classes) - LUT updated: 1..16 → class 0, 17..32 → class 1, etc. - tiny_next_off: C0 now uses offset 1 (header preserved) - Eliminates edge cases for 8B allocations ## P0.3: Slab reuse guard Box (tls_slab_reuse_guard_box.h) - New Box for draining TLS SLL before slab reuse - ENV gate: HAKMEM_TINY_SLAB_REUSE_GUARD=1 - Prevents stale pointers when slabs are recycled - Follows Box theory: single responsibility, minimal API ## P1.1: SuperSlab class_map addition - Added uint8_t class_map[SLABS_PER_SUPERSLAB_MAX] to SuperSlab - Maps slab_idx → class_idx for out-of-band lookup - Initialized to 255 (UNASSIGNED) on SuperSlab creation - Set correctly on slab initialization in all backends ## P1.2: Free fast path uses class_map - ENV gate: HAKMEM_TINY_USE_CLASS_MAP=1 - Free path can now get class_idx from class_map instead of Header - Falls back to Header read if class_map returns invalid value - Fixed Legacy Backend dynamic slab initialization bug ## Documentation added - HAKMEM_ARCHITECTURE_OVERVIEW.md: 4-layer architecture analysis - TLS_SLL_ARCHITECTURE_INVESTIGATION.md: Root cause analysis - PTR_LIFECYCLE_TRACE_AND_ROOT_CAUSE_ANALYSIS.md: Pointer tracking - TINY_REDESIGN_CHECKLIST.md: Implementation roadmap (P0-P3) ## Test results - Baseline: 70% success rate (30% crash - pre-existing issue) - class_map enabled: 70% success rate (same as baseline) - Performance: ~30.5M ops/s (unchanged) ## Next steps (P1.3, P2, P3) - P1.3: Add meta->active for accurate TLS/freelist sync - P2: TLS SLL redesign with Box-based counting - P3: Complete Header out-of-band migration 🤖 Generated with [Claude 
Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-28 13:42:39 +09:00
// P1.2 FIX: Initialize slab on first use (like shared backend does)
if (meta->capacity == 0) {
size_t block_size = g_tiny_class_sizes[class_idx];
uint32_t owner_tid = (uint32_t)(uintptr_t)pthread_self();
superslab_init_slab(new_chunk, slab_idx, block_size, owner_tid);
meta = &new_chunk->slabs[slab_idx]; // Refresh pointer after init
meta->class_idx = (uint8_t)class_idx;
// P1.2: Update class_map for dynamic slab initialization
new_chunk->class_map[slab_idx] = (uint8_t)class_idx;
}
if (meta->used < meta->capacity) {
size_t stride = tiny_block_stride_for_class(class_idx);
size_t offset = (size_t)meta->used * stride;
uint8_t* base = (uint8_t*)new_chunk
+ SUPERSLAB_SLAB0_DATA_OFFSET
+ (size_t)slab_idx * SUPERSLAB_SLAB_USABLE_SIZE
+ offset;
hak_tiny_ss_hint_record(class_idx, new_chunk, slab_idx);
meta->used++;
atomic_fetch_add_explicit(&new_chunk->total_active_blocks, 1, memory_order_relaxed);
#if HAKMEM_TINY_HEADER_CLASSIDX
return tiny_region_id_write_header(base, class_idx);
#else
return (void*)base;
#endif
}
}
return NULL;
}
// ============================================================================
// SuperSlabHead Management
// ============================================================================
// Initialize the per-class SuperSlabHead, allocating its first chunk.
// Returns NULL on invalid class index or allocation failure; on failure the
// partially built head (and any linked chunks) is fully torn down.
SuperSlabHead* init_superslab_head(int class_idx) {
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }
    // Allocate and zero the SuperSlabHead structure.
    SuperSlabHead* head = (SuperSlabHead*)calloc(1, sizeof(SuperSlabHead));
    if (!head) {
        extern __thread int g_hakmem_lock_depth;
        g_hakmem_lock_depth++;
        fprintf(stderr, "[HAKMEM] CRITICAL: Failed to allocate SuperSlabHead for class %d\n", class_idx);
        g_hakmem_lock_depth--;
        return NULL;
    }
    head->class_idx = (uint8_t)class_idx;
    atomic_store_explicit(&head->total_chunks, 0, memory_order_relaxed);
    head->first_chunk = NULL;
    head->current_chunk = NULL;
    pthread_mutex_init(&head->expansion_lock, NULL);

    // Phase 2a: every class starts with exactly one chunk; dynamic expansion
    // handles all further growth. (The previous "hot classes get 2 initial
    // chunks" policy was dead code — initial_chunks was always reset to 1.)
    if (expand_superslab_head(head) < 0) {
        extern __thread int g_hakmem_lock_depth;
        g_hakmem_lock_depth++;
        fprintf(stderr, "[HAKMEM] CRITICAL: Failed to allocate initial chunk %d for class %d\n",
                0, class_idx);
        g_hakmem_lock_depth--;
        // Cleanup on failure: free any chunks that were linked in.
        SuperSlab* chunk = head->first_chunk;
        while (chunk) {
            SuperSlab* next = chunk->next_chunk;
            superslab_free(chunk);
            chunk = next;
        }
        pthread_mutex_destroy(&head->expansion_lock);
        free(head);
        return NULL;
    }

    extern __thread int g_hakmem_lock_depth;
    g_hakmem_lock_depth++;
#if !HAKMEM_BUILD_RELEASE
    fprintf(stderr, "[HAKMEM] Initialized SuperSlabHead for class %d: %zu initial chunks\n",
            class_idx, atomic_load_explicit(&head->total_chunks, memory_order_relaxed));
#endif
    g_hakmem_lock_depth--;
    return head;
}
// Expand SuperSlabHead by allocating and linking one new chunk.
// Returns 0 on success, -1 on bad argument or system OOM.
int expand_superslab_head(SuperSlabHead* head) {
    if (!head) {
        return -1;
    }
    // Allocate the new chunk via the existing superslab allocator.
    SuperSlab* new_chunk = superslab_allocate(head->class_idx);
    if (!new_chunk) {
#if !defined(NDEBUG) || defined(HAKMEM_SUPERSLAB_VERBOSE)
        extern __thread int g_hakmem_lock_depth;
        g_hakmem_lock_depth++;
        fprintf(stderr, "[HAKMEM] CRITICAL: Failed to allocate new chunk for class %d (system OOM)\n",
                head->class_idx);
        g_hakmem_lock_depth--;
#endif
        return -1; // True OOM (system out of memory)
    }
    // Initialize slab 0 so the chunk has at least one usable slab
    // (bitmap must not remain 0x00000000 after allocation).
    size_t block_size = g_tiny_class_sizes[head->class_idx];
    // Use pthread_self() directly: tiny_self_u32() is static inline in hakmem_tiny.c.
    uint32_t owner_tid = (uint32_t)(uintptr_t)pthread_self();
    superslab_init_slab(new_chunk, 0, block_size, owner_tid);
    // Explicitly bind slab 0's class to avoid C0/C7 confusion: fresh
    // SuperSlabs come back zero-initialized, so class_idx would read as 0.
    new_chunk->slabs[0].class_idx = (uint8_t)head->class_idx;
    // P1.1: keep the out-of-band class_map in sync for the free fast path.
    new_chunk->class_map[0] = (uint8_t)head->class_idx;
    new_chunk->next_chunk = NULL;

    // Thread-safe linking under the expansion lock.
    pthread_mutex_lock(&head->expansion_lock);
    if (head->current_chunk) {
        // Append at the tail (optimization: could cache the tail pointer).
        SuperSlab* tail = head->current_chunk;
        while (tail->next_chunk) {
            tail = tail->next_chunk;
        }
        tail->next_chunk = new_chunk;
    } else {
        // First chunk ever for this head.
        head->first_chunk = new_chunk;
    }
    // Point the fast allocation path at the fresh chunk.
    head->current_chunk = new_chunk;
    size_t new_count = atomic_fetch_add_explicit(&head->total_chunks, 1, memory_order_relaxed) + 1;
    pthread_mutex_unlock(&head->expansion_lock);
    (void)new_count; // only used by the debug log below
#if !defined(NDEBUG) || defined(HAKMEM_SUPERSLAB_VERBOSE)
    extern __thread int g_hakmem_lock_depth;
    g_hakmem_lock_depth++;
    fprintf(stderr, "[HAKMEM] Expanded SuperSlabHead for class %d: %zu chunks now (bitmap=0x%08x)\n",
            head->class_idx, new_count, new_chunk->slab_bitmap);
    g_hakmem_lock_depth--;
#endif
    return 0;
}
// Locate the SuperSlab chunk (for the given class) whose address range
// contains `ptr`. Returns NULL if ptr belongs to no chunk of that class.
SuperSlab* find_chunk_for_ptr(void* ptr, int class_idx) {
    if (ptr == NULL || class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }
    SuperSlabHead* head = g_superslab_heads[class_idx];
    if (head == NULL) {
        return NULL;
    }
    uintptr_t addr = (uintptr_t)ptr;
    // Walk the chunk list; each chunk covers [chunk, chunk + 2^lg_size).
    for (SuperSlab* chunk = head->first_chunk; chunk; chunk = chunk->next_chunk) {
        uintptr_t lo = (uintptr_t)chunk;
        uintptr_t hi = lo + ((size_t)1 << chunk->lg_size); // actual chunk size
        if (addr >= lo && addr < hi) {
            return chunk;
        }
    }
    return NULL; // not found in any chunk
}