// tls_sll_drain_box.h - Box: TLS SLL Periodic Drain
// Purpose: Restore slab accounting consistency by periodically draining the TLS SLL to slab freelists
//
// Problem:
// - The fast free path (hak_tiny_free_fast_v2) pushes to the TLS SLL without decrementing meta->used
// - Slabs never appear empty → SuperSlabs are never freed → the LRU cache is never populated
// - Result: 6,455 mmap/munmap syscalls per 200K iterations (74.8% of run time)
//
// Solution:
// - Every N frees (default: 2048), drain the TLS SLL → slab freelist
//   (see the usage sketch below the includes)
// - This path decrements meta->used properly via tiny_free_local_box()
// - Enables empty detection → SuperSlabs freed → LRU cache functional
//
// Expected Impact:
// - mmap/munmap: 6,455 → ~100 calls (96-97% reduction)
// - Throughput: 563K → 8-10M ops/s (+1,300-1,700%)
//
// References:
// - Root cause: PHASE9_LRU_ARCHITECTURE_ISSUE.md
// - Design: Option B (Periodic TLS SLL Drain)
#pragma once

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <pthread.h>
#include <stdatomic.h>   // atomic_fetch_add_explicit (debug trace block below)

#include "tls_sll_box.h" // TLS SLL operations (tls_sll_pop)
#include "tiny_header_box.h" // Header Box: Single Source of Truth for header operations
#include "slab_recycling_box.h" // Phase 9-2: EMPTY slab recycling (SLAB_TRY_RECYCLE)
#include "../hakmem_tiny_config.h" // TINY_NUM_CLASSES
#include "../hakmem_super_registry.h" // SuperSlab lookup
#include "../tiny_region_id.h" // HEADER_MAGIC, HEADER_CLASS_MASK
#include "free_local_box.h" // tiny_free_local_box (decrements meta->used)

// ========== ENV Configuration ==========

// Check if TLS SLL drain is enabled
// ENV: HAKMEM_TINY_SLL_DRAIN_ENABLE=1/0 (default: 1)
static inline int tls_sll_drain_is_enabled(void) {
    static int g_drain_enable = -1;
    if (__builtin_expect(g_drain_enable == -1, 0)) {
        const char* env = getenv("HAKMEM_TINY_SLL_DRAIN_ENABLE");
        if (env && *env == '0') {
            g_drain_enable = 0;
            fprintf(stderr, "[TLS_SLL_DRAIN] Drain DISABLED via ENV\n");
        } else {
            g_drain_enable = 1;
            fprintf(stderr, "[TLS_SLL_DRAIN] Drain ENABLED (default)\n");
        }
    }
    return g_drain_enable;
}

// Get drain interval (number of frees before triggering drain)
// ENV: HAKMEM_TINY_SLL_DRAIN_INTERVAL=N (default: 2048)
static inline uint32_t tls_sll_drain_get_interval(void) {
    static uint32_t g_drain_interval = 0;
    if (__builtin_expect(g_drain_interval == 0, 0)) {
        const char* env = getenv("HAKMEM_TINY_SLL_DRAIN_INTERVAL");
        if (env && *env) {
            int val = atoi(env);
            if (val > 0 && val <= 65536) {
                g_drain_interval = (uint32_t)val;
                fprintf(stderr, "[TLS_SLL_DRAIN] Interval=%u (from ENV)\n", g_drain_interval);
            } else {
                g_drain_interval = 2048;
                fprintf(stderr, "[TLS_SLL_DRAIN] Invalid ENV value, using default=2048\n");
            }
        } else {
            g_drain_interval = 2048;
            fprintf(stderr, "[TLS_SLL_DRAIN] Interval=%u (default)\n", g_drain_interval);
        }
    }
    return g_drain_interval;
}
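
// Example (shell; the benchmark binary name is a placeholder):
//   HAKMEM_TINY_SLL_DRAIN_ENABLE=0 ./bench      # disable draining entirely
//   HAKMEM_TINY_SLL_DRAIN_INTERVAL=512 ./bench  # drain 4x more often than default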

// ========== Drain Counter (TLS) ==========

// Per-class drain counter (TLS, one per size class).
// Incremented on each free; triggers a drain when it reaches the interval.
static __thread uint32_t g_tls_sll_drain_counter[TINY_NUM_CLASSES] = {0};

// Debug: Total drain operations performed (all classes)
static __thread uint64_t g_tls_sll_drain_total_calls = 0;
static __thread uint64_t g_tls_sll_drain_total_blocks = 0;

// ========== Drain Implementation ==========

// Box: TLS SLL Drain
// Purpose: Pop blocks from the TLS SLL and push them to slab freelists
//
// Flow:
//   1. Pop up to batch_size blocks from the TLS SLL (g_tls_sll[class_idx])
//   2. For each block:
//      a. Resolve SuperSlab/Slab (like the slow path does)
//      b. Call tiny_free_local_box() → decrements meta->used properly
//   3. Result: meta->used reflects the true state, empty detection works
//
// Args:
//   class_idx:  Size class to drain
//   batch_size: Max blocks to drain (0 = drain all)
//
// Returns: Number of blocks drained
static inline uint32_t tiny_tls_sll_drain(int class_idx, uint32_t batch_size) {
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES) {
        return 0;
    }

    // Sanity check: TLS SLL count
    extern __thread TinyTLSSLL g_tls_sll[TINY_NUM_CLASSES];
    uint32_t avail = g_tls_sll[class_idx].count;
    if (avail == 0) {
        return 0; // Nothing to drain
    }

    // Drain up to batch_size blocks (0 = drain all)
    uint32_t to_drain = (batch_size == 0) ? avail : (avail < batch_size ? avail : batch_size);
    uint32_t drained = 0;

    // Phase 9-2: Track touched slabs for EMPTY recycling after the drain completes.
    // We can't recycle inside the loop (other blocks from the same slab may still
    // be queued), but we CAN check once all blocks are drained.
#define MAX_TOUCHED_SLABS 64
    struct { SuperSlab* ss; int slab_idx; } touched[MAX_TOUCHED_SLABS];
    int num_touched = 0;

    // Debug logging
    static int g_debug = -1;
    if (__builtin_expect(g_debug == -1, 0)) {
        const char* env = getenv("HAKMEM_TINY_SLL_DRAIN_DEBUG");
        g_debug = (env && *env && *env != '0') ? 1 : 0;
    }
    if (g_debug) {
        fprintf(stderr, "[TLS_SLL_DRAIN] START: class=%d avail=%u to_drain=%u\n",
                class_idx, avail, to_drain);
    }

    // External symbols needed for drain.
    // Note: hak_super_lookup() is declared in hakmem_super_registry.h (included above).
    extern const size_t g_tiny_class_sizes[TINY_NUM_CLASSES]; // Block sizes (const)

    // Get the thread ID once (used for all blocks).
    // Note: Use pthread_self() directly since tiny_self_u32() is static inline.
    uint32_t my_tid = (uint32_t)(uintptr_t)pthread_self();

    // Drain loop: Pop blocks from the TLS SLL and push them to slab freelists
    for (uint32_t i = 0; i < to_drain; i++) {
        void* base = NULL;
        if (!tls_sll_pop(class_idx, &base)) {
            // TLS SLL exhausted (concurrent drain or count mismatch)
            if (g_debug) {
                fprintf(stderr, "[TLS_SLL_DRAIN] Pop failed at i=%u/%u (TLS SLL exhausted)\n", i, to_drain);
            }
            break;
        }
        if (g_debug && i < 5) {
            fprintf(stderr, "[TLS_SLL_DRAIN] Popped %u/%u: class=%d base=%p\n", i + 1, to_drain, class_idx, base);
        }

        // Resolve SuperSlab/Slab (like the slow path does)
        SuperSlab* ss = hak_super_lookup(base);
        if (!ss || ss->magic != SUPERSLAB_MAGIC) {
            // CRITICAL FIX (2025-11-27): Do NOT push the pointer back - pushback
            // bypasses duplicate checking and can create a cycle (the same pointer
            // sitting at both position 0 and position N of the list). Instead,
            // skip it and accept a rare leak; SuperSlab lookup failure is
            // transient/rare, so the leak is acceptable.
            if (g_debug) {
                fprintf(stderr, "[TLS_SLL_DRAIN] SKIP: class=%d base=%p (invalid SuperSlab, pointer leaked)\n",
                        class_idx, base);
            }
            continue;
        }

        // Get slab index
        int slab_idx = slab_index_for(ss, base);
        if (slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) {
            // CRITICAL FIX (2025-11-27): Do not push back - same duplicate hazard as above
            if (g_debug) {
                fprintf(stderr, "[TLS_SLL_DRAIN] SKIP: class=%d base=%p (invalid slab_idx=%d, pointer leaked)\n",
                        class_idx, base, slab_idx);
            }
            continue;
        }

        // Get slab metadata
        TinySlabMeta* meta = &ss->slabs[slab_idx];

        // CRITICAL FIX: Restore the header BEFORE calling tiny_free_local_box(),
        // which reads class_idx from the header. Uses the Header Box API for
        // ALL classes.
        tiny_header_write_for_alloc(base, class_idx);

        // Convert BASE → USER pointer (add the 1-byte header offset)
        // Phase E1: ALL classes (C0-C7) have a 1-byte header
        void* user_ptr = (char*)base + 1;

        // Call tiny_free_local_box() to:
        //   1. Push the block onto the slab freelist
        //   2. Decrement meta->used (THIS IS THE KEY!)
        tiny_free_local_box(ss, slab_idx, meta, user_ptr, my_tid);

#if !HAKMEM_BUILD_RELEASE
        // Trace the drain operation (debug only)
        extern void ptr_trace_record_impl(int event, void* ptr, int class_idx, uint64_t op_num,
                                          void* aux_ptr, uint32_t aux_u32, int aux_int,
                                          const char* file, int line);
        extern _Atomic uint64_t g_ptr_trace_op_counter;
        uint64_t _trace_op = atomic_fetch_add_explicit(&g_ptr_trace_op_counter, 1, memory_order_relaxed);
        ptr_trace_record_impl(5 /*PTR_EVENT_DRAIN_TO_FREELIST*/, base, class_idx, _trace_op,
                              NULL, avail, 0, __FILE__, __LINE__);
#endif
        drained++;

        // Phase 9-2: Track the touched slab for a later EMPTY check.
        // We record (ss, slab_idx) pairs and inspect them after the loop completes.
        int already_tracked = 0;
        for (int t = 0; t < num_touched; t++) {
            if (touched[t].ss == ss && touched[t].slab_idx == slab_idx) {
                already_tracked = 1;
                break;
            }
        }
        if (!already_tracked && num_touched < MAX_TOUCHED_SLABS) {
            touched[num_touched].ss = ss;
            touched[num_touched].slab_idx = slab_idx;
            num_touched++;
        }

        // BUG FIX: DO NOT release the slab here even if meta->used == 0.
        // Reason: Other blocks from the same slab may still be queued in the TLS
        // SLL waiting to be drained. Releasing the slab prematurely causes:
        //   1. The SuperSlab being reused for a different class
        //   2. hak_super_lookup() returning NULL for the remaining blocks
        //   3. TLS_SLL_POP_INVALID errors and corruption
        // Solution: Let LRU eviction and the normal lifecycle handle empty slab
        // release; empty slabs are naturally reclaimed once the SuperSlab is idle.
    }

    // Phase 9-2: Check touched slabs and recycle any that are EMPTY.
    // Now that ALL blocks have been drained, it is safe to check for EMPTY slabs.
    // This fixes the bug where EMPTY slabs accumulate and never return to the freelist.
    for (int t = 0; t < num_touched; t++) {
        SuperSlab* ss = touched[t].ss;
        int slab_idx = touched[t].slab_idx;
        TinySlabMeta* meta = &ss->slabs[slab_idx];
        SLAB_TRY_RECYCLE(ss, slab_idx, meta);
    }
#undef MAX_TOUCHED_SLABS // Header hygiene: keep the macro local to this function

    if (g_debug && drained > 0) {
        fprintf(stderr, "[TLS_SLL_DRAIN] END: class=%d drained=%u remaining=%u\n",
                class_idx, drained, g_tls_sll[class_idx].count);
    }

    // Update stats
    g_tls_sll_drain_total_calls++;
    g_tls_sll_drain_total_blocks += drained;
    return drained;
}

// ========== Drain Trigger (Called from Fast Free Path) ==========

// Box: Try Drain (with counter trigger)
// Purpose: Check the drain counter and trigger a drain when the interval is reached
//
// Flow:
//   1. Increment the drain counter for this class
//   2. If counter >= interval, trigger a drain and reset the counter
//   3. Otherwise, do nothing (fast path continues)
//
// Args:
//   class_idx: Size class that was just freed
//
// Returns: Number of blocks drained (0 if no drain)
static inline uint32_t tiny_tls_sll_try_drain(int class_idx) {
    // Check if drain is enabled
    if (__builtin_expect(!tls_sll_drain_is_enabled(), 0)) {
        return 0;
    }

    // Increment counter
    g_tls_sll_drain_counter[class_idx]++;

    // Check if the interval has been reached
    uint32_t interval = tls_sll_drain_get_interval();
    if (__builtin_expect(g_tls_sll_drain_counter[class_idx] >= interval, 0)) {
        // Trigger drain. batch_size=0 drains ALL available blocks, which is
        // what enables empty detection.
        uint32_t drained = tiny_tls_sll_drain(class_idx, 0);
        // Reset counter
        g_tls_sll_drain_counter[class_idx] = 0;
        return drained;
    }

    return 0; // No drain triggered
}
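
// Worked example of the cadence (default interval 2048, ignoring blocks popped
// back out for allocation in between): a thread freeing 10,000 blocks of one
// class crosses the threshold at frees #2048, #4096, #6144, and #8192, so it
// performs 4 full drains; the remaining 1,808 frees stay in the TLS SLL until
// the next threshold.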

// ========== Debug Stats (Destructor) ==========
#if !HAKMEM_BUILD_RELEASE
// Note: The counters are TLS, so this reports only the thread that runs the
// destructor (typically the main thread).
static void tls_sll_drain_print_stats(void) __attribute__((destructor));
static void tls_sll_drain_print_stats(void) {
    if (g_tls_sll_drain_total_calls > 0) {
        fprintf(stderr, "[TLS_SLL_DRAIN_STATS] Total drains: %llu, Total blocks: %llu, Avg: %.2f\n",
                (unsigned long long)g_tls_sll_drain_total_calls,
                (unsigned long long)g_tls_sll_drain_total_blocks,
                (double)g_tls_sll_drain_total_blocks / g_tls_sll_drain_total_calls);
    }
}
#endif
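
// Flush-all sketch (illustrative; e.g. at thread exit). tiny_tls_sll_drain_all
// is a hypothetical helper built only from the entry points in this header:
//
//   static inline void tiny_tls_sll_drain_all(void) {
//       for (int c = 0; c < TINY_NUM_CLASSES; c++) {
//           (void)tiny_tls_sll_drain(c, 0); // batch_size=0 → drain everything
//       }
//   }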