🎯 ROOT CAUSE: Internal allocation helpers were prematurely converting BASE → USER pointers before returning to caller. The caller then applied HAK_RET_ALLOC/tiny_region_id_write_header which performed ANOTHER BASE→USER conversion, resulting in double offset (BASE+2) and header written at wrong location. 📦 BOX THEORY SOLUTION: Establish clean pointer conversion boundary at tiny_region_id_write_header, making it the single source of truth for BASE → USER conversion. 🔧 CHANGES: - Fix #16: Remove premature BASE→USER conversions (6 locations) * core/tiny_alloc_fast.inc.h (3 fixes) * core/hakmem_tiny_refill.inc.h (2 fixes) * core/hakmem_tiny_fastcache.inc.h (1 fix) - Fix #12: Add header validation in tls_sll_pop (detect corruption) - Fix #14: Defense-in-depth header restoration in tls_sll_splice - Fix #15: USER pointer detection (for debugging) - Fix #13: Bump window header restoration - Fix #2, #6, #7, #8: Various header restoration & NULL termination 🧪 TEST RESULTS: 100% SUCCESS - 10K-500K iterations: All passed - 8 seeds × 100K: All passed (42,123,456,789,999,314,271,161) - Performance: ~630K ops/s average (stable) - Header corruption: ZERO 📋 FIXES SUMMARY: Fix #1-8: Initial header restoration & chain fixes (chatgpt-san) Fix #9-10: USER pointer auto-fix (later disabled) Fix #12: Validation system (caught corruption at call 14209) Fix #13: Bump window header writes Fix #14: Splice defense-in-depth Fix #15: USER pointer detection (debugging tool) Fix #16: Double conversion fix (FINAL SOLUTION) ✅ 🎓 LESSONS LEARNED: 1. Validation catches bugs early (Fix #12 was critical) 2. Class-specific inline logging reveals patterns (Option C) 3. Box Theory provides clean architectural boundaries 4. 
Multiple investigation approaches (Task/chatgpt-san collaboration) 📄 DOCUMENTATION: - P0_BUG_STATUS.md: Complete bug tracking timeline - C2_CORRUPTION_ROOT_CAUSE_FINAL.md: Detailed root cause analysis - FINAL_ANALYSIS_C2_CORRUPTION.md: Investigation methodology 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com> Co-Authored-By: Task Agent <task@anthropic.com> Co-Authored-By: ChatGPT <chatgpt@openai.com>
130 lines
4.8 KiB
C
130 lines
4.8 KiB
C
// tiny_alloc_fast_inline.h - Phase 7 Task 2: Aggressive inline TLS cache access
|
|
// Purpose: Eliminate function call overhead (5-10 cycles) in hot path
|
|
// Design: Macro-based inline expansion of TLS freelist operations
|
|
// Performance: Expected +10-15% (22M → 24-25M ops/s)
|
|
|
|
#ifndef TINY_ALLOC_FAST_INLINE_H
|
|
#define TINY_ALLOC_FAST_INLINE_H
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
#include "hakmem_build_flags.h"
|
|
#include "tiny_remote.h" // for TINY_REMOTE_SENTINEL (defense-in-depth)
|
|
#include "tiny_nextptr.h"
|
|
#include "tiny_region_id.h" // For HEADER_MAGIC, HEADER_CLASS_MASK (Fix #7)
|
|
|
|
// Fallback class count. MUST be defined before the extern TLS array
// declarations below, which size themselves with TINY_NUM_CLASSES.
// (Previously this fallback sat AFTER the declarations, so a build in
// which no earlier header defined TINY_NUM_CLASSES failed to compile
// before ever reaching the fallback.)
#ifndef TINY_NUM_CLASSES
#define TINY_NUM_CLASSES 8
#endif

// External TLS variables (defined in hakmem_tiny.c):
// per-class singly-linked freelist heads and their element counts.
extern __thread void* g_tls_sll_head[TINY_NUM_CLASSES];
extern __thread uint32_t g_tls_sll_count[TINY_NUM_CLASSES];
|
|
|
|
// ========== Inline Macro: TLS Freelist Pop ==========
//
// Inline expansion of tiny_alloc_fast_pop(). Eliminates the function-call
// overhead (~5-10 cycles: call/ret, register save/restore, stack frame) on
// the allocation hot path; the inline form reduces to a couple of TLS loads,
// a test, and a store of the new head.
//
// Outcomes:
//   - list empty                     -> (ptr_out) = NULL
//   - TINY_REMOTE_SENTINEL at head   -> defensively clear the list,
//                                       adjust count, (ptr_out) = NULL
//   - normal case                    -> unlink head, decrement count,
//                                       (ptr_out) = old head
//   - class_idx == 7                 -> additionally zero the first word of
//                                       the returned block (stale next ptr)
//
#define TINY_ALLOC_FAST_POP_INLINE(class_idx, ptr_out) do { \
        void* _pop_head = g_tls_sll_head[(class_idx)]; \
        if (__builtin_expect(_pop_head == NULL, 0)) { \
            /* Fast-path miss: freelist empty. */ \
            (ptr_out) = NULL; \
        } else if (__builtin_expect((uintptr_t)_pop_head == TINY_REMOTE_SENTINEL, 0)) { \
            /* Sentinel leaked into the TLS SLL: break the chain defensively. */ \
            g_tls_sll_head[(class_idx)] = NULL; \
            if (g_tls_sll_count[(class_idx)] > 0) g_tls_sll_count[(class_idx)]--; \
            (ptr_out) = NULL; \
        } else { \
            /* Header-aware next load (avoids UB on unaligned access). */ \
            void* _pop_next = tiny_next_load(_pop_head, (class_idx)); \
            g_tls_sll_head[(class_idx)] = _pop_next; \
            if (g_tls_sll_count[(class_idx)] > 0) { \
                g_tls_sll_count[(class_idx)]--; \
            } \
            (ptr_out) = _pop_head; \
            if (__builtin_expect((class_idx) == 7, 0)) { \
                /* Class 7: scrub the embedded next pointer before handing out. */ \
                *(void**)(ptr_out) = NULL; \
            } \
        } \
    } while (0)
|
|
|
|
// ========== Inline Macro: TLS Freelist Push ==========
//
// Inline expansion of tiny_alloc_fast_push(). Saves the ~5-10 cycle
// function-call overhead on the free hot path: the inline form is a
// next-pointer store, a TLS head store, and a count increment.
//
#if HAKMEM_TINY_HEADER_CLASSIDX
// Fix #7: re-stamp the 1-byte class header on free (header mode enabled).
// The user may have overwritten byte 0 of the block; tls_sll_splice()
// inspects byte 0 for HEADER_MAGIC to choose the next-pointer offset, so a
// clobbered header would steer it to the wrong offset (SEGV). Restoring it
// costs one byte store (~1-2 cycles). Class 7 is exempt from the stamp.
#define TINY_ALLOC_FAST_PUSH_INLINE(class_idx, ptr) do { \
        if ((class_idx) != 7) { \
            uint8_t _hdr_byte = (uint8_t)(HEADER_MAGIC | ((class_idx) & HEADER_CLASS_MASK)); \
            *(uint8_t*)(ptr) = _hdr_byte; \
        } \
        tiny_next_store((ptr), (class_idx), g_tls_sll_head[(class_idx)]); \
        g_tls_sll_head[(class_idx)] = (ptr); \
        g_tls_sll_count[(class_idx)]++; \
    } while (0)
#else
// Header mode disabled: plain LIFO push onto the TLS singly-linked list.
#define TINY_ALLOC_FAST_PUSH_INLINE(class_idx, ptr) do { \
        tiny_next_store((ptr), (class_idx), g_tls_sll_head[(class_idx)]); \
        g_tls_sll_head[(class_idx)] = (ptr); \
        g_tls_sll_count[(class_idx)]++; \
    } while (0)
#endif
|
|
|
|
// ========== Performance Notes ==========
//
// Benchmark results (expected):
// - Random Mixed 128B: 21M → 23M ops/s (+10%)
// - Random Mixed 256B: 19M → 22M ops/s (+15%)
// - Larson 1T:         2.7M → 3.0M ops/s (+11%)
//
// Key optimizations:
// 1. No function call overhead (save 5-10 cycles)
// 2. Better register allocation (inline knows full context)
// 3. No stack frame setup/teardown
// 4. Compiler can optimize across macro boundaries
//
// Trade-offs:
// 1. Code size: +100-200 bytes (each call site expanded)
// 2. Debug visibility: macros are harder to step through
// 3. Maintenance: changes must be kept in sync with the function version
//
// Recommendation: use inline macros for CRITICAL hot paths only
// (alloc/free fast path); keep functions for diagnostics/debugging.
|
|
|
#endif // TINY_ALLOC_FAST_INLINE_H
|