Files
hakmem/core/tiny_alloc_fast_inline.h
Moe Charm (CI) 19ce4c1ac4 Add SuperSlab refcount pinning and critical failsafe guards
Major breakthrough: sh8bench now completes without SIGSEGV!
Added defensive refcounting and failsafe mechanisms to prevent
use-after-free and corruption propagation.

Changes:
1. SuperSlab Refcount Pinning (core/box/tls_sll_box.h)
   - tls_sll_push_impl: increment refcount before adding to list
   - tls_sll_pop_impl: decrement refcount when removing from list
   - Prevents SuperSlab from being freed while TLS SLL holds pointers
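
   Sketch of the pinning idea (illustrative only - the field and helper
   names ss_refcount / ss_ref_inc / ss_ref_dec are assumptions, not the
   actual tls_sll_box.h code):

       #include <stdatomic.h>

       typedef struct SuperSlab { atomic_uint ss_refcount; /* ... */ } SuperSlab;

       static inline void ss_ref_inc(SuperSlab* ss) {
           atomic_fetch_add_explicit(&ss->ss_refcount, 1, memory_order_relaxed);
       }
       static inline void ss_ref_dec(SuperSlab* ss) {
           atomic_fetch_sub_explicit(&ss->ss_refcount, 1, memory_order_release);
       }

       /* Pin the owning SuperSlab while one of its blocks sits in the TLS SLL. */
       static inline void tls_sll_push_pinned(void** head, void* node, SuperSlab* ss) {
           ss_ref_inc(ss);          /* pin before the node becomes reachable */
           *(void**)node = *head;   /* link node into the singly linked list */
           *head = node;
       }

       static inline void* tls_sll_pop_pinned(void** head, SuperSlab* ss) {
           void* node = *head;
           if (!node) return NULL;
           *head = *(void**)node;   /* unlink */
           ss_ref_dec(ss);          /* unpin: the TLS SLL no longer holds it */
           return node;
       }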

2. SuperSlab Release Guards (core/superslab_allocate.c, shared_pool_release.c)
   - Check refcount > 0 before freeing SuperSlab
   - If refcount > 0, defer release instead of freeing
   - Prevents use-after-free when TLS/remote/freelist hold stale pointers
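
   A rough sketch of the guard (hypothetical names and a hypothetical
   defer list; the real deferral in superslab_allocate.c /
   shared_pool_release.c may differ):

       #include <stdatomic.h>
       #include <stdbool.h>

       typedef struct SuperSlab {
           atomic_uint ss_refcount;
           struct SuperSlab* defer_next;
           /* ... */
       } SuperSlab;

       extern void superslab_free(SuperSlab* ss);   /* assumed final release */
       static SuperSlab* g_deferred_release;        /* assumed locked elsewhere */

       static bool superslab_try_release(SuperSlab* ss) {
           if (atomic_load_explicit(&ss->ss_refcount, memory_order_acquire) > 0) {
               /* TLS SLL / remote queue / freelist may still hold pointers:
                * defer instead of freeing to avoid use-after-free. */
               ss->defer_next = g_deferred_release;
               g_deferred_release = ss;
               return false;
           }
           superslab_free(ss);
           return true;
       }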

3. TLS SLL Next Pointer Validation (core/box/tls_sll_box.h)
   - Detect invalid next pointer during traversal
   - Log [TLS_SLL_NEXT_INVALID] when detected
   - Drop list to prevent corruption propagation

4. Unified Cache Freelist Validation (core/front/tiny_unified_cache.c)
   - Validate freelist head before use
   - Log [UNIFIED_FREELIST_INVALID] for corrupted lists
   - Defensive drop to prevent bad allocations
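
   Items 3 and 4 share the same defensive pattern: a cheap plausibility
   check on a freelist pointer, a rate-limited log, and dropping the list
   rather than dereferencing a suspect pointer. Rough sketch (the range
   constants mirror the fast-pop check in tiny_alloc_fast_inline.h; the
   helper name is made up):

       #include <stdbool.h>
       #include <stdint.h>
       #include <stdio.h>

       /* User-space plausibility window, same as the fast-pop check. */
       static inline bool tiny_ptr_plausible(const void* p) {
           uintptr_t u = (uintptr_t)p;
           return u >= 4096 && u <= 0x00007fffffffffffULL;
       }

       /* Validate a list head; drop (leak) the whole list if it is bogus. */
       static inline void* freelist_head_or_drop(void** head, const char* tag) {
           void* h = *head;
           if (h && !tiny_ptr_plausible(h)) {
               fprintf(stderr, "[%s] dropping list, head=%p\n", tag, h);
               *head = NULL;   /* defensive drop: corruption must not propagate */
               return NULL;
           }
           return h;
       }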

5. Early Refcount Decrement Fix (core/tiny_free_fast.inc.h)
   - Removed ss_active_dec_one from fast path
   - Prevents premature refcount depletion
   - Defers decrement to proper cleanup path

Test Results:
- sh8bench completes successfully (exit code 0)
- No SIGSEGV or ABORT signals
- Short runs (5s) are crash-free
- ⚠️ Multiple [TLS_SLL_NEXT_INVALID] / [UNIFIED_FREELIST_INVALID] messages logged
- ⚠️ Invalid pointers still present (stale references exist)

Status Analysis:
- Stability: ACHIEVED (no crashes)
- Root Cause: NOT FULLY SOLVED (invalid pointers remain)
- Approach: Defensive + refcount guards working well

Remaining Issues:
- Why does a SuperSlab get unregistered while the TLS SLL still holds pointers into it?
- SuperSlab lifecycle: how do remote_queue / adopt / LRU interact?
- Stale pointers indicate improper SuperSlab lifetime management

Performance Impact:
- Refcount operations: +1-3 cycles per push/pop (minor)
- Validation checks: +2-5 cycles (minor)
- Overall: < 5% overhead estimated

Next Investigation:
- Trace SuperSlab lifecycle (allocation → registration → unregister → free)
- Check remote_queue handling
- Verify adopt/LRU mechanisms
- Correlate stale pointer logs with SuperSlab unregister events

Log Volume Warning:
- May produce many diagnostic logs on long runs
- Consider ENV gating for production
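
One possible gate, sketched with a made-up environment variable name
(HAKMEM_TINY_DIAG); the project may already have its own convention:

    #include <stdio.h>
    #include <stdlib.h>

    /* Cache the getenv() result so the gate costs one branch after init. */
    static int tiny_diag_enabled(void) {
        static int cached = -1;
        if (cached < 0) {
            const char* v = getenv("HAKMEM_TINY_DIAG");
            cached = (v && v[0] != '0') ? 1 : 0;
        }
        return cached;
    }

    #define TINY_DIAG_LOG(...) do { \
        if (tiny_diag_enabled()) fprintf(stderr, __VA_ARGS__); \
    } while (0)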

Technical Notes:
- Refcount is per-SuperSlab, not global
- Guards prevent symptom propagation, not root cause
- Root cause is in SuperSlab lifecycle management

🤖 Generated with Claude Code (https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-03 21:56:52 +09:00


// tiny_alloc_fast_inline.h - Phase 7 Task 2: Aggressive inline TLS cache access
// Purpose: Eliminate function call overhead (5-10 cycles) in hot path
// Design: Macro-based inline expansion of TLS freelist operations
// Performance: Expected +10-15% (22M → 24-25M ops/s)
#ifndef TINY_ALLOC_FAST_INLINE_H
#define TINY_ALLOC_FAST_INLINE_H
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include "hakmem_build_flags.h"
#include "tiny_remote.h" // for TINY_REMOTE_SENTINEL (defense-in-depth)
#include "box/tiny_next_ptr_box.h" // Phase E1-CORRECT: unified next pointer API
#include "tiny_region_id.h" // For HEADER_MAGIC, HEADER_CLASS_MASK (Fix #7)
#include "box/tls_sll_box.h"
// External TLS variables (defined in hakmem_tiny.c)
// Phase 3d-B: TLS Cache Merge - Unified TLS SLL structure
#ifndef TINY_NUM_CLASSES
#define TINY_NUM_CLASSES 8
#endif
extern __thread TinyTLSSLL g_tls_sll[TINY_NUM_CLASSES];
extern __thread const char* g_tls_sll_last_writer[TINY_NUM_CLASSES];
// ========== Inline Macro: TLS Freelist Pop ==========
//
// Aggressive inline expansion of tiny_alloc_fast_pop()
// Saves: 5-10 cycles (function call overhead + register spilling)
//
// Assembly comparison (x86-64):
//   Function call:
//     push %rbx                       ; Save registers
//     mov  %edi, %ebx                 ; class_idx to %ebx
//     call tiny_alloc_fast_pop        ; Call (5-10 cycles overhead)
//     pop  %rbx                       ; Restore registers
//     test %rax, %rax                 ; Check result
//
//   Inline macro:
//     mov  g_tls_sll_head(%rdi), %rax ; Direct access (3-4 cycles)
//     test %rax, %rax
//     je   .miss
//     mov  (%rax), %rdx
//     mov  %rdx, g_tls_sll_head(%rdi)
//
// Result: 5-10 fewer instructions, better register allocation
//
#define TINY_ALLOC_FAST_POP_INLINE(class_idx, ptr_out) do { \
    extern int g_tls_sll_class_mask; \
    if (__builtin_expect(((g_tls_sll_class_mask & (1u << (class_idx))) == 0), 0)) { \
        (ptr_out) = NULL; \
        break; \
    } \
    void* _head = g_tls_sll[(class_idx)].head; \
    if (__builtin_expect(_head != NULL, 1)) { \
        if (__builtin_expect((uintptr_t)_head == TINY_REMOTE_SENTINEL, 0)) { \
            /* Break the chain defensively if sentinel leaked into TLS SLL */ \
            tls_sll_set_head_raw((class_idx), NULL, "fast_pop_sentinel"); \
            g_tls_sll_last_writer[(class_idx)] = "fast_pop_sentinel"; \
            if (g_tls_sll[(class_idx)].count > 0) g_tls_sll[(class_idx)].count--; \
            (ptr_out) = NULL; \
        } else { \
            /* Phase E1-CORRECT: Use Box API for next pointer read */ \
            void* _next = tiny_next_read((class_idx), _head); \
            if (__builtin_expect((class_idx) == 4 || (class_idx) == 6, 0)) { \
                tls_sll_diag_next((class_idx), _head, _next, "fast_pop_next"); \
            } \
            tls_sll_set_head_raw((class_idx), _next, "fast_pop"); \
            if (((class_idx) == 4 || (class_idx) == 6) && _next && \
                ((uintptr_t)_next < 4096 || (uintptr_t)_next > 0x00007fffffffffffULL)) { \
                static __thread uint8_t s_fast_pop_invalid_log[8] = {0}; \
                if (s_fast_pop_invalid_log[(class_idx)] < 4) { \
                    fprintf(stderr, "[TLS_SLL_FAST_POP_INVALID] cls=%d head=%p next=%p\n", (class_idx), _head, _next); \
                    s_fast_pop_invalid_log[(class_idx)]++; \
                } \
                tls_sll_set_head_raw((class_idx), NULL, "fast_pop_post_invalid"); \
                /* keep count unchanged to flag drop */ \
                g_tls_sll_last_writer[(class_idx)] = "fast_pop_post_invalid"; \
                (ptr_out) = NULL; \
            } else { \
                if (g_tls_sll[(class_idx)].count > 0) { \
                    g_tls_sll[(class_idx)].count--; \
                } \
                /* Phase 7: Fast path returns BASE pointer; HAK_RET_ALLOC does BASE→USER */ \
                (ptr_out) = _head; \
            } \
        } \
    } else { \
        (ptr_out) = NULL; \
    } \
} while(0)
// ========== Inline Macro: TLS Freelist Push ==========
//
// Aggressive inline expansion of tiny_alloc_fast_push()
// Saves: 5-10 cycles (function call overhead)
//
// Assembly comparison:
//   Function call:
//     mov  %rdi, %rsi                 ; ptr to %rsi
//     mov  %ebx, %edi                 ; class_idx to %edi
//     call tiny_alloc_fast_push       ; Call (5-10 cycles)
//
//   Inline macro:
//     mov  g_tls_sll_head(%rdi), %rax ; Direct inline (2-3 cycles)
//     mov  %rax, (%rsi)
//     mov  %rsi, g_tls_sll_head(%rdi)
//
#if HAKMEM_TINY_HEADER_CLASSIDX
// DESIGN RULE: "Header is written by BOTH Alloc and Free/Drain"
// FREE path: Restore header for Class 1-6, then write Next pointer
// ALLOC path: Write header before returning to user (HAK_RET_ALLOC)
// This ensures Free path can read header to determine class_idx
#define TINY_ALLOC_FAST_PUSH_INLINE(class_idx, ptr) do { \
    extern int g_tls_sll_class_mask; \
    if (__builtin_expect(((g_tls_sll_class_mask & (1u << (class_idx))) == 0), 0)) { \
        break; \
    } \
    if (!(ptr)) break; \
    /* Phase E1-CORRECT: API ptr is USER pointer (= base+1). Convert back to BASE. */ \
    uint8_t* _base = (uint8_t*)(ptr) - 1; \
    /* C0-C6: Restore header BEFORE writing Next. C7: skip (next overwrites header). */ \
    if ((class_idx) != 7) { \
        *_base = HEADER_MAGIC | ((class_idx) & HEADER_CLASS_MASK); \
    } \
    /* Link node using BASE as the canonical SLL node address. */ \
    tiny_next_write((class_idx), _base, g_tls_sll[(class_idx)].head); \
    tls_sll_set_head_raw((class_idx), _base, "fast_push"); \
    g_tls_sll[(class_idx)].count++; \
} while(0)
#else
#define TINY_ALLOC_FAST_PUSH_INLINE(class_idx, ptr) do { \
    tiny_next_write((class_idx), (ptr), g_tls_sll[(class_idx)].head); \
    tls_sll_set_head_raw((class_idx), (ptr), "fast_push"); \
    g_tls_sll[(class_idx)].count++; \
} while(0)
#endif
// ========== Performance Notes ==========
//
// Benchmark results (expected):
// - Random Mixed 128B: 21M → 23M ops/s (+10%)
// - Random Mixed 256B: 19M → 22M ops/s (+15%)
// - Larson 1T: 2.7M → 3.0M ops/s (+11%)
//
// Key optimizations:
// 1. No function call overhead (save 5-10 cycles)
// 2. Better register allocation (inline knows full context)
// 3. No stack frame setup/teardown
// 4. Compiler can optimize across macro boundaries
//
// Trade-offs:
// 1. Code size: +100-200 bytes (each call site expanded)
// 2. Debug visibility: Macros harder to step through
// 3. Maintenance: Changes must be kept in sync with function version
//
// Recommendation: Use inline macros for CRITICAL hot paths only
// (alloc/free fast path), keep functions for diagnostics/debugging
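// ========== Usage Sketch (illustrative, not the actual call sites) ==========
//
// A minimal example of the intended contract; the surrounding helpers
// (tiny_alloc_slow, the exact HAK_RET_ALLOC signature) are assumptions here.
// POP yields a BASE pointer or NULL on miss; PUSH takes the USER pointer
// when HAKMEM_TINY_HEADER_CLASSIDX is enabled.
//
//   void* p = NULL;
//   TINY_ALLOC_FAST_POP_INLINE(class_idx, p);
//   if (p) return HAK_RET_ALLOC(class_idx, p);   // BASE → USER conversion
//   return tiny_alloc_slow(class_idx);           // TLS SLL miss: slow path
//
//   // free fast path:
//   TINY_ALLOC_FAST_PUSH_INLINE(class_idx, user_ptr);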
#endif // TINY_ALLOC_FAST_INLINE_H