Files
hakmem/core/box/free_local_box.c
Moe Charm (CI) 855ea7223c Phase E1-CORRECT: Fix USER/BASE pointer conversion bugs in slab_index_for calls
CRITICAL BUG FIX: Phase E1 introduced 1-byte headers for ALL size classes (C0-C7),
changing the pointer contract. However, many locations still called slab_index_for()
with USER pointers (storage+1) instead of BASE pointers (storage), causing off-by-one
slab index calculations that corrupted memory.

Root Cause:
- USER pointer = BASE + 1 (returned by malloc, points past header)
- BASE pointer = storage start (where 1-byte header is written)
- slab_index_for() expects BASE pointer for correct slab boundary calculations
- Passing USER pointer → wrong slab_idx → wrong metadata → freelist corruption
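
For illustration, a minimal sketch of the contract (HDR_SIZE and the two
helpers are hypothetical names, not the hakmem API; the real code open-codes
the ±1 adjustment, as in the file below):

    #include <stdint.h>

    #define HDR_SIZE 1  /* Phase E1: 1-byte header on every block, C0-C7 */

    /* Block layout within a slab:
     *   BASE -> [1-byte header][user payload ...]
     *   USER = BASE + HDR_SIZE   (the pointer malloc returns)
     */
    static inline void* user_from_base(void* base) { return (uint8_t*)base + HDR_SIZE; }
    static inline void* base_from_user(void* user) { return (uint8_t*)user - HDR_SIZE; }

    /* Correct: slab_index_for(ss, base_from_user(p))  -- slab math on BASE
     * Buggy:   slab_index_for(ss, p)                  -- USER is one byte off,
     * which skews the per-slab delta/alignment math and can yield the wrong
     * slab index. */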

Impact Before Fix:
- bench_random_mixed crashes at ~14K iterations with SEGV
- Massive C7 alignment check failures (wrong slab classification)
- Memory corruption from writing to wrong slab freelists

Fixes Applied (8 locations; the shared pattern is sketched after this list):

1. core/hakmem_tiny_free.inc:137
   - Added USER→BASE conversion before slab_index_for()

2. core/hakmem_tiny_ultra_simple.inc:148
   - Added USER→BASE conversion before slab_index_for()

3. core/tiny_free_fast.inc.h:220
   - Added USER→BASE conversion before slab_index_for()

4-5. core/tiny_free_magazine.inc.h:126,315
   - Added USER→BASE conversion before slab_index_for() (2 locations)

6. core/box/free_local_box.c:14,22,62
   - Added USER→BASE conversion before slab_index_for()
   - Fixed delta calculation to use BASE instead of USER
   - Fixed debug logging to use BASE instead of USER

7. core/hakmem_tiny.c:448,460,473 (tiny_debug_track_alloc_ret)
   - Added USER→BASE conversion before slab_index_for() (2 calls)
   - Fixed delta calculation to use BASE instead of USER
   - This function is called on EVERY allocation in debug builds
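
All eight sites follow the same shape. A hedged before/after sketch
(tiny_free_fix_sketch is a made-up wrapper; SuperSlab, slab_index_for, and
tiny_slab_base_for are the hakmem identifiers used in the file below, assumed
to come from its internal headers):

    static void tiny_free_fix_sketch(SuperSlab* ss, void* ptr /* USER */) {
        /* BEFORE (buggy): slab geometry computed from the USER pointer */
        // int slab_idx = slab_index_for(ss, ptr);

        /* AFTER: convert USER -> BASE once, do all slab math on BASE */
        void* base = (void*)((uint8_t*)ptr - 1);   /* Phase E1: 1-byte header */
        int slab_idx = slab_index_for(ss, base);
        uint8_t* slab_base = tiny_slab_base_for(ss, slab_idx);
        uintptr_t delta = (uintptr_t)base - (uintptr_t)slab_base;
        (void)delta;                               /* index and offset now consistent */
    }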

Results After Fix:
- bench_random_mixed stable up to 66K iterations (~4.7x improvement)
- C7 alignment check failures eliminated (was: 100% failure rate)
- Front Gate "Unknown" classification dropped to 0% (was: 1.67%)
- No segfaults for workloads up to ~33K allocations

Remaining Issue:
- Segfault still occurs at iteration 66152 (allocs=33137, frees=33014)
  - Different bug from the USER/BASE conversion issues
  - Likely a capacity/boundary condition (further investigation needed)

Testing:
- bench_random_mixed_hakmem 1K-66K iterations: PASS
- bench_random_mixed_hakmem 67K+ iterations: FAIL (different bug)
- bench_fixed_size_hakmem 200K iterations: PASS

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-13 05:21:36 +09:00

109 lines
5.0 KiB
C

#include "free_local_box.h"
#include "free_publish_box.h"
#include "hakmem_tiny.h"
#include "tiny_next_ptr_box.h" // Phase E1-CORRECT: Box API
void tiny_free_local_box(SuperSlab* ss, int slab_idx, TinySlabMeta* meta, void* ptr, uint32_t my_tid) {
    extern _Atomic uint64_t g_free_local_box_calls;
    atomic_fetch_add_explicit(&g_free_local_box_calls, 1, memory_order_relaxed);

    if (!(ss && ss->magic == SUPERSLAB_MAGIC)) return;
    if (slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) return;
    (void)my_tid;

    // ✅ Phase E1-CORRECT: ALL classes have headers, calculate BASE pointer once
    void* base = (void*)((uint8_t*)ptr - 1);

    if (__builtin_expect(tiny_refill_failfast_level() >= 2, 0)) {
        int actual_idx = slab_index_for(ss, base);
        if (actual_idx != slab_idx) {
            tiny_failfast_abort_ptr("free_local_box_idx", ss, slab_idx, ptr, "slab_idx_mismatch");
        } else {
            size_t blk = g_tiny_class_sizes[ss->size_class];
            uint8_t* slab_base = tiny_slab_base_for(ss, slab_idx);
            uintptr_t delta = (uintptr_t)base - (uintptr_t)slab_base;
            if (blk == 0 || (delta % blk) != 0) {
                tiny_failfast_abort_ptr("free_local_box_align", ss, slab_idx, ptr, "misaligned");
            } else if (meta && delta / blk >= meta->capacity) {
                tiny_failfast_abort_ptr("free_local_box_range", ss, slab_idx, ptr, "out_of_capacity");
            }
        }
    }

    void* prev = meta->freelist;

    // FREELIST CORRUPTION DEBUG: Validate pointer before writing
    if (__builtin_expect(tiny_refill_failfast_level() >= 2, 0)) {
        size_t blk = g_tiny_class_sizes[ss->size_class];
        uint8_t* base_ss = (uint8_t*)ss;
        uint8_t* slab_base = tiny_slab_base_for(ss, slab_idx);
        // Verify prev pointer is valid (if not NULL)
        if (prev != NULL) {
            uintptr_t prev_addr = (uintptr_t)prev;
            uintptr_t slab_addr = (uintptr_t)slab_base;
            // Check if prev is within this slab
            if (prev_addr < (uintptr_t)base_ss || prev_addr >= (uintptr_t)base_ss + (2*1024*1024)) {
                fprintf(stderr, "[FREE_CORRUPT] prev=%p outside SuperSlab ss=%p (cls=%u slab=%d)\n",
                        prev, ss, ss->size_class, slab_idx);
                tiny_failfast_abort_ptr("free_local_prev_range", ss, slab_idx, ptr, "prev_outside_ss");
            }
            // Check alignment of prev
            if ((prev_addr - slab_addr) % blk != 0) {
                fprintf(stderr, "[FREE_CORRUPT] prev=%p misaligned (cls=%u slab=%d blk=%zu offset=%zu)\n",
                        prev, ss->size_class, slab_idx, blk, (size_t)(prev_addr - slab_addr));
                fprintf(stderr, "[FREE_CORRUPT] Writing from ptr=%p, freelist was=%p\n", ptr, prev);
                tiny_failfast_abort_ptr("free_local_prev_misalign", ss, slab_idx, ptr, "prev_misaligned");
            }
        }
        fprintf(stderr, "[FREE_VERIFY] cls=%u slab=%d ptr=%p prev=%p (offset_ptr=%zu offset_prev=%zu)\n",
                ss->size_class, slab_idx, ptr, prev,
                (size_t)((uintptr_t)base - (uintptr_t)slab_base),
                prev ? (size_t)((uintptr_t)prev - (uintptr_t)slab_base) : 0);
    }

    tiny_next_write(ss->size_class, ptr, prev); // Phase E1-CORRECT: Box API
    meta->freelist = ptr;

    // FREELIST CORRUPTION DEBUG: Verify write succeeded
    if (__builtin_expect(tiny_refill_failfast_level() >= 2, 0)) {
        void* readback = tiny_next_read(ss->size_class, ptr); // Phase E1-CORRECT: Box API
        if (readback != prev) {
            fprintf(stderr, "[FREE_CORRUPT] Wrote prev=%p to ptr=%p but read back %p!\n",
                    prev, ptr, readback);
            fprintf(stderr, "[FREE_CORRUPT] Memory corruption detected during freelist push\n");
            tiny_failfast_abort_ptr("free_local_readback", ss, slab_idx, ptr, "write_corrupted");
        }
    }

    tiny_failfast_log("free_local_box", ss->size_class, ss, meta, ptr, prev);

    // BUGFIX: Memory barrier to ensure freelist visibility before used decrement
    // Without this, other threads can see new freelist but old used count (race)
    atomic_thread_fence(memory_order_release);

    // Optional freelist mask update on first push
    do {
        static int g_mask_en = -1;
        if (__builtin_expect(g_mask_en == -1, 0)) {
            const char* e = getenv("HAKMEM_TINY_FREELIST_MASK");
            g_mask_en = (e && *e && *e != '0') ? 1 : 0;
        }
        if (__builtin_expect(g_mask_en, 0) && prev == NULL) {
            uint32_t bit = (1u << slab_idx);
            atomic_fetch_or_explicit(&ss->freelist_mask, bit, memory_order_release);
        }
    } while (0);

    // Track local free (debug helpers may be no-op)
    tiny_remote_track_on_local_free(ss, slab_idx, ptr, "local_free", my_tid);

    meta->used--;
    ss_active_dec_one(ss);

    if (prev == NULL) {
        // First-free → advertise slab to adopters
        tiny_free_publish_first_free((int)ss->size_class, ss, slab_idx);
    }
}
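
For orientation, a hedged sketch of a call site for this function, following the
Phase E1 contract described in the commit message above. superslab_for_ptr,
slab_meta_for, and tiny_self_tid are illustrative stand-ins, not confirmed
hakmem entry points:

    static void tiny_free_callsite_sketch(void* ptr /* USER pointer from malloc */) {
        SuperSlab* ss = superslab_for_ptr(ptr);           /* hypothetical ptr -> SuperSlab lookup */
        void* base = (void*)((uint8_t*)ptr - 1);          /* USER -> BASE, per this commit */
        int slab_idx = slab_index_for(ss, base);          /* BASE pointer, never USER */
        TinySlabMeta* meta = slab_meta_for(ss, slab_idx); /* hypothetical metadata lookup */
        /* tiny_free_local_box takes the USER pointer and re-derives BASE itself */
        tiny_free_local_box(ss, slab_idx, meta, ptr, tiny_self_tid());
    }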