Files
hakmem/core/box/free_local_box.c
Moe Charm (CI) b7021061b8 Fix: CRITICAL double-allocation bug in trc_linear_carve()
Root Cause:
trc_linear_carve() used meta->used as cursor, but meta->used decrements
on free, causing already-allocated blocks to be re-carved.

Evidence:
- [LINEAR_CARVE] used=61 batch=1 → block 61 created
- (blocks freed, used decrements 62→59)
- [LINEAR_CARVE] used=59 batch=3 → blocks 59,60,61 RE-CREATED!
- Result: double-allocation → memory corruption → SEGV

Fix Implementation:
1. Added TinySlabMeta.carved (monotonic counter, never decrements)
2. Changed trc_linear_carve() to use carved instead of used
3. carved tracks carve progress, used tracks active count

Files Modified:
- core/superslab/superslab_types.h: Add carved field
- core/tiny_refill_opt.h: Use carved in trc_linear_carve()
- core/hakmem_tiny_superslab.c: Initialize carved=0
- core/tiny_alloc_fast.inc.h: Add next pointer validation
- core/hakmem_tiny_free.inc: Add drain/free validation

Test Results:
 bench_random_mixed: 950,037 ops/s (no crash)
 Fail-fast mode: 651,627 ops/s (with diagnostic logs)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-08 01:18:37 +09:00

105 lines
4.7 KiB
C

#include "free_local_box.h"
#include "free_publish_box.h"
#include "hakmem_tiny.h"
/*
 * tiny_free_local_box — same-thread free path: push a freed block back onto
 * the owning slab's local freelist.
 *
 * ss       : owning SuperSlab (validated via magic before any use)
 * slab_idx : index of the slab within ss that ptr belongs to
 * meta     : per-slab metadata holding the freelist head and used count
 * ptr      : block being freed; its first word is overwritten with the
 *            previous freelist head (intrusive singly-linked freelist)
 * my_tid   : caller's thread id; only consumed by the debug tracker
 *
 * Side effects: decrements meta->used and the SuperSlab active count; on
 * the slab's first free (freelist was empty) publishes the slab so other
 * threads may adopt it. When HAKMEM_TINY_REFILL fail-fast level >= 2,
 * performs extra integrity validation and aborts on corruption.
 */
void tiny_free_local_box(SuperSlab* ss, int slab_idx, TinySlabMeta* meta, void* ptr, uint32_t my_tid) {
    extern _Atomic uint64_t g_free_local_box_calls;
    atomic_fetch_add_explicit(&g_free_local_box_calls, 1, memory_order_relaxed);

    /* Reject obviously bad arguments up front. */
    if (!(ss && ss->magic == SUPERSLAB_MAGIC)) return;
    if (slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) return;
    /* BUGFIX: meta was NULL-guarded only inside the fail-fast branch below,
     * yet dereferenced unconditionally at the freelist push and at
     * meta->used--. A NULL meta would have crashed with no diagnostic.
     * Guard it consistently with the other argument checks. */
    if (!meta) return;
    (void)my_tid;

    /* Fail-fast level >= 2: verify ptr really belongs to (ss, slab_idx),
     * is block-aligned, and lies within the slab's capacity. */
    if (__builtin_expect(tiny_refill_failfast_level() >= 2, 0)) {
        int actual_idx = slab_index_for(ss, ptr);
        if (actual_idx != slab_idx) {
            tiny_failfast_abort_ptr("free_local_box_idx", ss, slab_idx, ptr, "slab_idx_mismatch");
        } else {
            size_t blk = g_tiny_class_sizes[ss->size_class];
            uint8_t* slab_base = tiny_slab_base_for(ss, slab_idx);
            uintptr_t delta = (uintptr_t)ptr - (uintptr_t)slab_base;
            if (blk == 0 || (delta % blk) != 0) {
                tiny_failfast_abort_ptr("free_local_box_align", ss, slab_idx, ptr, "misaligned");
            } else if (delta / blk >= meta->capacity) { /* meta non-NULL: guarded above */
                tiny_failfast_abort_ptr("free_local_box_range", ss, slab_idx, ptr, "out_of_capacity");
            }
        }
    }

    void* prev = meta->freelist;

    /* FREELIST CORRUPTION DEBUG: validate the current freelist head before
     * linking ptr in front of it. */
    if (__builtin_expect(tiny_refill_failfast_level() >= 2, 0)) {
        size_t blk = g_tiny_class_sizes[ss->size_class];
        uint8_t* base_ss = (uint8_t*)ss;
        uint8_t* slab_base = tiny_slab_base_for(ss, slab_idx);
        // Verify prev pointer is valid (if not NULL)
        if (prev != NULL) {
            uintptr_t prev_addr = (uintptr_t)prev;
            uintptr_t slab_addr = (uintptr_t)slab_base;
            /* Range check: assumes a SuperSlab spans 2 MiB — TODO confirm
             * against SUPERSLAB_SIZE if such a constant exists. */
            if (prev_addr < (uintptr_t)base_ss || prev_addr >= (uintptr_t)base_ss + (2*1024*1024)) {
                fprintf(stderr, "[FREE_CORRUPT] prev=%p outside SuperSlab ss=%p (cls=%u slab=%d)\n",
                        prev, ss, ss->size_class, slab_idx);
                tiny_failfast_abort_ptr("free_local_prev_range", ss, slab_idx, ptr, "prev_outside_ss");
            }
            // Check alignment of prev
            if ((prev_addr - slab_addr) % blk != 0) {
                fprintf(stderr, "[FREE_CORRUPT] prev=%p misaligned (cls=%u slab=%d blk=%zu offset=%zu)\n",
                        prev, ss->size_class, slab_idx, blk, (size_t)(prev_addr - slab_addr));
                fprintf(stderr, "[FREE_CORRUPT] Writing from ptr=%p, freelist was=%p\n", ptr, prev);
                tiny_failfast_abort_ptr("free_local_prev_misalign", ss, slab_idx, ptr, "prev_misaligned");
            }
        }
        fprintf(stderr, "[FREE_VERIFY] cls=%u slab=%d ptr=%p prev=%p (offset_ptr=%zu offset_prev=%zu)\n",
                ss->size_class, slab_idx, ptr, prev,
                (size_t)((uintptr_t)ptr - (uintptr_t)slab_base),
                prev ? (size_t)((uintptr_t)prev - (uintptr_t)slab_base) : 0);
    }

    /* Intrusive push: block's first word becomes the next-link. */
    *(void**)ptr = prev;
    meta->freelist = ptr;

    // FREELIST CORRUPTION DEBUG: Verify write succeeded
    if (__builtin_expect(tiny_refill_failfast_level() >= 2, 0)) {
        void* readback = *(void**)ptr;
        if (readback != prev) {
            fprintf(stderr, "[FREE_CORRUPT] Wrote prev=%p to ptr=%p but read back %p!\n",
                    prev, ptr, readback);
            fprintf(stderr, "[FREE_CORRUPT] Memory corruption detected during freelist push\n");
            tiny_failfast_abort_ptr("free_local_readback", ss, slab_idx, ptr, "write_corrupted");
        }
    }
    tiny_failfast_log("free_local_box", ss->size_class, ss, meta, ptr, prev);

    // BUGFIX: Memory barrier to ensure freelist visibility before used decrement
    // Without this, other threads can see new freelist but old used count (race)
    atomic_thread_fence(memory_order_release);

    /* Optional freelist-mask update on first push, gated by the
     * HAKMEM_TINY_FREELIST_MASK env var (cached on first call). */
    do {
        static int g_mask_en = -1;
        if (__builtin_expect(g_mask_en == -1, 0)) {
            const char* e = getenv("HAKMEM_TINY_FREELIST_MASK");
            g_mask_en = (e && *e && *e != '0') ? 1 : 0;
        }
        if (__builtin_expect(g_mask_en, 0) && prev == NULL) {
            uint32_t bit = (1u << slab_idx);
            atomic_fetch_or_explicit(&ss->freelist_mask, bit, memory_order_release);
        }
    } while (0);

    // Track local free (debug helpers may be no-op)
    tiny_remote_track_on_local_free(ss, slab_idx, ptr, "local_free", my_tid);
    meta->used--;
    ss_active_dec_one(ss);
    if (prev == NULL) {
        // First-free → advertise slab to adopters
        tiny_free_publish_first_free((int)ss->size_class, ss, slab_idx);
    }
}