Fix: CRITICAL double-allocation bug in trc_linear_carve()

Root Cause:
trc_linear_carve() used meta->used as its carve cursor, but meta->used
decrements on free, causing already-allocated blocks to be re-carved.

Evidence:
- [LINEAR_CARVE] used=61 batch=1 → block 61 created
- (blocks freed, used decrements 62→59)
- [LINEAR_CARVE] used=59 batch=3 → blocks 59,60,61 RE-CREATED!
- Result: double-allocation → memory corruption → SEGV

Fix Implementation:
1. Added TinySlabMeta.carved (monotonic counter, never decrements)
2. Changed trc_linear_carve() to use carved instead of used
3. carved tracks how far the slab has been carved (monotonic); used tracks the active (live) block count

Files Modified:
- core/superslab/superslab_types.h: Add carved field
- core/tiny_refill_opt.h: Use carved in trc_linear_carve()
- core/hakmem_tiny_superslab.c: Initialize carved=0
- core/tiny_alloc_fast.inc.h: Add next pointer validation
- core/hakmem_tiny_free.inc: Add drain/free validation

Test Results:
 PASS: bench_random_mixed: 950,037 ops/s (no crash)
 PASS: Fail-fast mode: 651,627 ops/s (with diagnostic logs)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
Moe Charm (CI)
2025-11-08 01:18:37 +09:00
parent a430545820
commit b7021061b8
12 changed files with 2236 additions and 1854 deletions

View File

@ -49,10 +49,31 @@ static inline void trc_push_front(TinyRefillChain* c, void* node) {
}
}
// Forward declaration of guard function
static inline int trc_refill_guard_enabled(void);
// Splice local chain into TLS SLL (single meta write)
static inline void trc_splice_to_sll(int class_idx, TinyRefillChain* c,
void** sll_head, uint32_t* sll_count) {
if (!c || c->head == NULL) return;
// CORRUPTION DEBUG: Validate chain before splicing
if (__builtin_expect(trc_refill_guard_enabled(), 0)) {
extern const size_t g_tiny_class_sizes[];
size_t blk = g_tiny_class_sizes[class_idx];
fprintf(stderr, "[SPLICE_TO_SLL] cls=%d head=%p tail=%p count=%u\n",
class_idx, c->head, c->tail, c->count);
// Check alignment of chain head
if (((uintptr_t)c->head % blk) != 0) {
fprintf(stderr, "[SPLICE_CORRUPT] Chain head %p misaligned (blk=%zu offset=%zu)!\n",
c->head, blk, (uintptr_t)c->head % blk);
fprintf(stderr, "[SPLICE_CORRUPT] Corruption detected BEFORE writing to TLS!\n");
abort();
}
}
if (c->tail) {
*(void**)c->tail = *sll_head;
}
@ -111,12 +132,26 @@ static inline uint32_t trc_pop_from_freelist(struct TinySlabMeta* meta,
if (__builtin_expect(trc_refill_guard_enabled() &&
!trc_ptr_is_valid(ss_base, ss_limit, block_size, p),
0)) {
fprintf(stderr, "[FREELIST_CORRUPT] Reading freelist head: p=%p (ss_base=%p ss_limit=%p blk=%zu)\n",
p, (void*)ss_base, (void*)ss_limit, block_size);
fprintf(stderr, "[FREELIST_CORRUPT] Head pointer is corrupted (invalid range/alignment)\n");
trc_failfast_abort("freelist_head", class_idx, ss_base, ss_limit, p);
}
void* next = *(void**)p;
if (__builtin_expect(trc_refill_guard_enabled() &&
!trc_ptr_is_valid(ss_base, ss_limit, block_size, next),
0)) {
fprintf(stderr, "[FREELIST_CORRUPT] Reading freelist node: p=%p next=%p (ss_base=%p ss_limit=%p blk=%zu)\n",
p, next, (void*)ss_base, (void*)ss_limit, block_size);
fprintf(stderr, "[FREELIST_CORRUPT] Next pointer is corrupted (cls=%d taken=%u/%u)\n",
class_idx, taken, want);
// Log offset details
if (next != NULL) {
uintptr_t offset = (uintptr_t)next - ss_base;
size_t expected_align = offset % block_size;
fprintf(stderr, "[FREELIST_CORRUPT] Corrupted offset=%zu (0x%zx) expected_align=%zu\n",
offset, offset, expected_align);
}
trc_failfast_abort("freelist_next", class_idx, ss_base, ss_limit, next);
}
meta->freelist = next;
@ -134,14 +169,37 @@ static inline uint32_t trc_linear_carve(uint8_t* base, size_t bs,
TinyRefillChain* out) {
if (!out || batch == 0) return 0;
trc_init(out);
uint8_t* cursor = base + ((size_t)meta->used * bs);
// FIX: Use carved (monotonic) instead of used (decrements on free)
// CORRUPTION DEBUG: Validate capacity before carving
if (__builtin_expect(trc_refill_guard_enabled(), 0)) {
if (meta->carved + batch > meta->capacity) {
fprintf(stderr, "[LINEAR_CARVE_CORRUPT] Carving beyond capacity!\n");
fprintf(stderr, "[LINEAR_CARVE_CORRUPT] carved=%u batch=%u capacity=%u (would be %u)\n",
meta->carved, batch, meta->capacity, meta->carved + batch);
fprintf(stderr, "[LINEAR_CARVE_CORRUPT] base=%p bs=%zu\n", (void*)base, bs);
abort();
}
}
// FIX: Use carved counter (monotonic) instead of used (which decrements on free)
uint8_t* cursor = base + ((size_t)meta->carved * bs);
void* head = (void*)cursor;
// CORRUPTION DEBUG: Log carve operation
if (__builtin_expect(trc_refill_guard_enabled(), 0)) {
fprintf(stderr, "[LINEAR_CARVE] base=%p carved=%u batch=%u cursor=%p\n",
(void*)base, meta->carved, batch, (void*)cursor);
}
for (uint32_t i = 1; i < batch; i++) {
uint8_t* next = cursor + bs;
*(void**)cursor = (void*)next;
cursor = next;
}
void* tail = (void*)cursor;
// FIX: Update both carved (monotonic) and used (active count)
meta->carved += batch;
meta->used += batch;
out->head = head;
out->tail = tail;