Box TLS-SLL + free boundary hardening: normalize C0–C6 to base (ptr-1) at free boundary; route all caches/freelists via base; replace remaining g_tls_sll_head direct writes with Box API (tls_sll_push/splice) in refill/magazine/ultra; keep C7 excluded. Fixes rbp=0xa0 free crash by preventing header overwrite and centralizing TLS-SLL invariants.
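For orientation, the pattern this commit applies at every free-side boundary can be sketched as below. This is an illustrative summary rather than code from the tree: the helper name tiny_block_base and the assumption of a one-byte class header for C0-C6 are inferred from the (uint8_t*)ptr - 1 arithmetic in the diff.

#include <stdint.h>

/* Sketch (hypothetical helper, not in the repo): C0-C6 blocks keep a header
 * at the block base, so the user pointer is base + 1 and every cache or
 * freelist entry is normalized back to base before it is stored. C7 (1KB)
 * is headerless: its user pointer already is the base, and it must never be
 * pushed onto the TLS SLL, because the SLL writes its next pointer into the
 * first 8 bytes of the block, which for C7 is user data. */
static inline void* tiny_block_base(int class_idx, void* ptr) {
    return (class_idx == 7) ? ptr : (void*)((uint8_t*)ptr - 1);
}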
@@ -5,6 +5,7 @@
 #include "tiny_refill.h"
 #include "tiny_tls_guard.h"
 #include "box/free_publish_box.h"
+#include "box/tls_sll_box.h" // Box TLS-SLL: C7-safe push/pop/splice
 #include "mid_tcache.h"
 extern __thread void* g_tls_sll_head[TINY_NUM_CLASSES];
 extern __thread uint32_t g_tls_sll_count[TINY_NUM_CLASSES];
@@ -28,6 +29,9 @@ static inline int tiny_drain_to_sll_budget(void) {
 static inline void tiny_drain_freelist_to_sll_once(SuperSlab* ss, int slab_idx, int class_idx) {
     int budget = tiny_drain_to_sll_budget();
     if (__builtin_expect(budget <= 0, 1)) return;
+    // CRITICAL: C7 (1KB) is headerless - MUST NOT drain to TLS SLL
+    // Reason: SLL stores next pointer in first 8 bytes (user data for C7)
+    if (__builtin_expect(class_idx == 7, 0)) return;
     if (!(ss && ss->magic == SUPERSLAB_MAGIC)) return;
     if (slab_idx < 0) return;
     TinySlabMeta* m = &ss->slabs[slab_idx];
@@ -64,10 +68,16 @@ static inline void tiny_drain_freelist_to_sll_once(SuperSlab* ss, int slab_idx,
         }
 
         m->freelist = *(void**)p;
-        *(void**)p = g_tls_sll_head[class_idx];
-        g_tls_sll_head[class_idx] = p;
-        g_tls_sll_count[class_idx]++;
-        moved++;
+
+        // Use Box TLS-SLL API (C7-safe push)
+        // Note: C7 already rejected at line 34, so this always succeeds
+        uint32_t sll_capacity = 256; // Conservative limit
+        if (tls_sll_push(class_idx, p, sll_capacity)) {
+            moved++;
+        } else {
+            // SLL full, stop draining
+            break;
+        }
     }
 }
 
@@ -181,9 +191,11 @@ void hak_tiny_free_with_slab(void* ptr, TinySlab* slab) {
 
     if (__builtin_expect(g_debug_fast0, 0)) {
         tiny_debug_ring_record(TINY_RING_EVENT_FRONT_BYPASS, (uint16_t)class_idx, ptr, (uintptr_t)slab_idx);
+        // Always operate on block base for C0-C6 (header lives at base)
+        void* base = (class_idx == 7) ? ptr : (void*)((uint8_t*)ptr - 1);
         void* prev = meta->freelist;
-        *(void**)ptr = prev;
-        meta->freelist = ptr;
+        *(void**)base = prev;
+        meta->freelist = base;
         meta->used--;
         ss_active_dec_one(ss);
         if (prev == NULL) {
@@ -195,7 +207,9 @@ void hak_tiny_free_with_slab(void* ptr, TinySlab* slab) {
     }
 
     if (g_fast_enable && g_fast_cap[class_idx] != 0) {
-        if (tiny_fast_push(class_idx, ptr)) {
+        // Push block base into fast cache
+        void* base = (class_idx == 7) ? ptr : (void*)((uint8_t*)ptr - 1);
+        if (tiny_fast_push(class_idx, base)) {
             tiny_debug_ring_record(TINY_RING_EVENT_FREE_FAST, (uint16_t)class_idx, ptr, slab_idx);
             HAK_STAT_FREE(class_idx);
             return;
@@ -210,15 +224,17 @@ void hak_tiny_free_with_slab(void* ptr, TinySlab* slab) {
     }
     // TinyHotMag front push(8/16/32B, A/B)
     if (__builtin_expect(g_hotmag_enable && class_idx <= 2, 1)) {
-        if (hotmag_push(class_idx, ptr)) {
+        void* base = (class_idx == 7) ? ptr : (void*)((uint8_t*)ptr - 1);
+        if (hotmag_push(class_idx, base)) {
             tiny_debug_ring_record(TINY_RING_EVENT_FREE_RETURN_MAG, (uint16_t)class_idx, ptr, 1);
             HAK_STAT_FREE(class_idx);
             return;
         }
     }
     if (tls->count < tls->cap) {
-        tiny_tls_list_guard_push(class_idx, tls, ptr);
-        tls_list_push(tls, ptr);
+        void* base = (class_idx == 7) ? ptr : (void*)((uint8_t*)ptr - 1);
+        tiny_tls_list_guard_push(class_idx, tls, base);
+        tls_list_push(tls, base);
         tiny_debug_ring_record(TINY_RING_EVENT_FREE_LOCAL, (uint16_t)class_idx, ptr, 0);
         HAK_STAT_FREE(class_idx);
         return;
@@ -227,8 +243,11 @@ void hak_tiny_free_with_slab(void* ptr, TinySlab* slab) {
     if (__builtin_expect(seq != g_tls_param_seen[class_idx], 0)) {
         tiny_tls_refresh_params(class_idx, tls);
     }
-    tiny_tls_list_guard_push(class_idx, tls, ptr);
-    tls_list_push(tls, ptr);
+    {
+        void* base = (class_idx == 7) ? ptr : (void*)((uint8_t*)ptr - 1);
+        tiny_tls_list_guard_push(class_idx, tls, base);
+        tls_list_push(tls, base);
+    }
     if (tls_list_should_spill(tls)) {
         tls_list_spill_excess(class_idx, tls);
     }
@@ -297,10 +316,11 @@ void hak_tiny_free(void* ptr) {
                     class_idx, ptr, old_head, g_tls_sll_count[class_idx]);
             }
 
-            *(void**)ptr = g_tls_sll_head[class_idx];
-            g_tls_sll_head[class_idx] = ptr;
-            g_tls_sll_count[class_idx]++;
-            return;
+            // Use Box TLS-SLL API (C7-safe push)
+            if (tls_sll_push(class_idx, ptr, sll_cap)) {
+                return; // Success
+            }
+            // Fall through if push fails (SLL full or C7)
         }
     }
 }
@@ -317,7 +337,10 @@ void hak_tiny_free(void* ptr) {
         TinySlab* slab = hak_tiny_owner_slab(ptr);
         if (slab) class_idx = slab->class_idx;
     }
-    if (class_idx >= 0) {
+    // CRITICAL: C7 (1KB) is headerless - MUST NOT use TLS SLL
+    // Reason: SLL stores next pointer in first 8 bytes (user data for C7)
+    // Fix: Exclude C7 from ultra free path
+    if (class_idx >= 0 && class_idx != 7) {
         // Ultra free: push directly to TLS SLL without magazine init
         int sll_cap = ultra_sll_cap_for_class(class_idx);
         if ((int)g_tls_sll_count[class_idx] < sll_cap) {
@@ -347,22 +370,26 @@ void hak_tiny_free(void* ptr) {
                     class_idx, ptr, old_head, g_tls_sll_count[class_idx]);
             }
 
-            *(void**)ptr = g_tls_sll_head[class_idx];
-            g_tls_sll_head[class_idx] = ptr;
-            g_tls_sll_count[class_idx]++;
-
-            // CORRUPTION DEBUG: Verify write succeeded
-            if (__builtin_expect(tiny_refill_failfast_level() >= 2, 0)) {
-                void* readback = *(void**)ptr;
-                void* new_head = g_tls_sll_head[class_idx];
-                if (readback != *(void**)&readback || new_head != ptr) {
-                    fprintf(stderr, "[ULTRA_FREE_CORRUPT] Write verification failed! ptr=%p new_head=%p\n",
-                            ptr, new_head);
-                    abort();
-                }
-            }
-
-            return;
+            // Use Box TLS-SLL API (C7-safe push)
+            // Note: C7 already rejected at line 334
+            {
+                void* base = (class_idx == 7) ? ptr : (void*)((uint8_t*)ptr - 1);
+                if (tls_sll_push(class_idx, base, (uint32_t)sll_cap)) {
+                    // CORRUPTION DEBUG: Verify write succeeded
+                    if (__builtin_expect(tiny_refill_failfast_level() >= 2, 0)) {
+                        void* readback = *(void**)base;
+                        (void)readback;
+                        void* new_head = g_tls_sll_head[class_idx];
+                        if (new_head != base) {
+                            fprintf(stderr, "[ULTRA_FREE_CORRUPT] Write verification failed! base=%p new_head=%p\n",
+                                    base, new_head);
+                            abort();
+                        }
+                    }
+                    return; // Success
+                }
+            }
+            // Fall through if push fails (SLL full)
         }
     }
     // Fallback to existing path if class resolution fails
@@ -407,7 +434,8 @@ void hak_tiny_free(void* ptr) {
         tiny_debug_ring_record(TINY_RING_EVENT_FREE_ENTER, (uint16_t)fast_class_idx, ptr, 1);
     }
     if (fast_class_idx >= 0 && g_fast_enable && g_fast_cap[fast_class_idx] != 0) {
-        if (tiny_fast_push(fast_class_idx, ptr)) {
+        void* base2 = (fast_class_idx == 7) ? ptr : (void*)((uint8_t*)ptr - 1);
+        if (tiny_fast_push(fast_class_idx, base2)) {
             tiny_debug_ring_record(TINY_RING_EVENT_FREE_FAST, (uint16_t)fast_class_idx, ptr, 0);
             HAK_STAT_FREE(fast_class_idx);
             return;
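All of the converted call sites above follow the same caller-side contract: attempt a bounded push into the Box TLS-SLL and fall back to the slower path when it fails. A minimal caller sketch, assuming only what the diff shows (tls_sll_push takes a class index, a block base, and a capacity, and reports success; it refuses C7 and a full list); the wrapper name free_via_box_sll and the declared return type are illustrative:

#include <stdint.h>

/* Declaration inferred from the call sites in this diff; the real prototype
 * lives in box/tls_sll_box.h and may differ. */
extern int tls_sll_push(int class_idx, void* base, uint32_t capacity);

/* Returns 1 if the block was cached thread-locally, 0 if the caller must
 * take the fallback path (SLL full, or headerless C7). */
static int free_via_box_sll(int class_idx, void* base, uint32_t capacity) {
    if (class_idx == 7) return 0;   // C7 never goes to the TLS SLL
    return tls_sll_push(class_idx, base, capacity) ? 1 : 0;
}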