Phase 7 follow-up: header-aware next-pointer handling in BG spill, TLS drain, and aggressive inline macros
- bg_spill: link/traverse next at base+1 for C0–C6, base for C7
- lifecycle: drain TLS SLL and fast caches reading next with header-aware offsets
- tiny_alloc_fast_inline: POP/PUSH macros made header-aware to match tls_sll_box rules
- add optional FREE_WRAP_ENTER trace (HAKMEM_FREE_WRAP_TRACE) for early triage

Result: bogus free logs for the 0xa0/...0099 pointers are gone; the remaining SIGBUS appears early in the free path.
Next: instrument the early libc fallback, or guard invalid pointers during init, to pinpoint the source.
This commit is contained in:
@ -45,19 +45,25 @@ void bg_spill_drain_class(int class_idx, pthread_mutex_t* lock) {
|
||||
void* rest = NULL;
|
||||
void* cur = (void*)chain;
|
||||
void* prev = NULL;
|
||||
// Phase 7: header-aware next pointer (C0-C6: base+1, C7: base)
|
||||
#if HAKMEM_TINY_HEADER_CLASSIDX
|
||||
const size_t next_off = (class_idx == 7) ? 0 : 1;
|
||||
#else
|
||||
const size_t next_off = 0;
|
||||
#endif
|
||||
while (cur && processed < g_bg_spill_max_batch) {
|
||||
prev = cur;
|
||||
cur = *(void**)cur;
|
||||
cur = *(void**)((uint8_t*)cur + next_off);
|
||||
processed++;
|
||||
}
|
||||
if (cur != NULL) { rest = cur; *(void**)prev = NULL; }
|
||||
if (cur != NULL) { rest = cur; *(void**)((uint8_t*)prev + next_off) = NULL; }
|
||||
|
||||
// Return processed nodes to SS freelists
|
||||
pthread_mutex_lock(lock);
|
||||
uint32_t self_tid = tiny_self_u32_guard();
|
||||
void* node = (void*)chain;
|
||||
while (node) {
|
||||
void* next = *(void**)node;
|
||||
void* next = *(void**)((uint8_t*)node + next_off);
|
||||
SuperSlab* owner_ss = hak_super_lookup(node);
|
||||
if (owner_ss && owner_ss->magic == SUPERSLAB_MAGIC) {
|
||||
int slab_idx = slab_index_for(owner_ss, node);
|
||||
@ -69,6 +75,7 @@ void bg_spill_drain_class(int class_idx, pthread_mutex_t* lock) {
|
||||
continue;
|
||||
}
|
||||
void* prev = meta->freelist;
|
||||
// SuperSlab freelist uses base offset (no header while free)
|
||||
*(void**)node = prev;
|
||||
meta->freelist = node;
|
||||
tiny_failfast_log("bg_spill", owner_ss->size_class, owner_ss, meta, node, prev);
|
||||
@ -87,10 +94,10 @@ void bg_spill_drain_class(int class_idx, pthread_mutex_t* lock) {
|
||||
// Prepend remainder back to head
|
||||
uintptr_t old_head;
|
||||
void* tail = rest;
|
||||
while (*(void**)tail) tail = *(void**)tail;
|
||||
while (*(void**)((uint8_t*)tail + next_off)) tail = *(void**)((uint8_t*)tail + next_off);
|
||||
do {
|
||||
old_head = atomic_load_explicit(&g_bg_spill_head[class_idx], memory_order_acquire);
|
||||
*(void**)tail = (void*)old_head;
|
||||
*(void**)((uint8_t*)tail + next_off) = (void*)old_head;
|
||||
} while (!atomic_compare_exchange_weak_explicit(&g_bg_spill_head[class_idx], &old_head,
|
||||
(uintptr_t)rest,
|
||||
memory_order_release, memory_order_relaxed));
|
||||
|
||||
Reference in New Issue
Block a user