tiny: fix TLS list next_off scoping; default TLS_LIST to 1; add sentinel guards; make TLS list ops header-aware; quiet release logging for benchmarks
This commit is contained in:
@ -58,6 +58,12 @@ static inline int tls_refill_from_tls_slab(int class_idx, TinyTLSList* tls, uint
|
||||
if (want == 0u) return 0;
|
||||
|
||||
size_t block_size = g_tiny_class_sizes[class_idx];
|
||||
// Header-aware TLS list next offset for chains we build here
|
||||
#if HAKMEM_TINY_HEADER_CLASSIDX
|
||||
const size_t next_off_tls = (class_idx == 7) ? 0 : 1;
|
||||
#else
|
||||
const size_t next_off_tls = 0;
|
||||
#endif
|
||||
void* accum_head = NULL;
|
||||
void* accum_tail = NULL;
|
||||
uint32_t total = 0u;
|
||||
@ -73,8 +79,8 @@ static inline int tls_refill_from_tls_slab(int class_idx, TinyTLSList* tls, uint
|
||||
uint32_t need = want - total;
|
||||
while (local < need && meta->freelist) {
|
||||
void* node = meta->freelist;
|
||||
meta->freelist = *(void**)node;
|
||||
*(void**)node = local_head;
|
||||
meta->freelist = *(void**)node; // freelist is base-linked
|
||||
*(void**)((uint8_t*)node + next_off_tls) = local_head;
|
||||
local_head = node;
|
||||
if (!local_tail) local_tail = node;
|
||||
local++;
|
||||
@ -86,7 +92,7 @@ static inline int tls_refill_from_tls_slab(int class_idx, TinyTLSList* tls, uint
|
||||
accum_head = local_head;
|
||||
accum_tail = local_tail;
|
||||
} else {
|
||||
*(void**)local_tail = accum_head;
|
||||
*(void**)((uint8_t*)local_tail + next_off_tls) = accum_head;
|
||||
accum_head = local_head;
|
||||
}
|
||||
total += local;
|
||||
@ -119,7 +125,7 @@ static inline int tls_refill_from_tls_slab(int class_idx, TinyTLSList* tls, uint
|
||||
uint8_t* cursor = base_cursor;
|
||||
for (uint32_t i = 1; i < need; ++i) {
|
||||
uint8_t* next = cursor + block_size;
|
||||
*(void**)cursor = (void*)next;
|
||||
*(void**)(cursor + next_off_tls) = (void*)next;
|
||||
cursor = next;
|
||||
}
|
||||
void* local_tail = (void*)cursor;
|
||||
@ -130,14 +136,14 @@ static inline int tls_refill_from_tls_slab(int class_idx, TinyTLSList* tls, uint
|
||||
accum_head = local_head;
|
||||
accum_tail = local_tail;
|
||||
} else {
|
||||
*(void**)local_tail = accum_head;
|
||||
*(void**)((uint8_t*)local_tail + next_off_tls) = accum_head;
|
||||
accum_head = local_head;
|
||||
}
|
||||
total += need;
|
||||
}
|
||||
|
||||
if (total > 0u && accum_head) {
|
||||
tls_list_bulk_put(tls, accum_head, accum_tail, total);
|
||||
tls_list_bulk_put(tls, accum_head, accum_tail, total, class_idx);
|
||||
return (int)total;
|
||||
}
|
||||
return 0;
|
||||
@ -151,7 +157,7 @@ static inline void tls_list_spill_excess(int class_idx, TinyTLSList* tls) {
|
||||
uint32_t excess = tls->count - cap;
|
||||
void* head = NULL;
|
||||
void* tail = NULL;
|
||||
uint32_t taken = tls_list_bulk_take(tls, excess, &head, &tail);
|
||||
uint32_t taken = tls_list_bulk_take(tls, excess, &head, &tail, class_idx);
|
||||
if (taken == 0u || head == NULL) return;
|
||||
|
||||
#if HAKMEM_PROF_STATIC && HAKMEM_BUILD_DEBUG
|
||||
@ -174,8 +180,13 @@ static inline void tls_list_spill_excess(int class_idx, TinyTLSList* tls) {
|
||||
|
||||
uint32_t self_tid = tiny_self_u32();
|
||||
void* node = head;
|
||||
#if HAKMEM_TINY_HEADER_CLASSIDX
|
||||
const size_t next_off_tls = (class_idx == 7) ? 0 : 1;
|
||||
#else
|
||||
const size_t next_off_tls = 0;
|
||||
#endif
|
||||
while (node) {
|
||||
void* next = *(void**)node;
|
||||
void* next = *(void**)((uint8_t*)node + next_off_tls);
|
||||
int handled = 0;
|
||||
|
||||
// Phase 1: Try SuperSlab first (registry-based lookup, no false positives)
|
||||
@ -189,7 +200,7 @@ static inline void tls_list_spill_excess(int class_idx, TinyTLSList* tls) {
|
||||
handled = 1;
|
||||
} else {
|
||||
void* prev = meta->freelist;
|
||||
*(void**)node = prev;
|
||||
*(void**)((uint8_t*)node + 0) = prev; // freelist within slab uses base link
|
||||
meta->freelist = node;
|
||||
tiny_failfast_log("tls_spill_ss", ss->size_class, ss, meta, node, prev);
|
||||
if (meta->used > 0) meta->used--;
|
||||
@ -235,7 +246,7 @@ static inline void tls_list_spill_excess(int class_idx, TinyTLSList* tls) {
|
||||
}
|
||||
#endif
|
||||
if (!handled) {
|
||||
*(void**)node = requeue_head;
|
||||
*(void**)((uint8_t*)node + next_off_tls) = requeue_head;
|
||||
if (!requeue_head) requeue_tail = node;
|
||||
requeue_head = node;
|
||||
requeue_count++;
|
||||
@ -248,7 +259,7 @@ static inline void tls_list_spill_excess(int class_idx, TinyTLSList* tls) {
|
||||
}
|
||||
|
||||
if (requeue_head) {
|
||||
tls_list_bulk_put(tls, requeue_head, requeue_tail, requeue_count);
|
||||
tls_list_bulk_put(tls, requeue_head, requeue_tail, requeue_count, class_idx);
|
||||
}
|
||||
|
||||
#if HAKMEM_PROF_STATIC && HAKMEM_BUILD_DEBUG
|
||||
|
||||
Reference in New Issue
Block a user