SLL triage step 2: use the safe tls_sll_pop for classes >=4 in the alloc fast path, and add an optional safe-header mode to tls_sll_push (HAKMEM_TINY_SLL_SAFEHEADER). Shared SS is stable with SLL for C0..C4; the class 5 hot path still crashes, but it can be bypassed with HAKMEM_TINY_HOTPATH_CLASS5=0.
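
Both switches named above are plain environment variables, read once and then cached in a static flag, which is the same pattern the tls_sll_push change below uses for HAKMEM_TINY_SLL_SAFEHEADER. A minimal sketch of that pattern, assuming a hypothetical wrapper name (env_flag_once and sll_safeheader_enabled are illustrative, not functions in the tree):

#include <stdlib.h>

// getenv-once pattern used for HAKMEM_TINY_* switches: parse the variable on
// first use, cache the result in a static flag, and branch-hint the cold path.
// env_flag_once() is an illustrative helper, not part of the codebase.
static int env_flag_once(const char* name, int* cache)
{
    if (__builtin_expect(*cache == -1, 0)) {        // first call only
        const char* e = getenv(name);
        *cache = (e && *e && *e != '0') ? 1 : 0;    // unset or "0" -> disabled
    }
    return *cache;
}

static int g_safehdr = -1;   // -1 = not read yet

int sll_safeheader_enabled(void)
{
    return env_flag_once("HAKMEM_TINY_SLL_SAFEHEADER", &g_safehdr);
}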

Moe Charm (CI)
2025-11-14 01:29:55 +09:00
parent 3b05d0f048
commit e573c98a5e
2 changed files with 38 additions and 13 deletions

@@ -113,15 +113,29 @@ static inline bool tls_sll_push(int class_idx, void* ptr, uint32_t capacity)
     }
 #if HAKMEM_TINY_HEADER_CLASSIDX
-    // Restore header defensively for header classes (class != 0,7 use header byte).
+    // Header handling for header classes (class != 0,7).
+    // Safe mode (HAKMEM_TINY_SLL_SAFEHEADER=1): never overwrite header; reject on magic mismatch.
+    // Default mode: restore expected header.
     if (class_idx != 0 && class_idx != 7) {
+        static int g_sll_safehdr = -1;
+        if (__builtin_expect(g_sll_safehdr == -1, 0)) {
+            const char* e = getenv("HAKMEM_TINY_SLL_SAFEHEADER");
+            g_sll_safehdr = (e && *e && *e != '0') ? 1 : 0;
+        }
         uint8_t* b = (uint8_t*)ptr;
         uint8_t expected = (uint8_t)(HEADER_MAGIC | (class_idx & HEADER_CLASS_MASK));
-        // Always set; any mismatch is effectively healed here.
+        if (g_sll_safehdr) {
+            uint8_t got = *b;
+            if ((got & 0xF0u) != HEADER_MAGIC) {
+                // Reject push silently (fall back to slow path at caller)
+                return false;
+            }
+        } else {
             PTR_TRACK_TLS_PUSH(ptr, class_idx);
             PTR_TRACK_HEADER_WRITE(ptr, expected);
             *b = expected;
+        }
     }
 #endif
     tls_sll_debug_guard(class_idx, ptr, "push");
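
With safe-header mode enabled, a magic mismatch now makes tls_sll_push return false instead of overwriting the byte, so every push site must be able to divert the block to a slow path. A minimal caller-side sketch of that contract (tiny_free_fast_sketch and tiny_free_slow_path are illustrative names, not the real call sites in the tree):

#include <stdint.h>
#include <stdbool.h>

// Signatures assumed to match the diff above; definitions live in the allocator.
bool tls_sll_push(int class_idx, void* ptr, uint32_t capacity);
void tiny_free_slow_path(int class_idx, void* ptr);   // hypothetical fallback

// Free-side call site: try the TLS SLL first; if the push is rejected
// (list full, filtered class, or a safe-mode magic mismatch), the block
// still has to be returned somewhere, so hand it to the slow path.
void tiny_free_fast_sketch(int class_idx, void* ptr, uint32_t sll_capacity)
{
    if (!tls_sll_push(class_idx, ptr, sll_capacity)) {
        tiny_free_slow_path(class_idx, ptr);
    }
}

The second changed file, shown next, applies the matching change on the allocation side.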

@@ -572,6 +572,8 @@ static inline void* tiny_alloc_fast(size_t size) {
     // Generic front (FastCache/SFC/SLL)
     // Respect SLL global toggle; when disabled, skip TLS SLL fast pop entirely
     if (__builtin_expect(g_tls_sll_enable, 1)) {
+        // For classes 0..3 keep ultra-inline POP; for >=4 use safe Box POP to avoid UB on bad heads.
+        if (class_idx <= 3) {
 #if HAKMEM_TINY_AGGRESSIVE_INLINE
             // Phase 2: Use inline macro (3-4 instructions, zero call overhead)
             TINY_ALLOC_FAST_POP_INLINE(class_idx, ptr);
@@ -579,6 +581,10 @@ static inline void* tiny_alloc_fast(size_t size) {
             // Legacy: Function call (10-15 instructions, 5-10 cycle overhead)
             ptr = tiny_alloc_fast_pop(class_idx);
 #endif
+        } else {
+            void* base = NULL;
+            if (tls_sll_pop(class_idx, &base)) ptr = base; else ptr = NULL;
+        }
     } else {
         ptr = NULL;
     }
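
tls_sll_pop itself is not part of this diff, so the sketch below only illustrates the kind of head validation a "safe" pop can perform before dereferencing anything. The TLS layout, the next-pointer offset, the 0xA0 magic, and every name containing _sketch are assumptions for illustration, not the project's real code:

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

// Illustrative TLS state; the real per-class list structures are not shown here.
#define TINY_NUM_CLASSES 8
static __thread void* g_tls_sll_head[TINY_NUM_CLASSES];

// Assumed block layout for the sketch: header byte at offset 0, next pointer
// at offset 8; 0xA0 stands in for HEADER_MAGIC. All of these are assumptions.
#define SKETCH_HEADER_MAGIC 0xA0u
static inline void* sketch_next(void* blk) { return *(void**)((uint8_t*)blk + 8); }

// "Safe" pop: refuse to dereference a suspicious head (NULL, misaligned, or
// wrong header magic) and report failure so the caller takes refill/slow path.
bool tls_sll_pop_sketch(int class_idx, void** out)
{
    void* head = g_tls_sll_head[class_idx];
    if (head == NULL || ((uintptr_t)head & 0x7u) != 0) return false;
    if ((*(uint8_t*)head & 0xF0u) != SKETCH_HEADER_MAGIC) return false;
    g_tls_sll_head[class_idx] = sketch_next(head);   // unlink the validated head
    *out = head;
    return true;
}

The point of routing classes >=4 through a checked pop like this is that a corrupted head degrades into a fast-path miss instead of undefined behavior.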
@@ -600,6 +606,7 @@ static inline void* tiny_alloc_fast(size_t size) {
         int refilled = tiny_alloc_fast_refill(class_idx);
         if (__builtin_expect(refilled > 0, 1)) {
             if (__builtin_expect(g_tls_sll_enable, 1)) {
+                if (class_idx <= 3) {
 #if HAKMEM_TINY_AGGRESSIVE_INLINE
                     // Phase 2: Use inline macro (3-4 instructions, zero call overhead)
                     TINY_ALLOC_FAST_POP_INLINE(class_idx, ptr);
@@ -607,6 +614,10 @@ static inline void* tiny_alloc_fast(size_t size) {
                     // Legacy: Function call (10-15 instructions, 5-10 cycle overhead)
                     ptr = tiny_alloc_fast_pop(class_idx);
 #endif
+                } else {
+                    void* base2 = NULL;
+                    if (tls_sll_pop(class_idx, &base2)) ptr = base2; else ptr = NULL;
+                }
             } else {
                 ptr = NULL;
             }
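
For reference, the header byte that both changes lean on packs a magic nibble with the class index, and safe mode only trusts the magic nibble. A small standalone check of that arithmetic; HEADER_MAGIC = 0xA0 and HEADER_CLASS_MASK = 0x0F are placeholder values, not the project's real constants:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Placeholder constants for illustration only; the real HEADER_MAGIC /
// HEADER_CLASS_MASK values live in the allocator headers, not in this diff.
#define HEADER_MAGIC      0xA0u
#define HEADER_CLASS_MASK 0x0Fu

int main(void)
{
    for (int class_idx = 0; class_idx < 8; class_idx++) {
        // Encoding used by tls_sll_push's default (restore) mode.
        uint8_t expected = (uint8_t)(HEADER_MAGIC | (class_idx & HEADER_CLASS_MASK));
        // Check used by safe-header mode: only the magic nibble is validated.
        assert((expected & 0xF0u) == HEADER_MAGIC);
        // A clobbered byte (e.g. 0x00 after a stray write) fails the check,
        // so the push is rejected instead of silently "healing" the header.
        uint8_t clobbered = 0x00u;
        assert((clobbered & 0xF0u) != HEADER_MAGIC);
        printf("class %d -> header 0x%02X\n", class_idx, (unsigned)expected);
    }
    return 0;
}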