Tiny: fix header/stride mismatch and harden refill paths
Root cause: header-based class indexing (HEADER_CLASSIDX=1) writes a 1-byte header during allocation, but linear carve/refill and the initial slab capacity still used bare class block sizes. The mismatch could overrun a slab's usable space and corrupt freelists, causing a reproducible SEGV at ~100k iterations.

Changes
- Superslab: compute capacity from the effective stride (block_size + header for classes 0..6; class 7 remains headerless) in superslab_init_slab(). Add a debug-only bound check in superslab_alloc_from_slab() to fail fast if a carve would exceed usable bytes.
- Refill (non-P0 and P0): use the header-aware stride for all linear carving and TLS window bump operations. Alignment/validation in tiny_refill_opt.h now also uses the stride, not the raw class size.
- Drain: keep the existing defense-in-depth for the remote sentinel and sanitize nodes before splicing them into the freelist (already present).

Notes
- This unifies the memory layout across alloc/linear-carve/refill behind a single stride definition and keeps class 7 (1024B) headerless as designed; a hedged sketch of the stride math follows this message.
- Debug builds add fail-fast checks; release builds remain lean.

Next
- Re-run the Tiny benches (256B/1024B) in debug to confirm stability, then in release. If any crash persists, bisect with HAKMEM_TINY_P0_BATCH_REFILL=0 to isolate the P0 batch carve, and continue reducing branch misses as planned.
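To make the layout invariant concrete, here is a minimal, self-contained C sketch of the idea, not the actual HAKMEM sources: tiny_stride_for_class, tiny_slab_t, slab_init, slab_carve, TINY_HEADER_BYTES, and SLAB_USABLE_BYTES are hypothetical names standing in for superslab_init_slab()/superslab_alloc_from_slab() and their real layout constants. It shows the slab capacity and the linear-carve cursor both derived from one header-aware stride, plus the debug-only fail-fast bound check.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define TINY_HEADER_BYTES 1u             /* 1-byte class-index header (HEADER_CLASSIDX) */
#define SLAB_USABLE_BYTES (64u * 1024u)  /* usable bytes per slab: an assumed value */

/* Classes 0..6 carry the header; class 7 (1024B) stays headerless by design. */
static inline size_t tiny_stride_for_class(int cls, size_t block_size) {
    return block_size + ((cls <= 6) ? TINY_HEADER_BYTES : 0u);
}

typedef struct {
    uint8_t* base;     /* start of the slab's usable region */
    size_t   carved;   /* bytes linearly carved so far      */
    size_t   capacity; /* block count derived from stride   */
} tiny_slab_t;

/* Capacity must divide usable space by the stride, not the bare block size;
 * dividing by the bare size is exactly the bug: the headers of earlier
 * blocks push the last blocks past the end of the slab. */
static inline void slab_init(tiny_slab_t* s, uint8_t* base,
                             int cls, size_t block_size) {
    size_t stride = tiny_stride_for_class(cls, block_size);
    s->base     = base;
    s->carved   = 0;
    s->capacity = SLAB_USABLE_BYTES / stride;
}

/* Linear carve advances by the same stride. Debug builds fail fast if a
 * carve would step past the usable bytes; release builds stay lean. */
static inline void* slab_carve(tiny_slab_t* s, int cls, size_t block_size) {
    size_t stride = tiny_stride_for_class(cls, block_size);
#ifndef NDEBUG
    assert(s->carved + stride <= SLAB_USABLE_BYTES && "carve exceeds slab");
#endif
    uint8_t* blk = s->base + s->carved;
    s->carved += stride;
    return blk; /* for classes 0..6 the caller writes the 1-byte header here */
}

The point of the single stride definition: with a 1-byte header, a 16B class packs usable/17 blocks rather than usable/16, and every path that steps through the slab advances by the same 17 bytes, so the headers can never push the final block past the slab's usable space.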
@@ -6,6 +6,19 @@
 #include "../pool_tls.h"
 #endif
 
+// Centralized OS mapping boundary to keep syscalls in one place
+static inline void* hak_os_map_boundary(size_t size, uintptr_t site_id) {
+#if HAKMEM_DEBUG_TIMING
+    HKM_TIME_START(t_mmap);
+#endif
+    void* p = hak_alloc_mmap_impl(size);
+#if HAKMEM_DEBUG_TIMING
+    HKM_TIME_END(HKM_CAT_SYSCALL_MMAP, t_mmap);
+#endif
+    (void)site_id; // reserved for future accounting/learning
+    return p;
+}
+
 __attribute__((always_inline))
 inline void* hak_alloc_at(size_t size, hak_callsite_t site) {
 #if HAKMEM_DEBUG_TIMING
@@ -144,33 +157,24 @@ inline void* hak_alloc_at(size_t size, hak_callsite_t site) {
     //
     // Solution: Use mmap for gap when ACE failed (ACE disabled or OOM)
 
+    // Track final fallback mmaps globally
+    extern _Atomic uint64_t g_final_fallback_mmap_count;
+
     void* ptr;
     if (size >= threshold) {
-        // Large allocation (>= 2MB default): use mmap
-#if HAKMEM_DEBUG_TIMING
-        HKM_TIME_START(t_mmap);
-#endif
-        ptr = hak_alloc_mmap_impl(size);
-#if HAKMEM_DEBUG_TIMING
-        HKM_TIME_END(HKM_CAT_SYSCALL_MMAP, t_mmap);
-#endif
+        // Large allocation (>= 2MB default): descend via single boundary
+        atomic_fetch_add(&g_final_fallback_mmap_count, 1);
+        ptr = hak_os_map_boundary(size, site_id);
     } else if (size >= TINY_MAX_SIZE) {
         // Mid-range allocation (1KB-2MB): try mmap as final fallback
         // This handles the gap when ACE is disabled or failed
+        atomic_fetch_add(&g_final_fallback_mmap_count, 1);
         static _Atomic int gap_alloc_count = 0;
         int count = atomic_fetch_add(&gap_alloc_count, 1);
 #if HAKMEM_DEBUG_VERBOSE
-        if (count < 3) {
-            fprintf(stderr, "[HAKMEM] INFO: Using mmap for mid-range size=%zu (ACE disabled or failed)\n", size);
-        }
+        if (count < 3) fprintf(stderr, "[HAKMEM] INFO: mid-gap fallback size=%zu\n", size);
 #endif
-#if HAKMEM_DEBUG_TIMING
-        HKM_TIME_START(t_mmap);
-#endif
-        ptr = hak_alloc_mmap_impl(size);
-#if HAKMEM_DEBUG_TIMING
-        HKM_TIME_END(HKM_CAT_SYSCALL_MMAP, t_mmap);
-#endif
+        ptr = hak_os_map_boundary(size, site_id);
     } else {
         // Should never reach here (size <= TINY_MAX_SIZE should be handled by Tiny)
         static _Atomic int oom_count = 0;