Tiny: fix header/stride mismatch and harden refill paths

- Root cause: header-based class indexing (HAKMEM_TINY_HEADER_CLASSIDX=1) wrote
  a 1-byte header during allocation, but linear carve/refill and the initial
  slab capacity still used the bare class block sizes. The mismatch could carve
  past the slab's usable space and corrupt freelists, causing a reproducible
  SEGV after ~100k iterations (see the stride sketch below).

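A minimal sketch of the stride rule this fix standardizes on (the helper name
is hypothetical; g_tiny_class_sizes[] and HAKMEM_TINY_HEADER_CLASSIDX are the
symbols used in the diff below):

    // Sketch only: the effective per-block stride that capacity, linear
    // carve, and refill must all agree on.
    static inline size_t tiny_stride_for_class(unsigned cls) {
        size_t stride = g_tiny_class_sizes[cls];
    #if HAKMEM_TINY_HEADER_CLASSIDX
        if (cls != 7) {        // class 7 (1024B) stays headerless by design
            stride += 1;       // 1-byte class-index header precedes user data
        }
    #endif
        return stride;
    }
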
Changes
- Superslab: compute slab capacity with the effective stride (block_size +
  1-byte header for classes 0..6; class 7 remains headerless) in
  superslab_init_slab(). Add a debug-only bounds check in
  superslab_alloc_from_slab() to fail fast if a linear carve would exceed the
  slab's usable bytes (see the capacity sketch after this list).
- Refill (non-P0 and P0): use the header-aware stride for all linear carving
  and TLS window bump operations. Ensure that alignment/validation in
  tiny_refill_opt.h also uses the stride, not the raw class size.
- Drain: keep the existing defense-in-depth for the remote sentinel and
  continue sanitizing nodes before splicing them into the freelist.

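A sketch of the capacity computation described in the Superslab bullet above,
assuming superslab_init_slab() divides each slab's usable bytes by the stride;
only the SUPERSLAB_*_USABLE_SIZE constants are taken from the diff, the helper
name and exact shape are illustrative:

    // Sketch: capacity derived from the stride, not the bare class size,
    // so a full linear carve can never run past the usable region.
    static inline uint32_t superslab_capacity_sketch(unsigned cls, int slab_idx) {
        size_t usable = (slab_idx == 0) ? SUPERSLAB_SLAB0_USABLE_SIZE
                                        : SUPERSLAB_SLAB_USABLE_SIZE;
        return (uint32_t)(usable / tiny_stride_for_class(cls)); // see sketch above
    }
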
Notes
- This unifies the memory layout across alloc/linear-carve/refill under a
  single stride definition and keeps class 7 (1024B) headerless as designed
  (see the layout sketch below).
- Debug builds add fail-fast checks; release builds remain lean.
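
A sketch of the per-block layout this implies, assuming the 1-byte header holds
the class index just before the user pointer (which is what
tiny_region_id_write_header() appears to do in the diff); the helper below is
illustrative, not the repo's implementation:

    // Illustrative carve: block_base = slab_base + used * stride; classes
    // 0..6 stamp the class index into byte 0 and return block_base + 1 as
    // the user pointer, class 7 returns block_base directly (headerless).
    static inline void* tiny_carve_sketch(uint8_t* slab_base, uint32_t used,
                                          unsigned cls, size_t stride) {
        uint8_t* block_base = slab_base + (size_t)used * stride;
    #if HAKMEM_TINY_HEADER_CLASSIDX
        if (cls != 7) {
            block_base[0] = (uint8_t)cls;  // assumed header contents
            return block_base + 1;
        }
    #endif
        return block_base;
    }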

Next
- Re-run Tiny benches (256/1024B) in debug to confirm stability, then in
  release. If any remaining crash persists, bisect with HAKMEM_TINY_P0_BATCH_REFILL=0
  to isolate P0 batch carve, and continue reducing branch-miss as planned.

Author: Moe Charm (CI)
Date:   2025-11-09 18:55:50 +09:00
Parent: ab68ee536d
Commit: 1010a961fb
171 changed files with 10238 additions and 634 deletions

@@ -13,6 +13,7 @@
 // ============================================================================
 // Phase 6.24: Allocate from SuperSlab slab (lazy freelist + linear allocation)
+#include "hakmem_tiny_superslab_constants.h"
 static inline void* superslab_alloc_from_slab(SuperSlab* ss, int slab_idx) {
     TinySlabMeta* meta = &ss->slabs[slab_idx];
@@ -70,13 +71,36 @@ static inline void* superslab_alloc_from_slab(SuperSlab* ss, int slab_idx) {
     // This avoids the 4000-8000 cycle cost of building freelist on init
     if (__builtin_expect(meta->freelist == NULL && meta->used < meta->capacity, 1)) {
         // Linear allocation: use canonical tiny_slab_base_for() only
-        size_t block_size = g_tiny_class_sizes[ss->size_class];
+        size_t unit_sz = g_tiny_class_sizes[ss->size_class]
+#if HAKMEM_TINY_HEADER_CLASSIDX
+            + ((ss->size_class != 7) ? 1 : 0)
+#endif
+            ;
         uint8_t* base = tiny_slab_base_for(ss, slab_idx);
-        void* block = (void*)(base + ((size_t)meta->used * block_size));
+        void* block_base = (void*)(base + ((size_t)meta->used * unit_sz));
+#if !HAKMEM_BUILD_RELEASE
+        // Debug safety: Ensure we never carve past slab usable region (capacity mismatch guard)
+        size_t dbg_usable = (slab_idx == 0) ? SUPERSLAB_SLAB0_USABLE_SIZE : SUPERSLAB_SLAB_USABLE_SIZE;
+        uintptr_t dbg_off = (uintptr_t)((uint8_t*)block_base - base);
+        if (__builtin_expect(dbg_off + unit_sz > dbg_usable, 0)) {
+            fprintf(stderr, "[TINY_ALLOC_BOUNDS] cls=%u slab=%d used=%u cap=%u unit=%zu off=%lu usable=%zu\n",
+                    ss->size_class, slab_idx, meta->used, meta->capacity, unit_sz,
+                    (unsigned long)dbg_off, dbg_usable);
+            return NULL;
+        }
+#endif
         meta->used++;
-        tiny_remote_track_on_alloc(ss, slab_idx, block, "linear_alloc", 0);
-        tiny_remote_assert_not_remote(ss, slab_idx, block, "linear_alloc_ret", 0);
-        return block; // Fast path: O(1) pointer arithmetic
+        void* user =
+#if HAKMEM_TINY_HEADER_CLASSIDX
+            tiny_region_id_write_header(block_base, ss->size_class);
+#else
+            block_base;
+#endif
+        if (__builtin_expect(g_debug_remote_guard, 0)) {
+            tiny_remote_track_on_alloc(ss, slab_idx, user, "linear_alloc", 0);
+            tiny_remote_assert_not_remote(ss, slab_idx, user, "linear_alloc_ret", 0);
+        }
+        return user; // Fast path: O(1) pointer arithmetic
     }

     // Freelist mode (after first free())
@@ -125,8 +149,10 @@ static inline void* superslab_alloc_from_slab(SuperSlab* ss, int slab_idx) {
         }
     }
-    tiny_remote_track_on_alloc(ss, slab_idx, block, "freelist_alloc", 0);
-    tiny_remote_assert_not_remote(ss, slab_idx, block, "freelist_alloc_ret", 0);
+    if (__builtin_expect(g_debug_remote_guard, 0)) {
+        tiny_remote_track_on_alloc(ss, slab_idx, block, "freelist_alloc", 0);
+        tiny_remote_assert_not_remote(ss, slab_idx, block, "freelist_alloc_ret", 0);
+    }
     return block;
 }