Tiny: fix header/stride mismatch and harden refill paths

- Root cause: header-based class indexing (HEADER_CLASSIDX=1) wrote a 1-byte
  header during allocation, but linear carve/refill and the initial slab
  capacity still used bare class block sizes. The mismatch could overrun a
  slab's usable space and corrupt freelists, causing a reproducible SEGV after
  ~100k iterations.
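
For reference, a minimal sketch of the unified stride definition, matching the
(class_idx != 7) ? 1 : 0 adjustment in the hunks below; the helper name and the
element type of the size table are assumptions, not the actual source:

  #include <stddef.h>

  extern const size_t g_tiny_class_sizes[8]; /* per-class payload sizes (type assumed) */

  /* Effective stride: payload plus the 1-byte class-index header for classes
   * 0..6; class 7 (1024B) stays headerless by design, so stride == size. */
  static inline size_t tiny_stride_for_class(int class_idx) {
      return g_tiny_class_sizes[class_idx] + ((class_idx != 7) ? 1 : 0);
  }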

Changes
- Superslab: compute capacity with the effective stride (block_size + header
  for classes 0..6; class7 remains headerless) in superslab_init_slab(). Add a
  debug-only bounds check in superslab_alloc_from_slab() to fail fast if a
  carve would exceed usable bytes (see the sketch after this list).
- Refill (non-P0 and P0): use header-aware stride for all linear carving and
  TLS window bump operations. Ensure alignment/validation in tiny_refill_opt.h
  also uses stride, not raw class size.
- Drain: keep existing defense-in-depth for remote sentinel and sanitize nodes
  before splicing into freelist (already present).
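
A sketch of the capacity computation and the debug-only bound check described
above, assuming a pared-down slab meta with used/capacity counters; names are
illustrative, not the actual superslab_init_slab()/superslab_alloc_from_slab()
bodies:

  #include <assert.h>
  #include <stddef.h>
  #include <stdint.h>

  typedef struct { uint32_t used, capacity; } SlabMetaStub; /* minimal stand-in */

  /* Capacity from the header-aware stride instead of the bare class size,
   * so alloc, linear carve, and refill all agree on the block layout. */
  static inline uint32_t slab_capacity(size_t usable_bytes, size_t stride) {
      return (uint32_t)(usable_bytes / stride);
  }

  /* Debug-only fail-fast: carving one more block must stay inside the slab's
   * usable bytes; assert() compiles away in release (NDEBUG) builds. */
  static inline void carve_bound_check(const SlabMetaStub* m,
                                       size_t stride, size_t usable_bytes) {
      assert(((size_t)m->used + 1u) * stride <= usable_bytes);
  }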

Notes
- This unifies the memory layout across alloc/linear-carve/refill with a single
  stride definition and keeps class7 (1024B) headerless as designed.
- Debug builds add fail-fast checks; release builds remain lean.

Next
- Re-run the Tiny benches (256B/1024B) in debug to confirm stability, then in
  release. If a crash persists, bisect with HAKMEM_TINY_P0_BATCH_REFILL=0 to
  isolate the P0 batch carve (see the sketch below), and continue reducing
  branch misses as planned.
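
For the bisect step, the gate could be read with the same lazy getenv pattern
the diff below uses for HAKMEM_TINY_SIMPLE_REFILL_C3; the default-on semantics
and the helper name are assumptions:

  #include <stdlib.h>

  /* Assumed semantics: P0 batch refill stays enabled unless the env var is
   * explicitly "0" (mirrors the g_simple_c3 gate in the diff below). */
  static inline int p0_batch_refill_enabled(void) {
      static int g_p0_batch = -1; /* -1 = not yet read */
      if (__builtin_expect(g_p0_batch == -1, 0)) {
          const char* e = getenv("HAKMEM_TINY_P0_BATCH_REFILL");
          g_p0_batch = (e && *e == '0') ? 0 : 1;
      }
      return g_p0_batch;
  }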
Author: Moe Charm (CI)
Date:   2025-11-09 18:55:50 +09:00
parent  ab68ee536d
commit  1010a961fb
171 changed files with 10238 additions and 634 deletions


@@ -204,14 +204,20 @@ static inline int sll_refill_small_from_ss(int class_idx, int max_take) {
     TinySlabMeta* meta = tls->meta;
     if (!meta) return 0;
-    // Class 5/6/7 special-case: simple batch refill (favor linear carve, minimal branching)
-    if (__builtin_expect(class_idx >= 5, 0)) {
+    // Class 4/5/6/7 special-case: simple batch refill (favor linear carve, minimal branching)
+    // Optional gate for class3 via env: HAKMEM_TINY_SIMPLE_REFILL_C3=1
+    static int g_simple_c3 = -1;
+    if (__builtin_expect(g_simple_c3 == -1, 0)) {
+        const char* e = getenv("HAKMEM_TINY_SIMPLE_REFILL_C3");
+        g_simple_c3 = (e && *e && *e != '0') ? 1 : 0;
+    }
+    if (__builtin_expect(class_idx >= 4 || (class_idx == 3 && g_simple_c3), 0)) {
         uint32_t sll_cap = sll_cap_for_class(class_idx, (uint32_t)TINY_TLS_MAG_CAP);
         int room = (int)sll_cap - (int)g_tls_sll_count[class_idx];
         if (room <= 0) return 0;
         int take = max_take < room ? max_take : room;
         int taken = 0;
-        size_t bs = g_tiny_class_sizes[class_idx];
+        size_t bs = g_tiny_class_sizes[class_idx] + ((class_idx != 7) ? 1 : 0);
         for (; taken < take;) {
             // Linear first (LIKELY for class7)
             if (__builtin_expect(meta->freelist == NULL && meta->used < meta->capacity, 1)) {
@@ -251,7 +257,7 @@ static inline int sll_refill_small_from_ss(int class_idx, int max_take) {
     int take = max_take < room ? max_take : room;
     int taken = 0;
-    size_t bs = g_tiny_class_sizes[class_idx];
+    size_t bs = g_tiny_class_sizes[class_idx] + ((class_idx != 7) ? 1 : 0);
     while (taken < take) {
         void* p = NULL;
         if (__builtin_expect(meta->freelist != NULL, 0)) {
@@ -311,7 +317,7 @@ static inline void* superslab_tls_bump_fast(int class_idx) {
     uint32_t avail = (uint32_t)cap - (uint32_t)used;
     uint32_t chunk = (g_bump_chunk > 0 ? (uint32_t)g_bump_chunk : 1u);
     if (chunk > avail) chunk = avail;
-    size_t bs = g_tiny_class_sizes[tls->ss->size_class];
+    size_t bs = g_tiny_class_sizes[tls->ss->size_class] + ((tls->ss->size_class != 7) ? 1 : 0);
     uint8_t* base = tls->slab_base ? tls->slab_base : tiny_slab_base_for(tls->ss, tls->slab_idx);
     uint8_t* start = base + ((size_t)used * bs);
     // Reserve the chunk once in header (keeps remote-free accounting valid)
@@ -412,7 +418,7 @@ static inline void ultra_refill_sll(int class_idx) {
         }
     }
     if (slab) {
-        size_t bs = g_tiny_class_sizes[class_idx];
+        size_t bs = g_tiny_class_sizes[class_idx] + ((class_idx != 7) ? 1 : 0);
         int remaining = need;
         while (remaining > 0 && slab->free_count > 0) {
             if ((int)g_tls_sll_count[class_idx] >= sll_cap) break;