Tiny: fix header/stride mismatch and harden refill paths

- Root cause: header-based class indexing (HEADER_CLASSIDX=1) wrote a 1-byte
  header during allocation, but linear carve/refill and the initial slab
  capacity calculation still used bare class block sizes. The mismatch could
  overrun a slab's usable space and corrupt freelists, causing a reproducible
  SEGV at ~100k iterations.
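
  A minimal sketch of the mismatch for the 256B class (constants are
  illustrative, not lifted from this codebase):

    /* 1-byte header per block for classes 0..6, per the commit message */
    enum { BLOCK = 256, HDR = 1 };
    enum { REAL_STRIDE = HDR + BLOCK };  /* 257B actually consumed per block */
    enum { OLD_STRIDE  = BLOCK };        /* 256B step used by carve/refill */
    /* Block k carved at base + k*OLD_STRIDE lands its 1-byte header on the
       last payload byte of block k-1; usable/OLD_STRIDE also over-counts
       capacity vs usable/REAL_STRIDE, so the final carves run past the
       slab's usable bytes. */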

Changes
- Superslab: compute capacity with the effective stride (block_size + header
  for classes 0..6; class 7 remains headerless) in superslab_init_slab(). Add
  a debug-only bound check in superslab_alloc_from_slab() to fail fast if a
  carve would exceed usable bytes (see the stride sketch after this list).
- Refill (non-P0 and P0): use header-aware stride for all linear carving and
  TLS window bump operations. Ensure alignment/validation in tiny_refill_opt.h
  also uses stride, not raw class size.
- Drain: keep the existing defense-in-depth for the remote sentinel and the
  sanitization of nodes before splicing them into the freelist (both already
  present).
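
A sketch of the unified stride rule described above (helper names are
hypothetical; the commit's real entry points are superslab_init_slab() and
superslab_alloc_from_slab()):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* One stride definition shared by alloc, linear carve, and refill. */
    static inline size_t tiny_stride_for_class(int class_idx, size_t block_size) {
        /* classes 0..6 carry a 1-byte header; class 7 (1024B) is headerless */
        return (class_idx < 7) ? block_size + 1 : block_size;
    }

    /* Initial slab capacity: count whole strides, not bare block sizes. */
    static inline uint32_t slab_capacity(size_t usable, int ci, size_t bs) {
        return (uint32_t)(usable / tiny_stride_for_class(ci, bs));
    }

    /* Debug-only fail-fast: the next carve must fit within usable bytes. */
    static inline void carve_bound_check(const uint8_t* base, const uint8_t* cur,
                                         size_t usable, int ci, size_t bs) {
        assert((size_t)(cur - base) + tiny_stride_for_class(ci, bs) <= usable);
    }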

Notes
- This unifies the memory layout across alloc, linear carve, and refill behind
  a single stride definition, and keeps class 7 (1024B) headerless as designed.
- Debug builds add fail-fast checks; release builds remain lean.

Next
- Re-run the Tiny benches (256B/1024B) in debug to confirm stability, then in
  release. If a crash persists, bisect with HAKMEM_TINY_P0_BATCH_REFILL=0 to
  isolate the P0 batch carve, and continue reducing branch misses as planned.
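
  For the bisect step, a plausible shape for such an env gate (the flag name
  is from this commit; the reader function is an assumption, not the
  project's actual code):

    #include <stdlib.h>

    /* Treat HAKMEM_TINY_P0_BATCH_REFILL=0 as "disable P0 batch refill". */
    static int p0_batch_refill_enabled(void) {
        const char* e = getenv("HAKMEM_TINY_P0_BATCH_REFILL");
        return !(e && e[0] == '0');
    }
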
Author:  Moe Charm (CI)
Date:    2025-11-09 18:55:50 +09:00
Parent:  ab68ee536d
Commit:  1010a961fb

171 changed files with 10238 additions and 634 deletions


@@ -237,6 +237,21 @@ SuperSlab* adopt_gate_try(int class_idx, TinyTLSSlab* tls) {
   int scan_limit = tiny_reg_scan_max();
   if (scan_limit > reg_size) scan_limit = reg_size;
   uint32_t self_tid = tiny_self_u32();
+  // Local helper (mirror adopt_bind_if_safe) to avoid including alloc inline here
+  auto int adopt_bind_if_safe_local(TinyTLSSlab* tls_l, SuperSlab* ss, int slab_idx, int class_idx_l) {
+    uint32_t self_tid = tiny_self_u32();
+    SlabHandle h = slab_try_acquire(ss, slab_idx, self_tid);
+    if (!slab_is_valid(&h)) return 0;
+    slab_drain_remote_full(&h);
+    if (__builtin_expect(slab_is_safe_to_bind(&h), 1)) {
+      tiny_tls_bind_slab(tls_l, h.ss, h.slab_idx);
+      slab_release(&h);
+      return 1;
+    }
+    slab_release(&h);
+    return 0;
+  }
+
   for (int i = 0; i < scan_limit; i++) {
     SuperSlab* cand = g_super_reg_by_class[class_idx][i];
     if (!(cand && cand->magic == SUPERSLAB_MAGIC)) continue;
@@ -248,25 +263,16 @@ SuperSlab* adopt_gate_try(int class_idx, TinyTLSSlab* tls) {
     }
     if (mask == 0) continue; // No visible freelists in this SS
     int cap = ss_slabs_capacity(cand);
-    // Iterate set bits only
     while (mask) {
       int sidx = __builtin_ctz(mask);
-      mask &= (mask - 1); // clear lowest set bit
+      mask &= (mask - 1);
       if (sidx >= cap) continue;
-      SlabHandle h = slab_try_acquire(cand, sidx, self_tid);
-      if (!slab_is_valid(&h)) continue;
-      if (slab_remote_pending(&h)) {
-        slab_drain_remote_full(&h);
-      }
-      if (slab_is_safe_to_bind(&h)) {
-        tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
+      if (adopt_bind_if_safe_local(tls, cand, sidx, class_idx)) {
         g_adopt_gate_success[class_idx]++;
         g_reg_scan_hits[class_idx]++;
         ROUTE_MARK(14); ROUTE_COMMIT(class_idx, 0x07);
-        slab_release(&h);
-        return h.ss;
+        return cand;
       }
-      slab_release(&h);
     }
   }
   return NULL;
@@ -1455,7 +1461,7 @@ static inline int ultra_batch_for_class(int class_idx) {
     case 1: return 96;   // 16B: best in A/B tests
     case 2: return 96;   // 32B: best in A/B tests
     case 3: return 224;  // 64B: best in A/B tests
-    case 4: return 64;   // 128B
+    case 4: return 96;   // 128B (promote front refill a bit)
     case 5: return 64;   // 256B (promote front refill)
     case 6: return 64;   // 512B (promote front refill)
     default: return 32;  // 1024B and others