Tiny: unify adopt boundary via helper; extend simple refill to classes 5/6; tune front refill for classes 5/6

- Add adopt_bind_if_safe() and apply it across the reuse and registry adopt paths, giving a single boundary (acquire → drain → bind); see the sketch after this list.
- Extend the simplified SLL refill to classes 5/6 to favor linear carve and reduce branching.
- Increase the ultra front refill batch for classes 5/6 to keep the front hot.
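
For reference, here is a minimal sketch of what the new boundary helper could look like, reconstructed from the inline logic that the superslab_refill diffs below delete. The signature, including the TLS parameter type written here as TinyTLS, is inferred from the two call sites and is an assumption; the real helper may order the remote-pending check differently and keeps the g_debug_remote_guard watch hooks that this sketch drops.

    // Sketch only: the signature and the TinyTLS type name are assumptions inferred
    // from the call sites; the helpers used here all appear in the removed inline code.
    // Single adopt boundary: acquire ownership -> drain remote frees -> TOCTOU-safe bind.
    static inline int adopt_bind_if_safe(TinyTLS* tls, SuperSlab* ss, int slab_idx, int class_idx) {
        uint32_t self = tiny_self_u32();
        SlabHandle h = slab_try_acquire(ss, slab_idx, self);   // take ownership atomically
        if (!slab_is_valid(&h)) return 0;                      // owned elsewhere: caller keeps searching
        slab_drain_remote_full(&h);                            // pull remote frees into the freelist
        if (slab_remote_pending(&h) || !slab_is_safe_to_bind(&h)) {
            // Remote frees still in flight, or no usable freelist: hand the slab back.
            slab_release(&h);
            return 0;
        }
        // Optional warm-up: move a few nodes to the front SLL before binding.
        tiny_drain_freelist_to_sll_once(h.ss, h.slab_idx, class_idx);
        tiny_tls_bind_slab(tls, h.ss, h.slab_idx);             // bind: remote_head == 0 is guaranteed here
        return 1;
    }

Both call sites treat a non-zero return as "slab bound and ready"; on zero the registry path keeps searching and the reuse path moves on to the next slab, so the cool-down and reused_slabs bookkeeping stays with the callers.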

Perf (1T, cpu2, 500k, HAKMEM_TINY_ASSUME_1T=1):
- 256B: ~85 ms, cycles ~60M, branch-miss ~11.05% (stable vs the earlier best).
- 1024B: ~73–80 ms depending on the run; cycles ~27–28M, branch-miss ~11%.

Next: audit the remaining adopt callers, trim debug code in the hot path further, and consider FC/QuickSlot ordering tweaks.
Moe Charm (CI)
2025-11-09 17:31:30 +09:00
parent 270109839a
commit ab68ee536d
2 changed files with 9 additions and 64 deletions

File 1 of 2:

@@ -1456,7 +1456,9 @@ static inline int ultra_batch_for_class(int class_idx) {
         case 2: return 96;  // 32B (A/B best)
         case 3: return 224; // 64B (A/B best)
         case 4: return 64;  // 128B
-        default: return 32; // others
+        case 5: return 64;  // 256B (promote front refill)
+        case 6: return 64;  // 512B (promote front refill)
+        default: return 32; // 1024B and others
     }
 }
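
For orientation, a minimal sketch of how a per-class batch like this is typically consumed when refilling the front SLL. Only ultra_batch_for_class() comes from the diff above; the loop shape and the backend_pop()/front_sll_push() names are illustrative assumptions, not this repository's refill code.

    // Illustrative only: apart from ultra_batch_for_class(), every name here is assumed.
    static void front_refill_sketch(int class_idx) {
        int budget = ultra_batch_for_class(class_idx);   // now 64 for classes 5/6 (256B/512B)
        while (budget-- > 0) {
            void* blk = backend_pop(class_idx);          // e.g. linear carve or freelist pop
            if (!blk) break;                             // backend exhausted: stop early
            front_sll_push(class_idx, blk);              // stock the hot front list
        }
    }

A larger batch for 256B/512B means fewer trips back to the slow path per allocation burst, at the cost of a longer individual refill; that is the trade-off behind the "keep front hot" note in the commit message.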

File 2 of 2:

@@ -355,42 +355,10 @@ static SuperSlab* superslab_refill(int class_idx) {
             // (But for Larson, freelist is sufficient)
         }
         if (best >= 0) {
-            // Box: Try to acquire ownership atomically
-            uint32_t self = tiny_self_u32();
-            SlabHandle h = slab_try_acquire(adopt, best, self);
-            if (slab_is_valid(&h)) {
-                slab_drain_remote_full(&h);
-                if (slab_remote_pending(&h)) {
-                    if (__builtin_expect(g_debug_remote_guard, 0)) {
-                        uintptr_t head = atomic_load_explicit(&h.ss->remote_heads[h.slab_idx], memory_order_relaxed);
-                        tiny_remote_watch_note("adopt_remote_pending",
-                                               h.ss,
-                                               h.slab_idx,
-                                               (void*)head,
-                                               0xA255u,
-                                               self,
-                                               0);
-                    }
-                    // Remote still pending; give up adopt path and fall through to normal refill.
-                    slab_release(&h);
-                }
-                // Box 4 Boundary: bind must guarantee remote_head == 0
-                // slab_is_safe_to_bind() makes the check TOCTOU-safe
-                if (slab_is_safe_to_bind(&h)) {
-                    // Optional: move a few nodes to Front SLL to boost next hits
-                    tiny_drain_freelist_to_sll_once(h.ss, h.slab_idx, class_idx);
-                    // Safe to bind (freelist present && remote_head == 0 guaranteed)
-                    tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
-                    if (g_adopt_cool_period > 0) {
-                        g_tls_adopt_cd[class_idx] = g_adopt_cool_period;
-                    }
-                    return h.ss;
-                }
-                // Safe-to-bind failed (no freelist or remote pending) → abort adopt
-                slab_release(&h);
+            if (adopt_bind_if_safe(tls, adopt, best, class_idx)) {
+                if (g_adopt_cool_period > 0) g_tls_adopt_cd[class_idx] = g_adopt_cool_period;
+                return adopt;
             }
             // Failed to acquire or no freelist - continue searching
         }
         // If no freelist found, ignore and continue (optional: republish)
     }
@@ -427,34 +395,9 @@ static SuperSlab* superslab_refill(int class_idx) {
         // Ownership protocol: MUST bind+owner_cas BEFORE drain (see Fix #3 in tiny_refill.h).
         // Remote frees will be drained when the slab is adopted (see tiny_refill.h paths).
-        uint32_t self_tid = tiny_self_u32();
-        SlabHandle h = slab_try_acquire(tls->ss, i, self_tid);
-        if (slab_is_valid(&h)) {
-            if (slab_remote_pending(&h)) {
-                slab_drain_remote_full(&h);
-                if (__builtin_expect(g_debug_remote_guard, 0)) {
-                    uintptr_t head = atomic_load_explicit(&h.ss->remote_heads[h.slab_idx], memory_order_relaxed);
-                    tiny_remote_watch_note("reuse_remote_pending",
-                                           h.ss,
-                                           h.slab_idx,
-                                           (void*)head,
-                                           0xA254u,
-                                           self_tid,
-                                           0);
-                }
-                slab_release(&h);
-                continue;
-            }
-            // Box 4 Boundary: bind must guarantee remote_head == 0
-            if (slab_is_safe_to_bind(&h)) {
-                // Optional: move a few nodes to Front SLL to boost next hits
-                tiny_drain_freelist_to_sll_once(h.ss, h.slab_idx, class_idx);
-                reused_slabs = 1;
-                tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
-                return h.ss;
-            }
-            // Safe-to-bind failed → try the next slab
-            slab_release(&h);
+        if (adopt_bind_if_safe(tls, tls->ss, i, class_idx)) {
+            reused_slabs = 1;
+            return tls->ss;
         }
     }