Tiny: adopt boundary consolidation + class7 simple batch refill + branch hints
- Adopt boundary: keep the drain→bind safety checks and mark the remote-pending case as UNLIKELY in superslab_alloc_from_slab().
- Class 7 (1024B): add a simple batch SLL refill path that prioritizes linear carve; this reduces branchy steps on the hot 1KB path (see the sketch just below).
- Branch hints: favor linear allocation and mark freelist paths as unlikely where appropriate.

A/B (1T, cpu2, 500k iters, with HAKMEM_TINY_ASSUME_1T=1):
- 256B: ~81.3ms (down from ~83.2ms after fast_cap), cycles ~60.0M, branch-miss ~11.07%.
- 1024B: ~72.8ms (down from ~73.5ms), cycles ~27.0M, branch-miss ~11.08%.

Note: branch miss remains ~11%. Next steps: unify adopt calls across all registry paths, trim debug-only checks from the hot path, and consider further fast-path specialization for classes 5-6 to reduce mixed-path divergence.
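The batch refill itself is not part of the hunks below; the following is a rough, illustrative sketch only (TINY_C7_BATCH, tls_sll_head, and tiny_c7_batch_refill are made-up names, and the linear-carve address math is assumed from the existing linear path), not the committed code:

/* Illustrative sketch only -- not the committed implementation.
 * Class 7 (1024B) batch refill: carve up to a small batch of blocks
 * linearly from the slab and push them onto the thread-local SLL in
 * one pass, so the hot path can pop from TLS without re-checking
 * slab state on every allocation. */
#define TINY_C7_BATCH 8  /* hypothetical batch size */

static inline int tiny_c7_batch_refill(SuperSlab* ss, int slab_idx, void** tls_sll_head) {
    TinySlabMeta* meta = &ss->slabs[slab_idx];
    size_t block_size  = g_tiny_class_sizes[ss->size_class];
    uint8_t* base      = tiny_slab_base_for(ss, slab_idx);
    int pushed = 0;

    /* Prefer linear carve: no freelist walk, just advance 'used'.
     * The address math (base + used * block_size) is assumed here. */
    while (pushed < TINY_C7_BATCH &&
           __builtin_expect(meta->freelist == NULL && meta->used < meta->capacity, 1)) {
        void* block = base + (size_t)meta->used * block_size;
        meta->used++;
        *(void**)block = *tls_sll_head;  /* push onto the TLS singly linked list */
        *tls_sll_head  = block;
        pushed++;
    }
    return pushed;  /* 0 => caller falls back to the freelist path */
}

The point of batching is that the slab-state checks are paid once per refill instead of once per allocation, which is where the reduction in branchy steps on the 1KB path comes from.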
@@ -16,8 +16,8 @@
 static inline void* superslab_alloc_from_slab(SuperSlab* ss, int slab_idx) {
     TinySlabMeta* meta = &ss->slabs[slab_idx];
 
-    // Ensure remote queue is drained before handing blocks back to TLS
-    if (atomic_load_explicit(&ss->remote_heads[slab_idx], memory_order_acquire) != 0) {
+    // Ensure remote queue is drained before handing blocks back to TLS (UNLIKELY in 1T)
+    if (__builtin_expect(atomic_load_explicit(&ss->remote_heads[slab_idx], memory_order_acquire) != 0, 0)) {
         uint32_t self_tid = tiny_self_u32();
         SlabHandle h = slab_try_acquire(ss, slab_idx, self_tid);
         if (slab_is_valid(&h)) {
@@ -68,7 +68,7 @@ static inline void* superslab_alloc_from_slab(SuperSlab* ss, int slab_idx) {
 
     // Phase 6.24: Linear allocation mode (freelist == NULL)
     // This avoids the 4000-8000 cycle cost of building freelist on init
-    if (meta->freelist == NULL && meta->used < meta->capacity) {
+    if (__builtin_expect(meta->freelist == NULL && meta->used < meta->capacity, 1)) {
         // Linear allocation: use canonical tiny_slab_base_for() only
         size_t block_size = g_tiny_class_sizes[ss->size_class];
         uint8_t* base = tiny_slab_base_for(ss, slab_idx);
@@ -80,7 +80,7 @@ static inline void* superslab_alloc_from_slab(SuperSlab* ss, int slab_idx) {
     }
 
     // Freelist mode (after first free())
-    if (meta->freelist) {
+    if (__builtin_expect(meta->freelist != NULL, 0)) {
         void* block = meta->freelist;
 
         // CORRUPTION DEBUG: Validate freelist head before popping
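A common convention for keeping such hints readable (not used in this diff, which calls __builtin_expect directly) is a pair of LIKELY/UNLIKELY macros; a minimal sketch:

/* Sketch of the usual GCC/Clang convention; the diff above uses
 * __builtin_expect directly rather than these macros. */
#if defined(__GNUC__) || defined(__clang__)
#  define LIKELY(x)   __builtin_expect(!!(x), 1)
#  define UNLIKELY(x) __builtin_expect(!!(x), 0)
#else
#  define LIKELY(x)   (x)
#  define UNLIKELY(x) (x)
#endif

/* The 1T hot path would then read:
 *   if (LIKELY(meta->freelist == NULL && meta->used < meta->capacity)) { ... linear carve ... }
 *   if (UNLIKELY(remote_pending))                                      { ... drain then bind ... }
 */

With HAKMEM_TINY_ASSUME_1T=1 the remote-pending check is expected to be false almost always, so hinting it as unlikely keeps the linear-carve path as the fall-through case.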