// tiny_refill.h - Refill Boundary box (inline helpers)
#pragma once

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdatomic.h>

#include "hakmem_tiny_superslab.h"
#include "slab_handle.h"
#include "tiny_sticky.h"
#include "tiny_ready.h"
#include "box/mailbox_box.h"
#include "tiny_remote_bg.h"  // Background remote-drain step (best-effort)
#include "tiny_ready_bg.h"   // Ready aggregator (mailbox -> ready hint)
#include "tiny_route.h"      // Route Fingerprint (Box boundary tracing)

// External helpers from main TU
static inline uint32_t tiny_self_u32(void);
static inline void tiny_tls_bind_slab(TinyTLSSlab* tls, SuperSlab* ss, int slab_idx);

// Forward decls in main TU
static inline uintptr_t hot_slot_pop(int class_idx);
static inline uintptr_t bench_pub_pop(int class_idx);
static inline SuperSlab* slab_entry_ss(uintptr_t ent);
static inline int slab_entry_idx(uintptr_t ent);

// Mailbox/Ready consumption always allowed (ENV gate removed)
static inline int tiny_mail_ready_allowed(void) { return 1; }

// Registry scan window (ENV: HAKMEM_TINY_REG_SCAN_MAX, default 256)
static inline int tiny_reg_scan_max(void) {
    static int v = -1;
    if (__builtin_expect(v == -1, 0)) {
        const char* s = getenv("HAKMEM_TINY_REG_SCAN_MAX");
        int defv = 256; // conservative default
        if (s && *s) {
            int parsed = atoi(s);
            v = (parsed > 0) ? parsed : defv;
        } else {
            v = defv;
        }
    }
    return v;
}

// Opportunistic background remote-drain knobs (ENV removed; fixed defaults)
static inline int tiny_bg_remote_tryrate(void) { return 16; }
static inline int tiny_bg_remote_budget_default(void) { return 2; }

// Mid-size simple refill (ENV: HAKMEM_TINY_MID_REFILL_SIMPLE)
static inline int tiny_mid_refill_simple_enabled(void) {
    static int v = -1;
    if (__builtin_expect(v == -1, 0)) {
        const char* s = getenv("HAKMEM_TINY_MID_REFILL_SIMPLE");
        v = (s && *s && *s != '0') ? 1 : 0;
    }
    return v;
}
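// Every candidate source in tiny_refill_try_fast() (Ready, Mailbox, Sticky ring,
// Hot slot, Bench) runs the same SlabHandle dance: decode the packed entry,
// try_acquire, then drain remote frees / bind to TLS / release. The sketch below
// is illustrative only (kept out of the build with #if 0); the helper names are
// the ones used in this file, while `candidate_entry` is a hypothetical input.
#if 0
static inline SuperSlab* refill_adopt_sketch(int class_idx, TinyTLSSlab* tls,
                                             uintptr_t candidate_entry) {
    SuperSlab* ss = slab_entry_ss(candidate_entry);   // decode owning SuperSlab
    int        si = slab_entry_idx(candidate_entry);  // decode slab index
    SlabHandle h = slab_try_acquire(ss, si, tiny_self_u32());
    if (!slab_is_valid(&h)) return NULL;               // lost the race; try the next source
    if (slab_remote_pending(&h)) {                     // remote frees queued: drain, do not bind
        slab_drain_remote_full(&h);
        slab_release(&h);
        return NULL;
    }
    if (slab_is_safe_to_bind(&h)) {                    // adopt as the TLS current slab
        tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
        tiny_sticky_save(class_idx, h.ss, h.slab_idx); // remember it in the sticky ring
        return h.ss;                                   // ownership stays with the binder
    }
    slab_release(&h);
    return NULL;
}
#endif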
// Try a quick adopt from sticky/hot/bench/mailbox (single pass)
static inline SuperSlab* tiny_refill_try_fast(int class_idx, TinyTLSSlab* tls) {
    ROUTE_BEGIN(class_idx);
    ROUTE_MARK(0);

    // Ready list (Box: Ready) - O(1) candidates published by free/publish
    {
        const int rb = 1; // Ready budget fixed (ENV removed)
        for (int attempt = 0; attempt < rb; attempt++) {
            ROUTE_MARK(1); // ready_try
            uintptr_t ent = tiny_mail_ready_allowed() ? tiny_ready_pop(class_idx) : (uintptr_t)0;
            if (!ent) break;
            SuperSlab* rss = slab_entry_ss(ent);
            int ridx = slab_entry_idx(ent);
            uint32_t self_tid = tiny_self_u32();
            SlabHandle h = slab_try_acquire(rss, ridx, self_tid);
            if (slab_is_valid(&h)) {
                if (slab_remote_pending(&h)) {
                    slab_drain_remote_full(&h);
                    slab_release(&h);
                } else if (slab_is_safe_to_bind(&h)) {
                    tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
                    tiny_sticky_save(class_idx, h.ss, h.slab_idx);
                    extern unsigned long long g_rf_hit_ready[];
                    g_rf_hit_ready[class_idx]++;
                    ROUTE_MARK(2);
                    ROUTE_COMMIT(class_idx, 0x01);
                    return h.ss;
                } else {
                    slab_release(&h);
                }
            }
        }
    }

    // One-shot entry trace (ENV: HAKMEM_TINY_RF_TRACE), disabled in release builds
#if !HAKMEM_BUILD_RELEASE
    do {
        static int en = -1;
        static _Atomic int printed[8];
        if (__builtin_expect(en == -1, 0)) {
            const char* e = getenv("HAKMEM_TINY_RF_TRACE");
            en = (e && atoi(e) != 0) ? 1 : 0;
        }
        if (en) {
            int expected = 0;
            (void)atomic_compare_exchange_strong(&printed[class_idx], &expected, 1);
            if (expected == 0) {
                fprintf(stderr, "[RFTRACE] fast-refill enter class=%d\n", class_idx);
            }
        }
    } while (0);
#endif

    // For hot tiny classes (0..3), try the mailbox first to avoid deeper scans
    if (class_idx <= 3) {
        uint32_t self_tid = tiny_self_u32();
        ROUTE_MARK(3); // mail_try
        uintptr_t mail = tiny_mail_ready_allowed() ? mailbox_box_fetch(class_idx) : (uintptr_t)0;
        if (mail) {
            SuperSlab* mss = slab_entry_ss(mail);
            int midx = slab_entry_idx(mail);
            SlabHandle h = slab_try_acquire(mss, midx, self_tid);
            if (slab_is_valid(&h)) {
                if (slab_remote_pending(&h)) {
                    slab_drain_remote_full(&h);
                    slab_release(&h);
                } else if (slab_is_safe_to_bind(&h)) {
                    tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
                    tiny_sticky_save(class_idx, h.ss, h.slab_idx);
                    ROUTE_MARK(4);
                    ROUTE_COMMIT(class_idx, 0x02);
                    return h.ss;
                } else {
                    slab_release(&h);
                }
            }
        }
    }

    // Sticky ring (Box: SlabHandle)
    uint32_t self_tid = tiny_self_u32();
    for (int r = 0; r < TINY_STICKY_RING; r++) {
        ROUTE_MARK(5); // sticky_try
        SuperSlab* last_ss = g_tls_sticky_ss[class_idx][r];
        if (!(last_ss && last_ss->magic == SUPERSLAB_MAGIC)) {
            tiny_sticky_clear(class_idx, r);
            continue;
        }
        int li = g_tls_sticky_idx[class_idx][r];
        int cap = ss_slabs_capacity(last_ss);
        if (li < 0 || li >= cap) {
            tiny_sticky_clear(class_idx, r);
            continue;
        }
        // Box: Try to acquire ownership
        SlabHandle h = slab_try_acquire(last_ss, li, self_tid);
        if (slab_is_valid(&h)) {
            if (slab_remote_pending(&h)) {
                slab_drain_remote_full(&h);
                if (__builtin_expect(g_debug_remote_guard, 0)) {
                    uintptr_t head = atomic_load_explicit(&h.ss->remote_heads[h.slab_idx],
                                                          memory_order_relaxed);
                    tiny_remote_watch_note("sticky_remote_pending", h.ss, h.slab_idx,
                                           (void*)head, 0xA250u, self_tid, 0);
                }
                slab_release(&h);
            } else if (slab_is_safe_to_bind(&h)) {
                tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
                ROUTE_MARK(6);
                ROUTE_COMMIT(class_idx, 0x03);
                return h.ss;
            } else {
                slab_release(&h);
            }
        }
        int has_remote = (atomic_load_explicit(&last_ss->remote_heads[li], memory_order_acquire) != 0);
        if (!has_remote) tiny_sticky_clear(class_idx, r);
    }

    // Hot slot
    {
        ROUTE_MARK(7); // hot_try
        uintptr_t hs = hot_slot_pop(class_idx);
        if (hs) {
            SuperSlab* hss = slab_entry_ss(hs);
            int hidx = slab_entry_idx(hs);
            // Box: Try to acquire ownership
            SlabHandle h = slab_try_acquire(hss, hidx, self_tid);
            if (slab_is_valid(&h)) {
                if (slab_remote_pending(&h)) {
                    slab_drain_remote_full(&h);
                    if (__builtin_expect(g_debug_remote_guard, 0)) {
                        uintptr_t head = atomic_load_explicit(&h.ss->remote_heads[h.slab_idx],
                                                              memory_order_relaxed);
                        tiny_remote_watch_note("hot_remote_pending", h.ss, h.slab_idx,
                                               (void*)head, 0xA251u, self_tid, 0);
                    }
                    slab_release(&h);
                } else if (slab_is_safe_to_bind(&h)) {
                    tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
                    tiny_sticky_save(class_idx, h.ss, h.slab_idx);
                    ROUTE_MARK(8);
                    ROUTE_COMMIT(class_idx, 0x04);
                    return h.ss;
                } else {
                    slab_release(&h);
                }
            }
        }
    }

    // Bench
    {
        ROUTE_MARK(9); // bench_try
        uintptr_t entb = bench_pub_pop(class_idx);
        if (entb) {
            SuperSlab* bss = slab_entry_ss(entb);
            int bidx = slab_entry_idx(entb);
            // Box: Try to acquire ownership
            SlabHandle h = slab_try_acquire(bss, bidx, self_tid);
            if (slab_is_valid(&h)) {
                if (slab_remote_pending(&h)) {
                    slab_drain_remote_full(&h);
                    if (__builtin_expect(g_debug_remote_guard, 0)) {
                        uintptr_t head = atomic_load_explicit(&h.ss->remote_heads[h.slab_idx],
                                                              memory_order_relaxed);
                        tiny_remote_watch_note("bench_remote_pending", h.ss, h.slab_idx,
                                               (void*)head, 0xA252u, self_tid, 0);
                    }
                    slab_release(&h);
                } else if (slab_is_safe_to_bind(&h)) {
                    tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
                    tiny_sticky_save(class_idx, h.ss, h.slab_idx);
                    ROUTE_MARK(10);
                    ROUTE_COMMIT(class_idx, 0x05);
                    return h.ss;
                } else {
                    slab_release(&h);
                }
            }
        }
    }

    // Mailbox (for non-hot classes)
    if (class_idx > 3) {
        ROUTE_MARK(3); // mail_try (non-hot)
        uintptr_t mail = tiny_mail_ready_allowed() ? mailbox_box_fetch(class_idx) : (uintptr_t)0;
        if (mail) {
            SuperSlab* mss = slab_entry_ss(mail);
            int midx = slab_entry_idx(mail);
            // Box: Try to acquire ownership
            SlabHandle h = slab_try_acquire(mss, midx, self_tid);
            if (slab_is_valid(&h)) {
                if (slab_remote_pending(&h)) {
                    slab_drain_remote_full(&h);
                    if (__builtin_expect(g_debug_remote_guard, 0)) {
                        uintptr_t head = atomic_load_explicit(&h.ss->remote_heads[h.slab_idx],
                                                              memory_order_relaxed);
                        tiny_remote_watch_note("mailbox_remote_pending", h.ss, h.slab_idx,
                                               (void*)head, 0xA253u, self_tid, 0);
                    }
                    slab_release(&h);
                } else if (slab_is_safe_to_bind(&h)) {
                    tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
                    tiny_sticky_save(class_idx, h.ss, h.slab_idx);
                    ROUTE_MARK(4);
                    ROUTE_COMMIT(class_idx, 0x02);
                    return h.ss;
                } else {
                    slab_release(&h);
                }
            }
        }
    }
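    // Route commit codes used in the single pass above (Box boundary tracing):
    //   0x01 = Ready, 0x02 = Mailbox, 0x03 = Sticky ring,
    //   0x04 = Hot slot, 0x05 = Bench, 0xFF = no candidate (slab/slow fallback).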
    // Opportunistic background remote-drain (Box: Remote Drain Coalescer)
    // NOTE: BG Remote feature permanently disabled (dead code cleanup 2025-11-27)
    // This block was guarded by g_bg_remote_enable, which defaulted to 0
    do {
        // Always skip - BG Remote feature removed
        break;

        // TLS miss tick per class
        static __thread unsigned miss_tick[8];
        unsigned t = ++miss_tick[class_idx];
        int period = tiny_bg_remote_tryrate();
        if (__builtin_expect(period <= 1 || (t % (unsigned)period) == 0, 0)) {
            int budget = tiny_bg_remote_budget_default();
            tiny_remote_bg_drain_step(class_idx, budget);
            // Quick second chance from Ready after the drain
            uintptr_t ent2 = tiny_mail_ready_allowed() ? tiny_ready_pop(class_idx) : (uintptr_t)0;
            if (ent2) {
                SuperSlab* ss2 = slab_entry_ss(ent2);
                int idx2 = slab_entry_idx(ent2);
                uint32_t self_tid = tiny_self_u32();
                SlabHandle h2 = slab_try_acquire(ss2, idx2, self_tid);
                if (slab_is_valid(&h2)) {
                    if (slab_is_safe_to_bind(&h2)) {
                        tiny_tls_bind_slab(tls, h2.ss, h2.slab_idx);
                        tiny_sticky_save(class_idx, h2.ss, h2.slab_idx);
                        extern unsigned long long g_rf_hit_ready[];
                        g_rf_hit_ready[class_idx]++;
                        slab_release(&h2);
                        return h2.ss;
                    }
                    slab_release(&h2);
                }
            }
            // Ready Aggregator: peek mailbox and surface one hint into Ready
            do {
                const int agg_en = 0; // Ready aggregator ENV removed (fixed OFF)
                if (agg_en && tiny_mail_ready_allowed()) {
                    const int mb = 1;
                    tiny_ready_bg_aggregate_step(class_idx, mb);
                    uintptr_t ent3 = tiny_ready_pop(class_idx);
                    if (ent3) {
                        SuperSlab* ss3 = slab_entry_ss(ent3);
                        int idx3 = slab_entry_idx(ent3);
                        uint32_t self_tid = tiny_self_u32();
                        SlabHandle h3 = slab_try_acquire(ss3, idx3, self_tid);
                        if (slab_is_valid(&h3)) {
                            if (slab_is_safe_to_bind(&h3)) {
                                tiny_tls_bind_slab(tls, h3.ss, h3.slab_idx);
                                tiny_sticky_save(class_idx, h3.ss, h3.slab_idx);
                                extern unsigned long long g_rf_hit_ready[];
                                g_rf_hit_ready[class_idx]++;
                                slab_release(&h3);
                                return h3.ss;
                            }
                            slab_release(&h3);
                        }
                    }
                }
            } while (0);
        }
    } while (0);

    ROUTE_COMMIT(class_idx, 0xFF); // no candidate hit; fall back to slab/slow path
    return NULL;
}
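// Illustrative caller (not compiled): a refill slow path would typically consult
// tiny_refill_try_fast() before carving out a fresh slab. The helper names
// tiny_tls_slab_for_class() and tiny_refill_slow_new_slab() are hypothetical
// placeholders for this sketch; the real fallback path lives in the main TU.
#if 0
static inline SuperSlab* tiny_refill_example(int class_idx) {
    TinyTLSSlab* tls = tiny_tls_slab_for_class(class_idx); // hypothetical TLS lookup
    SuperSlab* ss = tiny_refill_try_fast(class_idx, tls);  // single-pass adopt attempt
    if (ss) return ss;                                      // adopted an existing slab
    return tiny_refill_slow_new_slab(class_idx, tls);       // hypothetical slow path
}
#endif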