hakmem/core/tiny_refill.h
Commit 6b791b97d4 by Moe Charm (CI):
ENV Cleanup: Delete Ultra HEAP & BG Remote dead code (-1,096 LOC)
Deleted files (11):
- core/ultra/ directory (6 files: tiny_ultra_heap.*, tiny_ultra_page_arena.*)
- core/front/tiny_ultrafront.h
- core/tiny_ultra_fast.inc.h
- core/hakmem_tiny_ultra_front.inc.h
- core/hakmem_tiny_ultra_simple.inc
- core/hakmem_tiny_ultra_batch_box.inc

Edited files (10):
- core/hakmem_tiny.c: Remove Ultra HEAP #includes, move ultra_batch_for_class()
- core/hakmem_tiny_tls_state_box.inc: Delete TinyUltraFront, g_ultra_simple
- core/hakmem_tiny_phase6_wrappers_box.inc: Delete ULTRA_SIMPLE block
- core/hakmem_tiny_alloc.inc: Delete Ultra-Front code block
- core/hakmem_tiny_init.inc: Delete ULTRA_SIMPLE ENV loading
- core/hakmem_tiny_remote_target.{c,h}: Delete g_bg_remote_enable/batch
- core/tiny_refill.h: Remove BG Remote check (always break)
- core/hakmem_tiny_background.inc: Delete BG Remote drain loop

Deleted ENV variables:
- HAKMEM_TINY_ULTRA_HEAP (build flag, undefined)
- HAKMEM_TINY_ULTRA_L0
- HAKMEM_TINY_ULTRA_HEAP_DUMP
- HAKMEM_TINY_ULTRA_PAGE_DUMP
- HAKMEM_TINY_ULTRA_FRONT
- HAKMEM_TINY_BG_REMOTE (no getenv, dead code)
- HAKMEM_TINY_BG_REMOTE_BATCH (no getenv, dead code)
- HAKMEM_TINY_ULTRA_SIMPLE (references only)

Impact:
- Code reduction: -1,096 lines
- Binary size: 305KB → 304KB (-1KB)
- Build: PASS
- Sanity: 15.69M ops/s (3 runs avg)
- Larson: 1 crash observed (seed 43, likely pre-existing instability)

Notes:
- Ultra HEAP never compiled (#if HAKMEM_TINY_ULTRA_HEAP undefined; see the sketch below)
- BG Remote variables never initialized (g_bg_remote_enable always 0)
- Ultra SLIM (ultra_slim_alloc_box.h) preserved (active 4-layer path)
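
Both notes can be illustrated with a minimal, hypothetical C sketch (stand-alone
example, not the hakmem sources):

    #include <stdio.h>

    /* Stand-in for the removed global: no getenv() call, no other writer. */
    static int g_bg_remote_enable = 0;

    int main(void) {
    #if HAKMEM_TINY_ULTRA_HEAP        /* undefined -> the preprocessor treats it as 0 */
        puts("Ultra HEAP path");      /* never compiled into the binary */
    #endif
        if (g_bg_remote_enable)       /* always 0, so never taken */
            puts("BG Remote drain");  /* dead at runtime */
        return 0;
    }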

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
Committed: 2025-11-27 04:35:47 +09:00


// tiny_refill.h - Refill Boundary box (inline helpers)
#pragma once
#include <stdatomic.h>
#include "hakmem_tiny_superslab.h"
#include "slab_handle.h"
#include "tiny_sticky.h"
#include "tiny_ready.h"
#include "box/mailbox_box.h"
#include "tiny_remote_bg.h" // Background remote-drain step (best-effort)
#include "tiny_ready_bg.h" // Ready aggregator (mailbox→ready hint)
#include "tiny_route.h" // Route Fingerprint (Box boundary tracing)
#include <stdio.h>
#include <stdlib.h>
// External helpers from main TU
static inline uint32_t tiny_self_u32(void);
static inline void tiny_tls_bind_slab(TinyTLSSlab* tls, SuperSlab* ss, int slab_idx);
// Forward decls in main TU
static inline uintptr_t hot_slot_pop(int class_idx);
static inline uintptr_t bench_pub_pop(int class_idx);
static inline SuperSlab* slab_entry_ss(uintptr_t ent);
static inline int slab_entry_idx(uintptr_t ent);
// Mailbox/Ready consumption always allowed (ENV gate removed)
static inline int tiny_mail_ready_allowed(void) { return 1; }
// Registry scan window (ENV: HAKMEM_TINY_REG_SCAN_MAX, default 256)
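// Parsed once per process and cached in a function-local static; later changes
// to the environment variable have no effect on a running process.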
static inline int tiny_reg_scan_max(void) {
    static int v = -1;
    if (__builtin_expect(v == -1, 0)) {
        const char* s = getenv("HAKMEM_TINY_REG_SCAN_MAX");
        int defv = 256; // conservative default
        if (s && *s) {
            int parsed = atoi(s);
            v = (parsed > 0) ? parsed : defv;
        } else {
            v = defv;
        }
    }
    return v;
}
// Opportunistic background remote-drain knobs (ENV removed; fixed defaults)
static inline int tiny_bg_remote_tryrate(void) { return 16; }
static inline int tiny_bg_remote_budget_default(void) { return 2; }
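// Within this header these fixed knobs are referenced only by the permanently
// skipped BG-remote block in tiny_refill_try_fast() below.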
// Mid-size simple refill (ENV: HAKMEM_TINY_MID_REFILL_SIMPLE)
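// Opt-in: enabled when the variable is set to a non-empty value that does not
// start with '0'; the result is parsed once and cached.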
static inline int tiny_mid_refill_simple_enabled(void) {
    static int v = -1;
    if (__builtin_expect(v == -1, 0)) {
        const char* s = getenv("HAKMEM_TINY_MID_REFILL_SIMPLE");
        v = (s && *s && *s != '0') ? 1 : 0;
    }
    return v;
}
// Try a quick adopt from sticky/hot/bench/mailbox (single pass)
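// Candidate order: Ready list -> mailbox (hot classes 0..3) -> sticky ring ->
// hot slot -> bench publish -> mailbox (classes > 3). The trailing background
// remote-drain step is permanently skipped; on a miss this returns NULL and the
// caller falls back to the regular slab/slow path.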
static inline SuperSlab* tiny_refill_try_fast(int class_idx, TinyTLSSlab* tls) {
    ROUTE_BEGIN(class_idx); ROUTE_MARK(0);
    // Ready list (Box: Ready) — O(1) candidates published by free/publish
    {
        const int rb = 1; // Ready budget fixed (ENV removed)
        for (int attempt = 0; attempt < rb; attempt++) {
            ROUTE_MARK(1); // ready_try
            uintptr_t ent = tiny_mail_ready_allowed() ? tiny_ready_pop(class_idx) : (uintptr_t)0;
            if (!ent) break;
            SuperSlab* rss = slab_entry_ss(ent);
            int ridx = slab_entry_idx(ent);
            uint32_t self_tid = tiny_self_u32();
            SlabHandle h = slab_try_acquire(rss, ridx, self_tid);
            if (slab_is_valid(&h)) {
                if (slab_remote_pending(&h)) {
                    slab_drain_remote_full(&h);
                    slab_release(&h);
                } else if (slab_is_safe_to_bind(&h)) {
                    tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
                    tiny_sticky_save(class_idx, h.ss, h.slab_idx);
                    extern unsigned long long g_rf_hit_ready[];
                    g_rf_hit_ready[class_idx]++;
                    ROUTE_MARK(2); ROUTE_COMMIT(class_idx, 0x01);
                    return h.ss;
                } else {
                    slab_release(&h);
                }
            }
        }
    }
    // One-shot entry trace (env: HAKMEM_TINY_RF_TRACE), disabled in release builds
#if !HAKMEM_BUILD_RELEASE
    do {
        static int en = -1; static _Atomic int printed[8];
        if (__builtin_expect(en == -1, 0)) {
            const char* e = getenv("HAKMEM_TINY_RF_TRACE");
            en = (e && atoi(e) != 0) ? 1 : 0;
        }
        if (en) {
            int expected = 0;
            (void)atomic_compare_exchange_strong(&printed[class_idx], &expected, 1);
            if (expected == 0) {
                fprintf(stderr, "[RFTRACE] fast-refill enter class=%d\n", class_idx);
            }
        }
    } while (0);
#endif
    // For hot tiny classes (0..3), try mailbox first to avoid deeper scans
    if (class_idx <= 3) {
        uint32_t self_tid = tiny_self_u32();
        ROUTE_MARK(3); // mail_try
        uintptr_t mail = tiny_mail_ready_allowed() ? mailbox_box_fetch(class_idx) : (uintptr_t)0;
        if (mail) {
            SuperSlab* mss = slab_entry_ss(mail);
            int midx = slab_entry_idx(mail);
            SlabHandle h = slab_try_acquire(mss, midx, self_tid);
            if (slab_is_valid(&h)) {
                if (slab_remote_pending(&h)) {
                    slab_drain_remote_full(&h);
                    slab_release(&h);
                } else if (slab_is_safe_to_bind(&h)) {
                    tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
                    tiny_sticky_save(class_idx, h.ss, h.slab_idx);
                    ROUTE_MARK(4); ROUTE_COMMIT(class_idx, 0x02);
                    return h.ss;
                } else {
                    slab_release(&h);
                }
            }
        }
    }
    // Sticky ring (Box: SlabHandle)
    uint32_t self_tid = tiny_self_u32();
    for (int r = 0; r < TINY_STICKY_RING; r++) {
        ROUTE_MARK(5); // sticky_try
        SuperSlab* last_ss = g_tls_sticky_ss[class_idx][r];
        if (!(last_ss && last_ss->magic == SUPERSLAB_MAGIC)) { tiny_sticky_clear(class_idx, r); continue; }
        int li = g_tls_sticky_idx[class_idx][r];
        int cap = ss_slabs_capacity(last_ss);
        if (li < 0 || li >= cap) { tiny_sticky_clear(class_idx, r); continue; }
        // Box: Try to acquire ownership
        SlabHandle h = slab_try_acquire(last_ss, li, self_tid);
        if (slab_is_valid(&h)) {
            if (slab_remote_pending(&h)) {
                slab_drain_remote_full(&h);
                if (__builtin_expect(g_debug_remote_guard, 0)) {
                    uintptr_t head = atomic_load_explicit(&h.ss->remote_heads[h.slab_idx], memory_order_relaxed);
                    tiny_remote_watch_note("sticky_remote_pending",
                                           h.ss,
                                           h.slab_idx,
                                           (void*)head,
                                           0xA250u,
                                           self_tid,
                                           0);
                }
                slab_release(&h);
            } else if (slab_is_safe_to_bind(&h)) {
                tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
                ROUTE_MARK(6); ROUTE_COMMIT(class_idx, 0x03); return h.ss;
            } else {
                slab_release(&h);
            }
        }
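        // Keep this sticky slot while remote frees are still pending so a later
        // pass can drain them; otherwise clear the ring entry.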
        int has_remote = (atomic_load_explicit(&last_ss->remote_heads[li], memory_order_acquire) != 0);
        if (!has_remote) tiny_sticky_clear(class_idx, r);
    }
    // Hot slot
    {
        ROUTE_MARK(7); // hot_try
        uintptr_t hs = hot_slot_pop(class_idx);
        if (hs) {
            SuperSlab* hss = slab_entry_ss(hs);
            int hidx = slab_entry_idx(hs);
            // Box: Try to acquire ownership
            SlabHandle h = slab_try_acquire(hss, hidx, self_tid);
            if (slab_is_valid(&h)) {
                if (slab_remote_pending(&h)) {
                    slab_drain_remote_full(&h);
                    if (__builtin_expect(g_debug_remote_guard, 0)) {
                        uintptr_t head = atomic_load_explicit(&h.ss->remote_heads[h.slab_idx], memory_order_relaxed);
                        tiny_remote_watch_note("hot_remote_pending",
                                               h.ss,
                                               h.slab_idx,
                                               (void*)head,
                                               0xA251u,
                                               self_tid,
                                               0);
                    }
                    slab_release(&h);
                } else if (slab_is_safe_to_bind(&h)) {
                    tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
                    tiny_sticky_save(class_idx, h.ss, h.slab_idx);
                    ROUTE_MARK(8); ROUTE_COMMIT(class_idx, 0x04); return h.ss;
                } else {
                    slab_release(&h);
                }
            }
        }
    }
    // Bench
    {
        ROUTE_MARK(9); // bench_try
        uintptr_t entb = bench_pub_pop(class_idx);
        if (entb) {
            SuperSlab* bss = slab_entry_ss(entb);
            int bidx = slab_entry_idx(entb);
            // Box: Try to acquire ownership
            SlabHandle h = slab_try_acquire(bss, bidx, self_tid);
            if (slab_is_valid(&h)) {
                if (slab_remote_pending(&h)) {
                    slab_drain_remote_full(&h);
                    if (__builtin_expect(g_debug_remote_guard, 0)) {
                        uintptr_t head = atomic_load_explicit(&h.ss->remote_heads[h.slab_idx], memory_order_relaxed);
                        tiny_remote_watch_note("bench_remote_pending",
                                               h.ss,
                                               h.slab_idx,
                                               (void*)head,
                                               0xA252u,
                                               self_tid,
                                               0);
                    }
                    slab_release(&h);
                } else if (slab_is_safe_to_bind(&h)) {
                    tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
                    tiny_sticky_save(class_idx, h.ss, h.slab_idx);
                    ROUTE_MARK(10); ROUTE_COMMIT(class_idx, 0x05); return h.ss;
                } else {
                    slab_release(&h);
                }
            }
        }
    }
    // Mailbox (for non-hot classes)
    if (class_idx > 3) {
        ROUTE_MARK(3); // mail_try (non-hot)
        uintptr_t mail = tiny_mail_ready_allowed() ? mailbox_box_fetch(class_idx) : (uintptr_t)0;
        if (mail) {
            SuperSlab* mss = slab_entry_ss(mail);
            int midx = slab_entry_idx(mail);
            // Box: Try to acquire ownership
            SlabHandle h = slab_try_acquire(mss, midx, self_tid);
            if (slab_is_valid(&h)) {
                if (slab_remote_pending(&h)) {
                    slab_drain_remote_full(&h);
                    if (__builtin_expect(g_debug_remote_guard, 0)) {
                        uintptr_t head = atomic_load_explicit(&h.ss->remote_heads[h.slab_idx], memory_order_relaxed);
                        tiny_remote_watch_note("mailbox_remote_pending",
                                               h.ss,
                                               h.slab_idx,
                                               (void*)head,
                                               0xA253u,
                                               self_tid,
                                               0);
                    }
                    slab_release(&h);
                } else if (slab_is_safe_to_bind(&h)) {
                    tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
                    tiny_sticky_save(class_idx, h.ss, h.slab_idx);
                    ROUTE_MARK(4); ROUTE_COMMIT(class_idx, 0x02); return h.ss;
                } else {
                    slab_release(&h);
                }
            }
        }
    }
    // Opportunistic background remote-drain (Box: Remote Drain Coalescer)
    // NOTE: BG Remote feature permanently disabled (dead code cleanup 2025-11-27)
    // This block was guarded by g_bg_remote_enable which defaulted to 0
    do {
        // Always skip - BG Remote feature removed
        break;
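        // ---- Unreachable from here on: the original BG-remote drain logic below is
        // retained in-source after the unconditional break above and never executes. ----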
        // TLS miss tick per class
        static __thread unsigned miss_tick[8];
        unsigned t = ++miss_tick[class_idx];
        int period = tiny_bg_remote_tryrate();
        if (__builtin_expect(period <= 1 || (t % (unsigned)period) == 0, 0)) {
            int budget = tiny_bg_remote_budget_default();
            tiny_remote_bg_drain_step(class_idx, budget);
            // Quick second chance from Ready after drain
            uintptr_t ent2 = tiny_mail_ready_allowed() ? tiny_ready_pop(class_idx) : (uintptr_t)0;
            if (ent2) {
                SuperSlab* ss2 = slab_entry_ss(ent2);
                int idx2 = slab_entry_idx(ent2);
                uint32_t self_tid = tiny_self_u32();
                SlabHandle h2 = slab_try_acquire(ss2, idx2, self_tid);
                if (slab_is_valid(&h2)) {
                    if (slab_is_safe_to_bind(&h2)) {
                        tiny_tls_bind_slab(tls, h2.ss, h2.slab_idx);
                        tiny_sticky_save(class_idx, h2.ss, h2.slab_idx);
                        extern unsigned long long g_rf_hit_ready[];
                        g_rf_hit_ready[class_idx]++;
                        slab_release(&h2);
                        return h2.ss;
                    }
                    slab_release(&h2);
                }
            }
            // Ready Aggregator: peek mailbox and surface one hint into Ready
            do {
                const int agg_en = 0; // Ready aggregator ENV removed (fixed OFF)
                if (agg_en && tiny_mail_ready_allowed()) {
                    const int mb = 1;
                    tiny_ready_bg_aggregate_step(class_idx, mb);
                    uintptr_t ent3 = tiny_ready_pop(class_idx);
                    if (ent3) {
                        SuperSlab* ss3 = slab_entry_ss(ent3);
                        int idx3 = slab_entry_idx(ent3);
                        uint32_t self_tid = tiny_self_u32();
                        SlabHandle h3 = slab_try_acquire(ss3, idx3, self_tid);
                        if (slab_is_valid(&h3)) {
                            if (slab_is_safe_to_bind(&h3)) {
                                tiny_tls_bind_slab(tls, h3.ss, h3.slab_idx);
                                tiny_sticky_save(class_idx, h3.ss, h3.slab_idx);
                                extern unsigned long long g_rf_hit_ready[];
                                g_rf_hit_ready[class_idx]++;
                                slab_release(&h3);
                                return h3.ss;
                            }
                            slab_release(&h3);
                        }
                    }
                }
            } while (0);
        }
    } while (0);
    ROUTE_COMMIT(class_idx, 0xFF); // no candidate hit; fall back to slab/slow
    return NULL;
}