#pragma once
// tiny_adopt_refill_box.h - unification of the adopt/refill boundary.
// Purpose: gather the adoption of ready/mailbox/sticky/hot/bench/hint
// candidates and the bind/drain ordering into one place, reducing callers
// to thin wrappers.
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include "../hakmem_tiny_superslab.h"
#include "../hakmem_trace_master.h"
#include "../slab_handle.h"
#include "../tiny_sticky.h"
#include "../tiny_ready.h"
#include "mailbox_box.h"
#include "../tiny_remote_bg.h"
#include "../tiny_ready_bg.h"
#include "../tiny_route.h"
// Precondition: helpers such as tiny_mail_ready_allowed(), tiny_reg_scan_max(),
// hot_slot_pop(), bench_pub_pop(), slab_entry_ss(), and slab_entry_idx() are
// already defined on the tiny_refill.h side (keep the include order consistent
// when using this header).
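//
// Adoption order implemented below. Every stage follows the same per-candidate
// pattern: slab_try_acquire(); drain fully and release if remote frees are
// pending; bind into TLS and return if safe to bind; otherwise release.
//   1. Ready list                   (ROUTE_COMMIT code 0x01)
//   2. Mailbox, hot classes 0..3    (0x02)
//   3. Sticky ring                  (0x03)
//   4. Hot slot                     (0x04)
//   5. Bench                        (0x05)
//   6. Mailbox, classes > 3         (0x02)
// Candidates popped from these sources are packed SuperSlab*/slab-index
// entries decoded with slab_entry_ss() / slab_entry_idx(). On success the TLS
// slab is already bound; a NULL return means nothing was adopted and the
// caller falls back to the slab/slow path (ROUTE_COMMIT 0xFF).
//
// Illustrative caller shape (a sketch only; the actual wrapper lives in
// tiny_refill.h and may differ, and tiny_refill_adopt_or_slow() /
// tiny_refill_slow() are hypothetical names used purely for illustration):
//
//   static inline SuperSlab* tiny_refill_adopt_or_slow(int ci, TinyTLSSlab* tls) {
//       SuperSlab* ss = tiny_adopt_refill_box(ci, tls);
//       if (ss) return ss;                 // adopted: tls is already bound to a slab
//       return tiny_refill_slow(ci, tls);  // hypothetical slow-path fallback
//   }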
static inline SuperSlab* tiny_adopt_refill_box(int class_idx, TinyTLSSlab* tls) {
ROUTE_BEGIN(class_idx); ROUTE_MARK(0);
// Ready list (Box: Ready) — O(1) candidates published by free/publish
{
const int rb = 1; // Ready budget fixed (ENV removed)
for (int attempt = 0; attempt < rb; attempt++) {
ROUTE_MARK(1); // ready_try
uintptr_t ent = tiny_mail_ready_allowed() ? tiny_ready_pop(class_idx) : (uintptr_t)0;
if (!ent) break;
SuperSlab* rss = slab_entry_ss(ent);
int ridx = slab_entry_idx(ent);
uint32_t self_tid = tiny_self_u32();
SlabHandle h = slab_try_acquire(rss, ridx, self_tid);
if (slab_is_valid(&h)) {
if (slab_remote_pending(&h)) {
slab_drain_remote_full(&h);
slab_release(&h);
} else if (slab_is_safe_to_bind(&h)) {
tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
tiny_sticky_save(class_idx, h.ss, h.slab_idx);
extern unsigned long long g_rf_hit_ready[];
g_rf_hit_ready[class_idx]++;
ROUTE_MARK(2); ROUTE_COMMIT(class_idx, 0x01);
return h.ss;
} else {
slab_release(&h);
}
}
}
}
// One-shot entry trace (env: HAKMEM_TINY_RF_TRACE or HAKMEM_TRACE=refill)
// Phase 4c: Now uses hak_trace_check() for unified trace control
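// The printed[] compare-exchange below makes this a one-shot message per
// class for the process lifetime (the array is sized for the 8 tiny classes);
// the whole block compiles away in release builds.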
#if !HAKMEM_BUILD_RELEASE
do {
static int en = -1; static _Atomic int printed[8];
if (__builtin_expect(en == -1, 0)) {
en = hak_trace_check("HAKMEM_TINY_RF_TRACE", "refill");
}
if (en) {
int expected = 0;
(void)atomic_compare_exchange_strong(&printed[class_idx], &expected, 1);
if (expected == 0) {
fprintf(stderr, "[RFTRACE] fast-refill enter class=%d\n", class_idx);
}
}
} while (0);
#endif
// For hot tiny classes (0..3), try mailbox first to avoid deeper scans
if (class_idx <= 3) {
uint32_t self_tid = tiny_self_u32();
ROUTE_MARK(3); // mail_try
uintptr_t mail = tiny_mail_ready_allowed() ? mailbox_box_fetch(class_idx) : (uintptr_t)0;
if (mail) {
SuperSlab* mss = slab_entry_ss(mail);
int midx = slab_entry_idx(mail);
SlabHandle h = slab_try_acquire(mss, midx, self_tid);
if (slab_is_valid(&h)) {
if (slab_remote_pending(&h)) {
slab_drain_remote_full(&h);
slab_release(&h);
} else if (slab_is_safe_to_bind(&h)) {
tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
tiny_sticky_save(class_idx, h.ss, h.slab_idx);
ROUTE_MARK(4); ROUTE_COMMIT(class_idx, 0x02);
return h.ss;
} else {
slab_release(&h);
}
}
}
}
// Sticky ring (Box: SlabHandle)
uint32_t self_tid = tiny_self_u32();
for (int r = 0; r < TINY_STICKY_RING; r++) {
ROUTE_MARK(5); // sticky_try
SuperSlab* last_ss = g_tls_sticky_ss[class_idx][r];
if (!(last_ss && last_ss->magic == SUPERSLAB_MAGIC)) { tiny_sticky_clear(class_idx, r); continue; }
int li = g_tls_sticky_idx[class_idx][r];
int cap = ss_slabs_capacity(last_ss);
if (li < 0 || li >= cap) { tiny_sticky_clear(class_idx, r); continue; }
// Box: Try to acquire ownership
SlabHandle h = slab_try_acquire(last_ss, li, self_tid);
if (slab_is_valid(&h)) {
if (slab_remote_pending(&h)) {
slab_drain_remote_full(&h);
if (__builtin_expect(g_debug_remote_guard, 0)) {
uintptr_t head = atomic_load_explicit(&h.ss->remote_heads[h.slab_idx], memory_order_relaxed);
tiny_remote_watch_note("sticky_remote_pending",
h.ss,
h.slab_idx,
(void*)head,
0xA250u,
self_tid,
0);
}
slab_release(&h);
} else if (slab_is_safe_to_bind(&h)) {
tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
ROUTE_MARK(6); ROUTE_COMMIT(class_idx, 0x03); return h.ss;
} else {
slab_release(&h);
}
}
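// Keep the sticky slot while remote frees are still pending so a later pass
// can drain them; only a slot with no remote backlog is cleared as stale.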
int has_remote = (atomic_load_explicit(&last_ss->remote_heads[li], memory_order_acquire) != 0);
if (!has_remote) tiny_sticky_clear(class_idx, r);
}
// Hot slot
{
ROUTE_MARK(7); // hot_try
uintptr_t hs = hot_slot_pop(class_idx);
if (hs) {
SuperSlab* hss = slab_entry_ss(hs);
int hidx = slab_entry_idx(hs);
// Box: Try to acquire ownership
SlabHandle h = slab_try_acquire(hss, hidx, self_tid);
if (slab_is_valid(&h)) {
if (slab_remote_pending(&h)) {
slab_drain_remote_full(&h);
if (__builtin_expect(g_debug_remote_guard, 0)) {
uintptr_t head = atomic_load_explicit(&h.ss->remote_heads[h.slab_idx], memory_order_relaxed);
tiny_remote_watch_note("hot_remote_pending",
h.ss,
h.slab_idx,
(void*)head,
0xA251u,
self_tid,
0);
}
slab_release(&h);
} else if (slab_is_safe_to_bind(&h)) {
tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
tiny_sticky_save(class_idx, h.ss, h.slab_idx);
ROUTE_MARK(8); ROUTE_COMMIT(class_idx, 0x04); return h.ss;
} else {
slab_release(&h);
}
}
}
}
// Bench
{
ROUTE_MARK(9); // bench_try
uintptr_t entb = bench_pub_pop(class_idx);
if (entb) {
SuperSlab* bss = slab_entry_ss(entb);
int bidx = slab_entry_idx(entb);
// Box: Try to acquire ownership
SlabHandle h = slab_try_acquire(bss, bidx, self_tid);
if (slab_is_valid(&h)) {
if (slab_remote_pending(&h)) {
slab_drain_remote_full(&h);
if (__builtin_expect(g_debug_remote_guard, 0)) {
uintptr_t head = atomic_load_explicit(&h.ss->remote_heads[h.slab_idx], memory_order_relaxed);
tiny_remote_watch_note("bench_remote_pending",
h.ss,
h.slab_idx,
(void*)head,
0xA252u,
self_tid,
0);
}
slab_release(&h);
} else if (slab_is_safe_to_bind(&h)) {
tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
tiny_sticky_save(class_idx, h.ss, h.slab_idx);
ROUTE_MARK(10); ROUTE_COMMIT(class_idx, 0x05); return h.ss;
} else {
slab_release(&h);
}
}
}
}
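// Non-hot classes (class_idx > 3) consult the mailbox only after the sticky,
// hot-slot, and bench sources; hot classes already checked it above.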
// Mailbox (for non-hot classes)
if (class_idx > 3) {
ROUTE_MARK(3); // mail_try (non-hot)
uintptr_t mail = tiny_mail_ready_allowed() ? mailbox_box_fetch(class_idx) : (uintptr_t)0;
if (mail) {
SuperSlab* mss = slab_entry_ss(mail);
int midx = slab_entry_idx(mail);
// Box: Try to acquire ownership
SlabHandle h = slab_try_acquire(mss, midx, self_tid);
if (slab_is_valid(&h)) {
if (slab_remote_pending(&h)) {
slab_drain_remote_full(&h);
if (__builtin_expect(g_debug_remote_guard, 0)) {
uintptr_t head = atomic_load_explicit(&h.ss->remote_heads[h.slab_idx], memory_order_relaxed);
tiny_remote_watch_note("mailbox_remote_pending",
h.ss,
h.slab_idx,
(void*)head,
0xA253u,
self_tid,
0);
}
slab_release(&h);
} else if (slab_is_safe_to_bind(&h)) {
tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
tiny_sticky_save(class_idx, h.ss, h.slab_idx);
ROUTE_MARK(4); ROUTE_COMMIT(class_idx, 0x02); return h.ss;
} else {
slab_release(&h);
}
}
}
}
// Opportunistic background remote-drain (Box: Remote Drain Coalescer)
// NOTE: BG Remote feature permanently disabled (dead code cleanup 2025-11-27)
// This block was guarded by g_bg_remote_enable which defaulted to 0
do {
// Always skip - BG Remote feature removed
break;
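// Everything below in this do-block is unreachable: it is the retained body
// of the removed BG-remote path, skipped by the break above.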
// TLS miss tick per class
static __thread unsigned miss_tick[8];
unsigned t = ++miss_tick[class_idx];
int period = tiny_bg_remote_tryrate();
if (__builtin_expect(period <= 1 || (t % (unsigned)period) == 0, 0)) {
int budget = tiny_bg_remote_budget_default();
tiny_remote_bg_drain_step(class_idx, budget);
// Quick second chance from Ready after drain
uintptr_t ent2 = tiny_mail_ready_allowed() ? tiny_ready_pop(class_idx) : (uintptr_t)0;
if (ent2) {
SuperSlab* ss2 = slab_entry_ss(ent2);
int idx2 = slab_entry_idx(ent2);
uint32_t self_tid = tiny_self_u32();
SlabHandle h2 = slab_try_acquire(ss2, idx2, self_tid);
if (slab_is_valid(&h2)) {
if (slab_is_safe_to_bind(&h2)) {
tiny_tls_bind_slab(tls, h2.ss, h2.slab_idx);
tiny_sticky_save(class_idx, h2.ss, h2.slab_idx);
extern unsigned long long g_rf_hit_ready[];
g_rf_hit_ready[class_idx]++;
slab_release(&h2);
return h2.ss;
}
slab_release(&h2);
}
}
// Ready Aggregator: peek mailbox and surface one hint into Ready
do {
const int agg_en = 0; // Ready aggregator ENV removed (fixed OFF)
if (agg_en && tiny_mail_ready_allowed()) {
const int mb = 1;
tiny_ready_bg_aggregate_step(class_idx, mb);
uintptr_t ent3 = tiny_ready_pop(class_idx);
if (ent3) {
SuperSlab* ss3 = slab_entry_ss(ent3);
int idx3 = slab_entry_idx(ent3);
uint32_t self_tid = tiny_self_u32();
SlabHandle h3 = slab_try_acquire(ss3, idx3, self_tid);
if (slab_is_valid(&h3)) {
if (slab_is_safe_to_bind(&h3)) {
tiny_tls_bind_slab(tls, h3.ss, h3.slab_idx);
tiny_sticky_save(class_idx, h3.ss, h3.slab_idx);
extern unsigned long long g_rf_hit_ready[];
g_rf_hit_ready[class_idx]++;
slab_release(&h3);
return h3.ss;
}
slab_release(&h3);
}
}
}
} while (0);
}
} while (0);
ROUTE_COMMIT(class_idx, 0xFF); // no candidate hit; fall back to slab/slow
return NULL;
}