Feat: Add experimental TLS Bind Box path in Unified Cache

- Added an experimental path in unified_cache_refill to test ss_tls_bind_one for the C7 class.
- Guarded by the HAKMEM_WARM_TLS_BIND_C7 environment variable and compiled only into debug builds.
- Updated Page Box comments to clarify future TLS Bind Box integration.
This commit is contained in:
Moe Charm (CI)
2025-12-05 20:05:11 +09:00
parent 45b2ccbe45
commit 4c986fa9d1
2 changed files with 46 additions and 6 deletions

View File

@ -13,11 +13,12 @@
// `max` BASE pointers using per-page freelist before falling back.
// - When disabled for a class: the box returns 0 and caller uses legacy path.
//
// - TLS Bind:
// Future direction: The Page Box will select a (SuperSlab, slab_idx)
// pair and use ss_tls_bind_one() to bind it to TLS. Subsequent
// allocations will carve directly from that TLS-bound slab,
// clarifying the boundary between Superslab Backend and TLS Bind.
// - TLS Bind Responsibility:
// Page Box selects the appropriate (SuperSlab, slab_idx) pair for the
// current request (prioritizing EMPTY or HOT slabs).
// It then delegates the binding operation to ss_tls_bind_one() (TLS Bind Box).
// This separates "Resource Selection" (Page Box) from "Context Binding"
// (TLS Bind Box), clarifying the boundary with Superslab Backend.
//
// ENV:
// HAKMEM_TINY_PAGE_BOX_CLASSES (optional)

View File

@ -517,14 +517,53 @@ hak_base_ptr_t unified_cache_refill(int class_idx) {
#endif
SuperSlab* warm_ss = tiny_warm_pool_pop(class_idx);
if (warm_ss) {
#if !HAKMEM_BUILD_RELEASE
// FUTURE: TLS Bind Box Integration
// Currently we carve directly from warm_ss via slab_carve_from_ss().
// To unify logic, we should eventually:
// 1. Choose a slab index (via tiny_page_box or heuristic).
// 2. Bind it to TLS via ss_tls_bind_one(..., warm_ss, slab_idx, ...).
// 3. Fall through to TLS-based allocation.
// EXPERIMENTAL: Test TLS Bind Box connectivity for C7 (Debug only)
static int g_warm_tls_bind_c7 = -1;
if (g_warm_tls_bind_c7 == -1) {
const char* e = getenv("HAKMEM_WARM_TLS_BIND_C7");
g_warm_tls_bind_c7 = (e && *e && *e != '0') ? 1 : 0;
}
if (g_warm_tls_bind_c7 && class_idx == 7) {
// Find a slab index in this SuperSlab that matches our class
int cap = ss_slabs_capacity(warm_ss);
int slab_idx = -1;
// Simple heuristic: find first slab belonging to this class
// Note: In real logic, we should pick the *best* slab (e.g. from PageBox)
for (int i = 0; i < cap; i++) {
if (tiny_get_class_from_ss(warm_ss, i) == class_idx) {
slab_idx = i;
break;
}
}
if (slab_idx >= 0) {
TinyTLSSlab* tls = &g_tls_slabs[class_idx];
// Try to bind. If successful, we have "connected" the path.
// For now, we still fall through to slab_carve_from_ss() to do the actual
// work, but the side effect (TLS updated) confirms connectivity.
// In a future step, we would 'break' here and let the TLS path handle it.
uint32_t tid = (uint32_t)(uintptr_t)pthread_self();
if (ss_tls_bind_one(class_idx, tls, warm_ss, slab_idx, tid)) {
static int logged = 0;
if (!logged) {
fprintf(stderr, "[WARM_TLS_BIND] C7 bind success: ss=%p slab=%d\n",
(void*)warm_ss, slab_idx);
logged = 1;
}
}
}
}
#if !HAKMEM_BUILD_RELEASE
atomic_fetch_add_explicit(&g_dbg_warm_pop_hits, 1, memory_order_relaxed);
#endif
// HOT PATH: Warm pool hit, try to carve directly