// ss_tls_bind_box.h - TLS Bind Box
//
// Purpose:
// - Encapsulates the logic for binding a SuperSlab slab to a thread's TLS.
// - Ensures consistent initialization (superslab_init_slab) and TLS state updates.
// - Acts as a "public-ish" internal API for Shared Pool, Warm Pool, and Page Box.

#ifndef HAK_SS_TLS_BIND_BOX_H
#define HAK_SS_TLS_BIND_BOX_H
#include "../hakmem_tiny_superslab.h"
#include "../tiny_tls.h"
#include "../hakmem_tiny_config.h"
#include "../box/tiny_page_box.h" // For tiny_page_box_on_new_slab()
#include <stdio.h>
#include <stdatomic.h>

// Forward declaration of g_tiny_class_sizes in case it is not already visible.
// CRITICAL FIX: type must match core/hakmem_tiny_config.h (const size_t, not uint16_t).
extern const size_t g_tiny_class_sizes[TINY_NUM_CLASSES];

// TLS Bind Box: initialize one slab within a SuperSlab and bind it to TLS.
// Returns 1 on success, 0 on failure (TLS is left in a safe state).
//
// Arguments:
//   class_idx: Target size class index (0-7)
//   tls:       Pointer to thread-local TLS slab state (e.g. &g_tls_slabs[class_idx])
//   ss:        Target SuperSlab
//   slab_idx:  Index of the slab within the SuperSlab
//   owner_tid: Thread ID of the caller (used for slab ownership initialization)
//
// Pre-conditions:
//   - ss and slab_idx must be valid and acquired by the caller.
//   - tls must be the correct TLS state for the current thread/class.
//
// Post-conditions:
//   - On success: TLS is updated to point to the new slab, and the slab is initialized.
//   - On failure: TLS is reset to a clean empty state.
//
// Future Usage:
//   - Shared Pool: superslab_refill() calls this after acquiring from the global pool.
//   - Warm Pool:   Will call this after popping a warm SuperSlab to re-bind it to TLS.
//   - Page Box:    Will call this to bind a specific page (slab) chosen from its list.
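//
// Usage sketch (illustrative only, not defined in this header): a refill path
// might bind a freshly acquired SuperSlab roughly like this. The names
// acquire_superslab_for_class() and my_tid are hypothetical placeholders for
// the caller's own acquisition logic and thread id; g_tls_slabs is the TLS
// array mentioned in the Arguments section above.
//
//   TinyTLSSlab* tls = &g_tls_slabs[class_idx];
//   int slab_idx = 0;
//   SuperSlab* ss = acquire_superslab_for_class(class_idx, &slab_idx); // hypothetical
//   if (ss && ss_tls_bind_one(class_idx, tls, ss, slab_idx, my_tid)) {
//       // Success: tls->ss / tls->meta / tls->slab_base now describe the
//       // initialized slab and the fast allocation path can proceed.
//   } else {
//       // Failure: ss_tls_bind_one() left TLS in a safe empty state; the
//       // caller decides whether to retry with another SuperSlab.
//   }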
static inline int ss_tls_bind_one(int class_idx,
                                  TinyTLSSlab* tls,
                                  SuperSlab* ss,
                                  int slab_idx,
                                  uint32_t owner_tid)
{
    // Defensive argument validation (tls is dereferenced below).
    if (!tls || !ss || slab_idx < 0 || class_idx < 0 || class_idx >= TINY_NUM_CLASSES) {
        return 0;
    }

    // Initialize slab metadata for this class/thread.
    // NOTE:
    // - superslab_init_slab must not recursively call superslab_refill().
    // - class_idx will be reflected in slab_meta->class_idx.
    superslab_init_slab(ss,
                        slab_idx,
                        g_tiny_class_sizes[class_idx],
                        owner_tid);

    // CRITICAL FIX: Ensure class_idx is set after init.
    // New SuperSlabs start with meta->class_idx=0 (mmap zero-init).
    // superslab_init_slab() only sets it if meta->class_idx==255.
    // We must explicitly set it to the requested class to avoid C0/C7 confusion.
    TinySlabMeta* meta = &ss->slabs[slab_idx];
    uint8_t old_cls = meta->class_idx;
    meta->class_idx = (uint8_t)class_idx;

#if !HAKMEM_BUILD_RELEASE
    if (class_idx == 7 && old_cls != class_idx) {
        fprintf(stderr, "[SUPERSLAB_REFILL_FIX_C7] ss=%p slab=%d old_cls=%u new_cls=%d\n",
                (void*)ss, slab_idx, (unsigned)old_cls, class_idx);
    }
#endif

#if HAKMEM_BUILD_RELEASE
    (void)old_cls; // only read by debug-build logging; silence unused-variable warnings
    // One-shot diagnostic for class 7 bindings in release builds.
    static _Atomic int rel_c7_bind_logged = 0;
    if (class_idx == 7 &&
        atomic_load_explicit(&rel_c7_bind_logged, memory_order_relaxed) == 0) {
        fprintf(stderr,
                "[REL_C7_BIND] ss=%p slab=%d cls=%u cap=%u used=%u carved=%u\n",
                (void*)ss,
                slab_idx,
                (unsigned)meta->class_idx,
                (unsigned)meta->capacity,
                (unsigned)meta->used,
                (unsigned)meta->carved);
        atomic_store_explicit(&rel_c7_bind_logged, 1, memory_order_relaxed);
    }
#else
    // One-shot (per thread) diagnostic for class 7 bindings in debug builds.
    static __thread int dbg_c7_bind_logged = 0;
    if (class_idx == 7 && dbg_c7_bind_logged == 0) {
        fprintf(stderr,
                "[DBG_C7_BIND] ss=%p slab=%d old_cls=%u new_cls=%u cap=%u used=%u carved=%u\n",
                (void*)ss,
                slab_idx,
                (unsigned)old_cls,
                (unsigned)meta->class_idx,
                (unsigned)meta->capacity,
                (unsigned)meta->used,
                (unsigned)meta->carved);
        dbg_c7_bind_logged = 1;
    }
#endif

    // Bind this slab to TLS for fast subsequent allocations.
    // Inline implementation of tiny_tls_bind_slab() to avoid header dependencies.
    // Original logic:
    //   tls->ss        = ss;
    //   tls->slab_idx  = (uint8_t)slab_idx;
    //   tls->meta      = &ss->slabs[slab_idx];
    //   tls->slab_base = tiny_slab_base_for(ss, slab_idx);
    //   tiny_page_box_on_new_slab(tls);
    tls->ss        = ss;
    tls->slab_idx  = (uint8_t)slab_idx;
    tls->meta      = meta; // already computed above
    tls->slab_base = tiny_slab_base_for(ss, slab_idx);

    // Notify Tiny Page Box (if enabled for this class)
    tiny_page_box_on_new_slab(tls);

    // Sanity check: TLS must now describe this slab for this class.
    // On failure, revert TLS to safe state and return 0.
    if (!(tls->ss == ss &&
          tls->slab_idx == (uint8_t)slab_idx &&
          tls->meta != NULL &&
          tls->meta->class_idx == (uint8_t)class_idx &&
          tls->slab_base != NULL)) {
        tls->ss        = NULL;
        tls->meta      = NULL;
        tls->slab_base = NULL;
        tls->slab_idx  = 0;
        return 0;
    }

    return 1;
}

#endif // HAK_SS_TLS_BIND_BOX_H