// hakmem_tiny_slow.inc
// Slow path allocation implementation

#include "box/tiny_heap_env_box.h" // tiny_c7_heap_mode_enabled()
#include "box/tiny_c7_hotbox.h"    // tiny_c7_alloc_fast (via TinyHeapBox)

// Slow path allocation function
// Phase 6-1.7: Export for box refactor (Box 5 needs access from hakmem.c)
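//
// Fallback order (summarized from the code below):
//   1. C7 TinyHeap front (when enabled)   -> tiny_c7_alloc_fast()
//   2. HotMag refill + pop (classes 0-3 only)
//   3. TLS SLL via the Box API            -> tls_sll_pop()
//   4. TLS list (legacy small-mag), with a refill from the TLS-cached slab
//   5. Superslab via the Box API wrapper  -> hak_tiny_alloc_superslab_box()
// Marked cold/noinline, presumably to keep this body out of the inlined fast path.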
#ifdef HAKMEM_TINY_PHASE6_BOX_REFACTOR
void* __attribute__((cold, noinline)) hak_tiny_alloc_slow(size_t size, int class_idx) {
#else
static void* __attribute__((cold, noinline)) hak_tiny_alloc_slow(size_t size, int class_idx) {
#endif
    (void)size; // size is already validated by caller

    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES) {
        return NULL;
    }

    // When the C7 TinyHeap front is ON, delegate to TinyHeapBox instead of
    // going through the old slow path.
    if (__builtin_expect(class_idx == 7 && tiny_c7_heap_mode_enabled(), 0)) {
        return tiny_c7_alloc_fast(size);
    }

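    // HotMag: refill the per-thread hot magazine, then pop from it. Limited to
    // the four smallest classes (0-3), presumably the hottest allocation sizes.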
    // Try refilling from HotMag
    if (g_hotmag_enable && class_idx <= 3) {
        TinyHotMag* hm = &g_tls_hot_mag[class_idx];
        hotmag_try_refill(class_idx, hm);
        void* ptr = hotmag_pop(class_idx);
        if (ptr) { HAK_RET_ALLOC(class_idx, ptr); }
    }

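    // Note: a TLS SLL hit below returns the pointer directly, without going
    // through HAK_RET_ALLOC like the HotMag / TLS-list / Superslab paths do.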
    // Try TLS SLL via Box (the official Phase 12 path)
    // C7 is headerless: skip TLS/SLL, as per the existing spec
    if (g_tls_sll_enable && class_idx != 7) {
        // Box: a single API handles the TLS SLL (head/count/next managed internally)
        void* ptr = NULL;
        if (tls_sll_pop(class_idx, &ptr)) {
            return ptr;
        }
    }

    // Try TLS list (legacy small-mag) via the existing API (do not touch the struct directly)
    if (g_tls_list_enable && class_idx != 7) {
        TinyTLSList* tls = &g_tls_lists[class_idx];

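        // TINY_REMOTE_SENTINEL presumably marks a head handed off to the
        // remote-free path; dropping the cached list here avoids dereferencing it.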
        // Fail-Fast: guard against poisoned head (remote sentinel)
        if (__builtin_expect((uintptr_t)tls->head == TINY_REMOTE_SENTINEL, 0)) {
            tls->head = NULL;
            tls->count = 0;
        }

        if (tls->count > 0) {
            void* ptr = tls_list_pop(tls, class_idx);
            if (ptr) {
                HAK_RET_ALLOC(class_idx, ptr);
            }
        }

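        // Refill batch size: use the list's refill_low setting when non-zero,
        // otherwise fall back to a batch of 32 objects.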
        // Try refilling TLS list from TLS-cached Superslab slab
        uint32_t want = tls->refill_low > 0 ? tls->refill_low : 32;
        if (tls_refill_from_tls_slab(class_idx, tls, want) > 0) {
            void* ptr = tls_list_pop(tls, class_idx);
            if (ptr) {
                HAK_RET_ALLOC(class_idx, ptr);
            }
        }
    }

    // Background coalescing/aggregation (ENV removed, fixed OFF)
    do {
        (void)class_idx; // Background steps disabled
    } while (0);

    // Final fallback: allocate from superslab via Box API wrapper (Stage A)
    // NOTE:
    // - hak_tiny_alloc_superslab_box() is a thin façade over the legacy
    //   per-class SuperslabHead backend in Phase 12 Stage A.
    // - Callers (slow path) no longer depend on internal Superslab layout.
    void* ss_ptr = hak_tiny_alloc_superslab_box(class_idx);

    if (ss_ptr) {
        HAK_RET_ALLOC(class_idx, ss_ptr);
    }

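    // Debug-only diagnostics: dump TLS state and, when HAKMEM_TINY_ALLOC_DEBUG
    // is set, print at most one [ALLOC-SLOW] line per size class (guarded by a
    // per-class CAS on printed[]).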
#if !HAKMEM_BUILD_RELEASE
    tiny_alloc_dump_tls_state(class_idx, "slow_fail", &g_tls_slabs[class_idx]);
    // Optional one-shot debug when the final slow path fails
    static int g_alloc_dbg = -1;
    if (__builtin_expect(g_alloc_dbg == -1, 0)) {
        const char* e = getenv("HAKMEM_TINY_ALLOC_DEBUG");
        g_alloc_dbg = (e && atoi(e) != 0) ? 1 : 0;
    }
    if (g_alloc_dbg) {
        static _Atomic int printed[8];
        int exp = 0;
        if (atomic_compare_exchange_strong(&printed[class_idx], &exp, 1)) {
            fprintf(stderr, "[ALLOC-SLOW] hak_tiny_alloc_superslab_box returned NULL class=%d size=%zu\n",
                    class_idx, size);
        }
    }
#endif

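    // Reached only when every tier above failed: ss_ptr should be NULL here
    // (the success case returns via HAK_RET_ALLOC above), so the caller sees
    // an allocation failure.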
    return ss_ptr;
}