// hakmem_tiny_slow.inc
// Slow path allocation implementation

#include "box/tiny_heap_env_box.h" // tiny_c7_heap_mode_enabled()
#include "box/tiny_c7_hotbox.h"    // tiny_c7_alloc_fast (via TinyHeapBox)

// Slow path allocation function
// Phase 6-1.7: Export for box refactor (Box 5 needs access from hakmem.c)
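//
// Fallback order implemented below:
//   1. C7 TinyHeap front (class 7 only, when enabled): delegate to tiny_c7_alloc_fast()
//   2. HotMag refill + pop (classes 0..3, when g_hotmag_enable)
//   3. TLS SLL pop via tls_sll_pop() (all classes except 7)
//   4. TLS list pop, then refill from the TLS-cached slab (all classes except 7)
//   5. Superslab via hak_tiny_alloc_superslab_box() as the final fallback
// HAK_RET_ALLOC(class_idx, ptr) is assumed here to do the common "account and return"
// bookkeeping for a successful allocation; see its definition for the exact behavior.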
#ifdef HAKMEM_TINY_PHASE6_BOX_REFACTOR
void* __attribute__((cold, noinline)) hak_tiny_alloc_slow(size_t size, int class_idx) {
#else
static void* __attribute__((cold, noinline)) hak_tiny_alloc_slow(size_t size, int class_idx) {
#endif
    (void)size; // size is already validated by caller

    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES) {
        return NULL;
    }

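    // class_idx is normally derived by the caller from the request size; the range check
    // above is a defensive guard rather than an expected failure path.
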
    // When the C7 TinyHeap front is ON, skip the legacy slow path and delegate to TinyHeapBox.
    if (__builtin_expect(class_idx == 7 && tiny_c7_heap_mode_enabled(), 0)) {
        return tiny_c7_alloc_fast(size);
    }

    // Try refilling from HotMag
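    // HotMag note: only the smallest classes (0..3) keep a per-thread hot magazine; refill it
    // once and retry a pop before falling through to the heavier TLS caches below.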
    if (g_hotmag_enable && class_idx <= 3) {
        TinyHotMag* hm = &g_tls_hot_mag[class_idx];
        hotmag_try_refill(class_idx, hm);
        void* ptr = hotmag_pop(class_idx);
        if (ptr) { HAK_RET_ALLOC(class_idx, ptr); }
    }

    // Try TLS SLL via Box (official Phase 12 path)
    // C7 is headerless: skip TLS/SLL as per the existing spec
    if (g_tls_sll_enable && class_idx != 7) {
        // Box: a single API handles the TLS SLL (head/count/next are managed internally)
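        // tls_sll_pop() is expected to return nonzero on a hit and store the block through
        // its out-parameter; on a miss execution falls through to the TLS list path below.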
        void* ptr = NULL;
        if (tls_sll_pop(class_idx, &ptr)) {
            return ptr;
        }
    }

    // Try TLS list (legacy small-mag) via the existing API (never touch the struct directly)
    if (g_tls_list_enable && class_idx != 7) {
        TinyTLSList* tls = &g_tls_lists[class_idx];

        // Fail-Fast: guard against poisoned head (remote sentinel)
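        // TINY_REMOTE_SENTINEL is a poison value (presumably left by the remote-free side);
        // treat such a list as empty instead of popping through the sentinel.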
        if (__builtin_expect((uintptr_t)tls->head == TINY_REMOTE_SENTINEL, 0)) {
            tls->head = NULL;
            tls->count = 0;
        }

        if (tls->count > 0) {
            void* ptr = tls_list_pop(tls, class_idx);
            if (ptr) {
                HAK_RET_ALLOC(class_idx, ptr);
            }
        }

        // Try refilling TLS list from TLS-cached Superslab slab
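        // Refill sizing: tls->refill_low is used as the per-class refill batch here; 32 is
        // only the fallback batch when it has not been configured (zero).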
        uint32_t want = tls->refill_low > 0 ? tls->refill_low : 32;
        if (tls_refill_from_tls_slab(class_idx, tls, want) > 0) {
            void* ptr = tls_list_pop(tls, class_idx);
            if (ptr) {
                HAK_RET_ALLOC(class_idx, ptr);
            }
        }
    }

    // Background coalescing/aggregation (ENV removed, fixed OFF)
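    // The block below is intentionally a no-op: the environment switch that used to gate
    // these background steps has been removed, so they stay disabled.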
    do {
        (void)class_idx; // Background steps disabled
    } while (0);

    // Final fallback: allocate from superslab via Box API wrapper (Stage A)
    // NOTE:
    // - hak_tiny_alloc_superslab_box() is a thin façade over the legacy
    //   per-class SuperslabHead backend in Phase 12 Stage A.
    // - Callers (slow path) no longer depend on internal Superslab layout.
    void* ss_ptr = hak_tiny_alloc_superslab_box(class_idx);
    if (ss_ptr) {
        HAK_RET_ALLOC(class_idx, ss_ptr);
    }

#if !HAKMEM_BUILD_RELEASE
    tiny_alloc_dump_tls_state(class_idx, "slow_fail", &g_tls_slabs[class_idx]);
    // Optional one-shot debug when the final slow path fails
    static int g_alloc_dbg = -1;
    if (__builtin_expect(g_alloc_dbg == -1, 0)) {
        const char* e = getenv("HAKMEM_TINY_ALLOC_DEBUG");
        g_alloc_dbg = (e && atoi(e) != 0) ? 1 : 0;
    }
    if (g_alloc_dbg) {
        static _Atomic int printed[8];
        int exp = 0;
        if (atomic_compare_exchange_strong(&printed[class_idx], &exp, 1)) {
            fprintf(stderr, "[ALLOC-SLOW] hak_tiny_alloc_superslab_box returned NULL class=%d size=%zu\n",
                    class_idx, size);
        }
    }
#endif

    return ss_ptr;
}