// hakmem_tiny_slow.inc
// Slow path allocation implementation

// Slow path allocation function
// Phase 6-1.7: Export for box refactor (Box 5 needs access from hakmem.c)
#ifdef HAKMEM_TINY_PHASE6_BOX_REFACTOR
void* __attribute__((cold, noinline)) hak_tiny_alloc_slow(size_t size, int class_idx) {
#else
static void* __attribute__((cold, noinline)) hak_tiny_alloc_slow(size_t size, int class_idx) {
#endif
    (void)size;  // size is already validated by the caller

    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES) {
        return NULL;
    }

    // Try refilling from HotMag
    if (g_hotmag_enable && class_idx <= 3) {
        TinyHotMag* hm = &g_tls_hot_mag[class_idx];
        hotmag_try_refill(class_idx, hm);
        void* ptr = hotmag_pop(class_idx);
        if (ptr) {
            HAK_RET_ALLOC(class_idx, ptr);
        }
    }

    // Try TLS list refill
    if (g_tls_list_enable) {
        TinyTLSList* tls = &g_tls_lists[class_idx];
        if (tls->count > 0) {
            void* ptr = tls_list_pop(tls);
            if (ptr) {
                HAK_RET_ALLOC(class_idx, ptr);
            }
            // Even if ptr is NULL, do not bail out here; fall through to the superslab path below.
        }
        // Try refilling the TLS list from the slab
        uint32_t want = tls->refill_low > 0 ? tls->refill_low : 32;
        if (tls_refill_from_tls_slab(class_idx, tls, want) > 0) {
            void* ptr = tls_list_pop(tls);
            if (ptr) {
                HAK_RET_ALLOC(class_idx, ptr);
            }
            // Again, if ptr is NULL keep going (fall through to the superslab path).
        }
    }

    // Final fallback: allocate from superslab
    void* ss_ptr = hak_tiny_alloc_superslab(class_idx);
    if (ss_ptr) {
        HAK_RET_ALLOC(class_idx, ss_ptr);
    }

    tiny_alloc_dump_tls_state(class_idx, "slow_fail", &g_tls_slabs[class_idx]);

    // Optional one-shot debug output when the final slow path fails
    static int g_alloc_dbg = -1;
    if (__builtin_expect(g_alloc_dbg == -1, 0)) {
        const char* e = getenv("HAKMEM_TINY_ALLOC_DEBUG");
        g_alloc_dbg = (e && atoi(e) != 0) ? 1 : 0;
    }
    if (g_alloc_dbg) {
        static _Atomic int printed[8];
        int exp = 0;
        // Print at most once per size class
        if (atomic_compare_exchange_strong(&printed[class_idx], &exp, 1)) {
            fprintf(stderr, "[ALLOC-SLOW] hak_tiny_alloc_superslab returned NULL class=%d size=%zu\n",
                    class_idx, size);
        }
    }
    return ss_ptr;
}