## Summary

- Fixed the setenv segfault in bench_profile.h with ChatGPT's help (switched to the RTLD_NEXT path)
- Added core/box/pool_zero_mode_box.h: unified ZERO_MODE management via an ENV cache
- memset control in core/hakmem_pool.c driven by the zero mode (full/header/off)
- A/B test result: +15.34% improvement with ZERO_MODE=header (1M iterations, C6-heavy)

## Files Modified

- core/box/pool_api.inc.h: include pool_zero_mode_box.h
- core/bench_profile.h: glibc setenv → malloc+putenv (avoids the segfault)
- core/hakmem_pool.c: zero-mode lookup and control logic
- core/box/pool_zero_mode_box.h (new): enum/getter
- CURRENT_TASK.md: recorded Phase ML1 results

## Test Results

| Iterations | ZERO_MODE=full | ZERO_MODE=header | Improvement |
|-----------|----------------|------------------|-------------|
| 10K | 3.06 M ops/s | 3.17 M ops/s | +3.65% |
| 1M | 23.71 M ops/s | 27.34 M ops/s | **+15.34%** |

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Haiku 4.5 <noreply@anthropic.com>
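The commit notes both an RTLD_NEXT switch and a malloc+putenv replacement for the setenv crash. A minimal sketch of the malloc+putenv pattern (the helper name is hypothetical; this is not the actual bench_profile.h code):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Hypothetical helper: build "KEY=VALUE" ourselves and hand it to putenv().
// glibc setenv() allocates internally, which is one plausible way it can
// crash under an interposed allocator (the commit only records the segfault).
static int bench_set_env(const char* key, const char* value) {
    size_t len = strlen(key) + 1 + strlen(value) + 1; // "KEY=VALUE" + NUL
    char* kv = (char*)malloc(len);
    if (!kv) return -1;
    snprintf(kv, len, "%s=%s", key, value);
    return putenv(kv); // putenv keeps the pointer, so kv is intentionally never freed
}
```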
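A minimal sketch of the shape the new pool_zero_mode_box.h enum/getter could take; the identifier names and the HAKMEM_POOL_ZERO_MODE key are assumptions (the commit only states "ENV-cached ZERO_MODE enum/getter"):

```c
// pool_zero_mode_box.h (sketch, not the shipped file)
#ifndef POOL_ZERO_MODE_BOX_H
#define POOL_ZERO_MODE_BOX_H

#include <stdlib.h>
#include <string.h>

typedef enum {
    POOL_ZERO_FULL = 0,   // memset the whole block (safest, slowest)
    POOL_ZERO_HEADER = 1, // memset only the header region
    POOL_ZERO_OFF = 2     // no memset at all
} pool_zero_mode_t;

// Getter caches the ENV lookup so getenv() runs once, not per allocation.
// (The unsynchronized first read is a benign race; the real box may use
// atomics or TLS instead.)
static inline pool_zero_mode_t pool_zero_mode(void) {
    static int cached = -1; // -1 = not yet read
    if (cached < 0) {
        const char* s = getenv("HAKMEM_POOL_ZERO_MODE"); // hypothetical key
        if (s && strcmp(s, "header") == 0)   cached = POOL_ZERO_HEADER;
        else if (s && strcmp(s, "off") == 0) cached = POOL_ZERO_OFF;
        else                                 cached = POOL_ZERO_FULL; // default
    }
    return (pool_zero_mode_t)cached;
}

#endif // POOL_ZERO_MODE_BOX_H
```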
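And a sketch of how hakmem_pool.c could gate its memset on that mode, building on the enum above; the function and parameter names are illustrative, not the real internals:

```c
#include <stddef.h>
#include <string.h>

// Illustrative only: apply the configured zero mode to a freshly carved block.
static inline void pool_apply_zero_mode(void* block, size_t block_size,
                                        size_t header_size) {
    switch (pool_zero_mode()) {
    case POOL_ZERO_FULL:
        memset(block, 0, block_size);  // full clear: correctness baseline
        break;
    case POOL_ZERO_HEADER:
        memset(block, 0, header_size); // clear metadata only
        break;
    case POOL_ZERO_OFF:
        break;                         // caller guarantees initialization
    }
}
```

The header-only mode is presumably where the +15.34% at 1M iterations comes from: the full clear costs O(block size) per allocation, while the header clear costs only O(header size).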
// hak_alloc_api.inc.h — Box: hak_alloc_at() implementation
// Phase 2 Update: Lane-based allocation routing (Single Source of Truth)
#ifndef HAK_ALLOC_API_INC_H
#define HAK_ALLOC_API_INC_H

#include "../hakmem_tiny.h"       // For tiny_get_max_size() + hak_lane_classify.inc.h
#include "../hakmem_pool.h"       // Phase 2: For hak_pool_try_alloc() (Pool lane 1025B-52KB)
#include "../hakmem_smallmid.h"   // For Small-Mid Front Box (Phase 17-1)
#include "tiny_heap_env_box.h"    // TinyHeap front gate (C7 / multi-class)
#include "tiny_heap_box.h"        // TinyHeapBox alloc/free helpers
#include "tiny_c7_hotbox.h"       // tiny_c7_alloc_fast wrapper

#ifdef HAKMEM_POOL_TLS_PHASE1
#include "../pool_tls.h"
#endif

#include "mid_large_config_box.h" // Phase 5-Step3: Compile-time config for Mid/Large
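
// Lane overview (ranges as documented in the routing below):
//   Small-Mid : 256B-1KB   (opt-in front box, tried first when enabled)
//   Tiny      : <= tiny_get_max_size() (default 1023B, C0-C7)
//   Pool      : 1025B-52KB (2KB smallest class)
//   ACE       : 52KB-2MB
//   HUGE      : 2MB+       (direct mmap)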

// Centralized OS mapping boundary to keep syscalls in one place
static inline void* hak_os_map_boundary(size_t size, uintptr_t site_id) {
#if HAKMEM_DEBUG_TIMING
    HKM_TIME_START(t_mmap);
#endif
    void* p = hak_alloc_mmap_impl(size);
#if HAKMEM_DEBUG_TIMING
    HKM_TIME_END(HKM_CAT_SYSCALL_MMAP, t_mmap);
#endif
    (void)site_id; // reserved for future accounting/learning
    return p;
}

__attribute__((always_inline))
inline void* hak_alloc_at(size_t size, hak_callsite_t site) {

#if HAKMEM_DEBUG_TIMING
    HKM_TIME_START(t0);
#endif
    static _Atomic int g_hak_alloc_at_trace = 0;
    if (atomic_fetch_add_explicit(&g_hak_alloc_at_trace, 1, memory_order_relaxed) < 128) {
        HAK_TRACE("[hak_alloc_at_enter]\n");
    }
    if (!g_initialized) hak_init();

    // Adaptive CAS: Register thread on first allocation
    hakmem_thread_register();

    uintptr_t site_id = (uintptr_t)site;

    // Phase 17-1: Small-Mid Front Box (256B-1KB) - TRY FIRST!
    // Strategy: Thin TLS cache layer, no backend (falls through on miss)
    // ENV: HAKMEM_SMALLMID_ENABLE=1 to enable (default: OFF)
    // CRITICAL: Must come BEFORE Tiny to avoid routing conflict
    // When enabled, auto-adjusts Tiny to C0-C5 (0-255B only)
    // PERF_OPT: unlikely hint - smallmid disabled by default
    if (__builtin_expect(smallmid_is_enabled() && smallmid_is_in_range(size), 0)) {
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_START(t_smallmid);
#endif
        void* sm_ptr = smallmid_alloc(size);
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_END(HKM_CAT_TINY_ALLOC, t_smallmid);
#endif
        // PERF_OPT: likely hint - smallmid usually succeeds when enabled
        if (__builtin_expect(sm_ptr != NULL, 1)) {
            hkm_ace_track_alloc();
            return sm_ptr;
        }
        // TLS miss: Fall through to Mid/ACE (Tiny skipped due to auto-adjust)
    }

    // Phase 16: Dynamic Tiny max size (ENV: HAKMEM_TINY_MAX_CLASS)
    // Default: 1023B (C0-C7), reduced to 255B (C0-C5) when Small-Mid enabled
    // Phase 17-1: Auto-adjusted to avoid overlap with Small-Mid
    int tiny_class_idx = -1;
    int tiny_heap_route = 0;
    int tiny_tried = 0;
    if (__builtin_expect(size <= tiny_get_max_size(), 1)) {
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_START(t_tiny);
#endif
        void* tiny_ptr = NULL;
#ifdef HAKMEM_TINY_PHASE6_BOX_REFACTOR
        tiny_ptr = hak_tiny_alloc_fast_wrapper(size);
#elif defined(HAKMEM_TINY_PHASE6_METADATA)
        tiny_ptr = hak_tiny_alloc_metadata(size);
#else
        tiny_ptr = hak_tiny_alloc(size);
#endif
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_END(HKM_CAT_TINY_ALLOC, t_tiny);
#endif
        // PERF_OPT: likely hint - tiny allocations usually succeed (hot path)
        tiny_class_idx = hak_tiny_size_to_class(size);
        tiny_heap_route = (tiny_class_idx >= 0 && tiny_heap_class_route_enabled(tiny_class_idx));
        tiny_tried = 1;

        if (__builtin_expect(tiny_ptr != NULL, 1)) { hkm_ace_track_alloc(); return tiny_ptr; }

        // TinyHeap route is also "Tiny lane success" (C7 or other enabled classes)
        if (__builtin_expect(tiny_heap_route, 0)) {
            void* th_ptr = NULL;
            if (tiny_class_idx == 7 && tiny_c7_hot_enabled()) {
                th_ptr = tiny_c7_alloc_fast(size);
            } else {
                th_ptr = tiny_heap_alloc_class_fast(tiny_heap_ctx_for_thread(), tiny_class_idx, size);
            }
            if (th_ptr) { hkm_ace_track_alloc(); return th_ptr; }
        }

        // PHASE 7 CRITICAL FIX: No malloc fallback for Tiny failures
        // If Tiny fails for size <= tiny_get_max_size(), let it flow to Mid/ACE layers
        // This prevents mixed HAKMEM/libc allocation bugs
#if HAKMEM_TINY_HEADER_CLASSIDX
        if (!tiny_ptr && size <= tiny_get_max_size()) {
#if !HAKMEM_BUILD_RELEASE
            // Tiny failed - log and continue to Mid/ACE (no early return!)
            static int log_count = 0;
            if (log_count < 3) {
                fprintf(stderr, "[DEBUG] Phase 7: tiny_alloc(%zu) failed, trying Mid/ACE layers (no malloc fallback)\n", size);
                log_count++;
            }
#endif
            // Continue to Mid allocation below (do NOT fallback to malloc!)
        }
#else
#if !HAKMEM_BUILD_RELEASE
        static int log_count = 0;
        if (log_count < 3) {
            fprintf(stderr, "[DEBUG] tiny_alloc(%zu) returned NULL, falling back\n", size);
            log_count++;
        }
#endif
#endif
    }

    hkm_size_hist_record(size);

    // =========================================================================
    // Phase 2: Pool Lane (LANE_POOL: 1025B-52KB)
    // =========================================================================
    // Key fix: Route 1025-52KB to Pool BEFORE ACE
    // This eliminates the "unmanaged zone" (1025-2047B) that caused libc fragmentation
    //
    // Pool has 2KB as smallest class, so 1025-2047B requests use 2KB class
    // (internal fragmentation ~48%, but better than libc fragmentation!)

    if (HAK_LANE_IS_POOL(size)) {
#ifdef HAKMEM_POOL_TLS_PHASE1
        // Pool TLS fast path (8KB-52KB only, pool_tls.c classes)
        if (size >= 8192 && size <= 53248) {
            void* pool_ptr = pool_alloc(size);
            if (__builtin_expect(pool_ptr != NULL, 1)) return pool_ptr;
        }
#endif
        // Pool API path (1025B-52KB, hakmem_pool.c classes including 2KB)
        // This catches 1025-8191B range that Pool TLS doesn't handle
        void* pool_try = hak_pool_try_alloc(size, site_id);
        if (__builtin_expect(pool_try != NULL, 1)) return pool_try;
        // Fall through to ACE if Pool fails
    }

#if HAKMEM_FEATURE_EVOLUTION
    if (g_evo_sample_mask > 0) {
        static _Atomic uint64_t tick_counter = 0;
        if ((atomic_fetch_add(&tick_counter, 1) & g_evo_sample_mask) == 0) {
            struct timespec now; clock_gettime(CLOCK_MONOTONIC, &now);
            uint64_t now_ns = now.tv_sec * 1000000000ULL + now.tv_nsec;
            if (hak_evo_tick(now_ns)) {
                int new_strategy = hak_elo_select_strategy();
                atomic_store(&g_cached_strategy_id, new_strategy);
            }
        }
    }
#endif

    // Phase 5-Step3: Use Mid/Large Config Box (compile-time constant in PGO mode)
    size_t threshold;
    if (MID_LARGE_ELO_ENABLED) {
        int strategy_id = atomic_load(&g_cached_strategy_id);
        threshold = hak_elo_get_threshold(strategy_id);
    } else {
        threshold = 2097152;
    }

    if (MID_LARGE_BIGCACHE_ENABLED && size >= threshold) {
        void* cached_ptr = NULL;
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_START(t_bc);
#endif
        if (hak_bigcache_try_get(size, site_id, &cached_ptr)) {
#if HAKMEM_DEBUG_TIMING
            HKM_TIME_END(HKM_CAT_BIGCACHE_GET, t_bc);
#endif
            return cached_ptr;
        }
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_END(HKM_CAT_BIGCACHE_GET, t_bc);
#endif
    }

    // =========================================================================
    // Phase 2: ACE Lane (LANE_ACE: 52KB-2MB) + HUGE Lane (2MB+)
    // =========================================================================
    // ACE handles sizes between Pool max (52KB) and huge threshold (2MB)
    // Sizes > 2MB go directly to mmap (LANE_HUGE)

    if (HAK_LANE_IS_ACE(size) || size > LANE_POOL_MAX) {
        const FrozenPolicy* pol = hkm_policy_get();
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_START(t_ace);
#endif
        void* l1 = hkm_ace_alloc(size, site_id, pol);
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_END(HKM_CAT_POOL_GET, t_ace);
#endif
        if (l1) return l1;
    }

    // =========================================================================
    // Phase 2: Final Fallback (mmap) - should be rare after Pool fix
    // =========================================================================
    // With Phase 2 Pool extension, 1025-52KB should be handled by Pool
    // This fallback is for:
    // - LANE_HUGE (2MB+): Normal mmap path
    // - Pool/ACE failures: Emergency fallback
    // - LANE_TINY failures: Should not happen (design bug)

    extern _Atomic uint64_t g_final_fallback_mmap_count;

    void* ptr;
    if (HAK_LANE_IS_HUGE(size)) {
        // LANE_HUGE: Normal path for 2MB+ allocations
        atomic_fetch_add(&g_final_fallback_mmap_count, 1);
        ptr = hak_os_map_boundary(size, site_id);
    } else if (size > LANE_TINY_MAX) {
        // Pool or ACE failed for 1025B-2MB range - emergency mmap fallback
        atomic_fetch_add(&g_final_fallback_mmap_count, 1);
        static _Atomic int gap_alloc_count = 0;
        int count = atomic_fetch_add(&gap_alloc_count, 1);
        (void)count;
#if !HAKMEM_BUILD_RELEASE
        if (count < 5) {
            fprintf(stderr, "[HAKMEM] Phase 2 WARN: Pool/ACE fallback size=%zu (should be rare)\n", size);
        }
#endif
        ptr = hak_os_map_boundary(size, site_id);
    } else {
        // LANE_TINY failed - treat TinyHeap route as normal fallback, legacy Tiny failure is a bug
        HAK_LANE_ASSERT_NO_FALLBACK(LANE_FALLBACK, size);
        static _Atomic int oom_count = 0;
        const int c7_heap_on = (size == 1024 && tiny_heap_box_enabled());
        if (__builtin_expect(c7_heap_on, 0)) {
            if (tiny_c7_hot_enabled()) {
                void* retry = tiny_c7_alloc_fast(size);
                if (retry) { hkm_ace_track_alloc(); return retry; }
            }
            errno = ENOMEM;
            return NULL;
        }
        int count = atomic_fetch_add(&oom_count, 1);
        if (tiny_heap_route) {
            if (!HAKMEM_BUILD_RELEASE && count < 3) {
                fprintf(stderr, "[HAKMEM] TinyHeap route fallback size=%zu class=%d (Tiny lane bypass)\n",
                        size, tiny_class_idx);
            }
        } else {
            if (tiny_tried && count < 10) {
                fprintf(stderr, "[HAKMEM] BUG: Tiny lane failed for size=%zu (should not happen)\n", size);
            }
        }
        errno = ENOMEM;
        return NULL;
    }
    if (!ptr) return NULL;

    if (g_evo_sample_mask > 0) { hak_evo_record_size(size); }
    AllocHeader* hdr = (AllocHeader*)((char*)ptr - HEADER_SIZE);
    if (hdr->magic != HAKMEM_MAGIC) {
        fprintf(stderr, "[hakmem] ERROR: Invalid magic in allocated header!\n");
        return ptr;
    }
    hdr->alloc_site = site_id;
    hdr->class_bytes = (size >= threshold) ? threshold : 0;
    // Guard byte for FrontGate V2: force ptr[-1] away from 0xA?/0xB? to avoid Tiny misclass
    ((uint8_t*)hdr)[HEADER_SIZE - 1] = HAKMEM_FG_GUARD_BYTE;

#if HAKMEM_DEBUG_TIMING
    HKM_TIME_END(HKM_CAT_HAK_ALLOC, t0);
#endif
    return ptr;
}

#endif // HAK_ALLOC_API_INC_H
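
// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of this box): hak_alloc_at() takes the
// request size plus a callsite token that is folded into a uintptr_t site_id
// for accounting. On a Tiny-lane failure it returns NULL with errno = ENOMEM.
//
//   void* p = hak_alloc_at(1500, site); // 1500B -> Pool lane, 2KB class
//   if (!p) { /* handle allocation failure */ }
//
// 'site' stands for whatever hak_callsite_t value the caller tracks; how
// callsites are constructed is defined elsewhere and assumed here.
// ---------------------------------------------------------------------------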