Files
hakmem/core/box/hak_alloc_api.inc.h
Moe Charm (CI) 707056b765 feat: Phase 7 + Phase 2 - Massive performance & stability improvements
Performance Achievements:
- Tiny allocations: +180-280% (21M → 59-70M ops/s, random mixed workload)
- Single-thread: +24% (2.71M → 3.36M ops/s Larson)
- 4T stability: 0% → 95% (19/20 success rate)
- Overall: 91.3% of System malloc average (target was 40-55%) ✓

Phase 7 (Tasks 1-3): Core Optimizations
- Task 1: Header validation removal (Region-ID direct lookup; sketched below)
- Task 2: Aggressive inlining (TLS cache access optimization)
- Task 3: Pre-warm TLS cache (eliminate cold-start penalty)
  Result: +180-280% improvement, 85-146% of System malloc
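
  A minimal sketch of the Task 1 idea, with hypothetical names and layout
  constants (not the real hakmem structures): the owning region, and hence
  the size class, is derived from pointer arithmetic, so the hot path never
  reads or validates a per-allocation header.

    #include <stdint.h>

    #define REGION_SHIFT 21u      /* assumed: regions are 2 MiB aligned */
    #define REGION_COUNT 4096u    /* assumed: fixed-size region table   */

    typedef struct { uint16_t class_idx; uint16_t flags; } RegionInfo;
    static RegionInfo g_region_table[REGION_COUNT]; /* filled as regions are mapped  */
    static uintptr_t  g_region_base;                /* base address of the tiny heap */

    static inline RegionInfo* region_of(const void* p) {
        uintptr_t idx = ((uintptr_t)p - g_region_base) >> REGION_SHIFT;
        return (idx < REGION_COUNT) ? &g_region_table[idx] : NULL;
    }

  A lookup of this shape presumably replaces the header-magic check on the
  alloc/free hot path, which is where the saved cycles come from.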

Critical Bug Fixes:
- Fix 64B allocation crash (size-to-class +1 for the header; see sketch below)
- Fix 4T wrapper recursion bugs (BUG #7, #8, #10, #11)
- Remove malloc fallback (4T stability: 30% → 50%)
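
  The 64B crash reduces to the size-to-class mapping: the per-allocation
  header must be counted before a class is chosen, otherwise a 64-byte
  request lands exactly in the 64-byte class and the header overruns it.
  A minimal sketch, assuming an 8-byte header and a 16-byte class step
  (both assumed constants):

    #include <stddef.h>

    #define TINY_HEADER_SIZE 8u    /* assumed header size            */
    #define TINY_CLASS_STEP  16u   /* assumed size-class granularity */

    /* Rounding only `size` lets a 64B request pick the 64B class with no
     * room for the header; counting the header first is the "+1" fix. */
    static inline unsigned size_to_class(size_t size) {
        size_t need = size + TINY_HEADER_SIZE;
        return (unsigned)((need + TINY_CLASS_STEP - 1) / TINY_CLASS_STEP);
    }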

Phase 2a: SuperSlab Dynamic Expansion (CRITICAL)
- Implement mimalloc-style chunk linking (sketched below)
- Unlimited slab expansion (no more OOM at the fixed 32-slab limit)
- Fix chunk initialization bug (bitmap=0x00000001 after expansion)
  Files: core/hakmem_tiny_superslab.c/h, core/superslab/superslab_types.h
  Result: 50% → 95% stability (19/20 4T success)
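
  A minimal sketch of the chunk linking, with hypothetical types (the real
  layout is in core/superslab/superslab_types.h): each chunk carries a next
  pointer, and expansion mmaps a fresh chunk and links it onto the list
  instead of failing at a fixed slab array.

    #include <stdint.h>
    #include <string.h>
    #include <sys/mman.h>

    /* Hypothetical chunk: link to the next chunk plus a 32-slab free bitmap. */
    typedef struct SuperSlabChunk {
        struct SuperSlabChunk* next;
        uint32_t               free_bitmap;  /* assumed: bit i set => slab i free */
        /* slab storage follows in the real layout */
    } SuperSlabChunk;

    static SuperSlabChunk* superslab_expand(SuperSlabChunk* head) {
        SuperSlabChunk* c = mmap(NULL, sizeof *c, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (c == MAP_FAILED) return NULL;
        memset(c, 0, sizeof *c);
        c->free_bitmap = ~0u;   /* all 32 slabs free; the initialization bug
                                   above left this at 0x00000001 instead      */
        c->next = head;         /* link the new chunk onto the existing chain */
        return c;               /* new head of the chunk list */
    }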

Phase 2b: TLS Cache Adaptive Sizing
- Dynamic capacity: 16-2048 slots based on usage
- High-water mark tracking + exponential growth/shrink (see sketch below)
- Expected: +3-10% performance, -30-50% memory
  Files: core/tiny_adaptive_sizing.c/h (new)
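
  A minimal sketch of the sizing policy, with hypothetical names (the real
  code is in core/tiny_adaptive_sizing.c/h): track a high-water mark per
  epoch, double the capacity when the cache saturates, and halve it when
  usage stays well below capacity.

    #include <stddef.h>

    #define TLS_CACHE_MIN   16u
    #define TLS_CACHE_MAX 2048u

    typedef struct {
        size_t capacity;    /* current slot budget, 16..2048  */
        size_t high_water;  /* peak occupancy seen this epoch */
    } TlsCacheStats;

    /* Assumed to run at epoch boundaries: exponential growth when the cache
     * was full, shrink toward the observed peak otherwise. */
    static void tls_cache_adapt(TlsCacheStats* s) {
        if (s->high_water >= s->capacity && s->capacity < TLS_CACHE_MAX) {
            s->capacity *= 2;                   /* exponential growth */
        } else if (s->high_water < s->capacity / 4 && s->capacity > TLS_CACHE_MIN) {
            s->capacity /= 2;                   /* shrink when mostly idle */
        }
        s->high_water = 0;                      /* start a new epoch */
    }

  Growing only on saturation and shrinking only on sustained low usage keeps
  the policy hysteretic, which is presumably where the memory saving comes from.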

Phase 2c: BigCache Dynamic Hash Table
- Migrate from fixed 256×8 array to dynamic hash table
- Auto-resize: 256 → 512 → 1024 → 65,536 buckets
- Improved hash function (FNV-1a) + collision chaining (see sketch below)
  Files: core/hakmem_bigcache.c/h
  Expected: +10-20% cache hit rate
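
  A minimal sketch of the new keying (the FNV-1a constants are the standard
  64-bit ones; the bucket layout and resize trigger are assumptions): hash
  the (size, call-site) pair into a power-of-two bucket array and chain
  collisions, doubling the array as it fills.

    #include <stdint.h>
    #include <stddef.h>

    /* Standard 64-bit FNV-1a over the (size, site) key bytes. */
    static uint64_t bigcache_hash(size_t size, uintptr_t site) {
        uint64_t h = 14695981039346656037ULL;            /* FNV offset basis */
        const unsigned char* p = (const unsigned char*)&size;
        for (size_t i = 0; i < sizeof size; i++) { h ^= p[i]; h *= 1099511628211ULL; }
        p = (const unsigned char*)&site;
        for (size_t i = 0; i < sizeof site; i++) { h ^= p[i]; h *= 1099511628211ULL; }
        return h;
    }

    /* Hypothetical bucket entry with collision chaining; the table doubles
     * (256 -> 512 -> ...) when entries start to outnumber buckets. */
    typedef struct BigCacheEntry {
        struct BigCacheEntry* next;   /* collision chain */
        size_t                size;
        uintptr_t             site;
        void*                 block;
    } BigCacheEntry;

    static inline size_t bucket_index(uint64_t h, size_t nbuckets /* power of 2 */) {
        return (size_t)(h & (nbuckets - 1));
    }

  A power-of-two table keeps the bucket index a single mask, and chaining
  means a resize only rehashes entries instead of evicting them.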

Design Flaws Analysis:
- Identified 6 components with fixed-capacity bottlenecks
- SuperSlab (CRITICAL), TLS Cache (HIGH), BigCache/L2.5 (MEDIUM)
- Report: DESIGN_FLAWS_ANALYSIS.md (11 chapters)

Documentation:
- 13 comprehensive reports (PHASE*.md, DESIGN_FLAWS*.md)
- Implementation guides, test results, production readiness
- Bug fix reports, root cause analysis

Build System:
- Makefile: phase7 targets, PREWARM_TLS flag
- Auto dependency generation (-MMD -MP) for .inc files

Known Issues:
- 4T stability: 19/20 (95%); investigating the remaining failure to reach 100%
- L2.5 Pool dynamic sharding: design only (needs 2-3 days of integration work)

🤖 Generated with Claude Code (https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-08 17:08:00 +09:00

180 lines · 6.0 KiB · C

// hak_alloc_api.inc.h — Box: hak_alloc_at() implementation
#ifndef HAK_ALLOC_API_INC_H
#define HAK_ALLOC_API_INC_H
__attribute__((always_inline))
inline void* hak_alloc_at(size_t size, hak_callsite_t site) {
#if HAKMEM_DEBUG_TIMING
    HKM_TIME_START(t0);
#endif
    if (!g_initialized) hak_init();
    uintptr_t site_id = (uintptr_t)site;

    if (__builtin_expect(size <= TINY_MAX_SIZE, 1)) {
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_START(t_tiny);
#endif
        void* tiny_ptr = NULL;
#ifdef HAKMEM_TINY_PHASE6_BOX_REFACTOR
        tiny_ptr = hak_tiny_alloc_fast_wrapper(size);
#elif defined(HAKMEM_TINY_PHASE6_ULTRA_SIMPLE)
        tiny_ptr = hak_tiny_alloc_ultra_simple(size);
#elif defined(HAKMEM_TINY_PHASE6_METADATA)
        tiny_ptr = hak_tiny_alloc_metadata(size);
#else
        tiny_ptr = hak_tiny_alloc(size);
#endif
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_END(HKM_CAT_TINY_ALLOC, t_tiny);
#endif
        if (tiny_ptr) { hkm_ace_track_alloc(); return tiny_ptr; }

        // PHASE 7 CRITICAL FIX: No malloc fallback for Tiny failures
        // If Tiny fails for size <= TINY_MAX_SIZE, let it flow to Mid/ACE layers
        // This prevents mixed HAKMEM/libc allocation bugs
#if HAKMEM_TINY_HEADER_CLASSIDX
        if (!tiny_ptr && size <= TINY_MAX_SIZE) {
            // Tiny failed - log and continue to Mid/ACE (no early return!)
            static int log_count = 0;
            if (log_count < 3) {
                fprintf(stderr, "[DEBUG] Phase 7: tiny_alloc(%zu) failed, trying Mid/ACE layers (no malloc fallback)\n", size);
                log_count++;
            }
            // Continue to Mid allocation below (do NOT fallback to malloc!)
        }
#else
        static int log_count = 0;
        if (log_count < 3) {
            fprintf(stderr, "[DEBUG] tiny_alloc(%zu) returned NULL, falling back\n", size);
            log_count++;
        }
#endif
    }

    hkm_size_hist_record(size);

    if (__builtin_expect(mid_is_in_range(size), 0)) {
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_START(t_mid);
#endif
        void* mid_ptr = mid_mt_alloc(size);
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_END(HKM_CAT_POOL_GET, t_mid);
#endif
        if (mid_ptr) return mid_ptr;
    }

#if HAKMEM_FEATURE_EVOLUTION
    if (g_evo_sample_mask > 0) {
        static _Atomic uint64_t tick_counter = 0;
        if ((atomic_fetch_add(&tick_counter, 1) & g_evo_sample_mask) == 0) {
            struct timespec now;
            clock_gettime(CLOCK_MONOTONIC, &now);
            uint64_t now_ns = now.tv_sec * 1000000000ULL + now.tv_nsec;
            if (hak_evo_tick(now_ns)) {
                int new_strategy = hak_elo_select_strategy();
                atomic_store(&g_cached_strategy_id, new_strategy);
            }
        }
    }
#endif

    size_t threshold;
    if (HAK_ENABLED_LEARNING(HAKMEM_FEATURE_ELO)) {
        int strategy_id = atomic_load(&g_cached_strategy_id);
        threshold = hak_elo_get_threshold(strategy_id);
    } else {
        threshold = 2097152;
    }

    if (HAK_ENABLED_CACHE(HAKMEM_FEATURE_BIGCACHE) && size >= threshold) {
        void* cached_ptr = NULL;
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_START(t_bc);
#endif
        if (hak_bigcache_try_get(size, site_id, &cached_ptr)) {
#if HAKMEM_DEBUG_TIMING
            HKM_TIME_END(HKM_CAT_BIGCACHE_GET, t_bc);
#endif
            return cached_ptr;
        }
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_END(HKM_CAT_BIGCACHE_GET, t_bc);
#endif
    }

    if (size > TINY_MAX_SIZE && size < threshold) {
        const FrozenPolicy* pol = hkm_policy_get();
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_START(t_ace);
#endif
        void* l1 = hkm_ace_alloc(size, site_id, pol);
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_END(HKM_CAT_POOL_GET, t_ace);
#endif
        if (l1) return l1;
    }

    // PHASE 7 CRITICAL FIX: Handle allocation gap (1KB-8KB) when ACE is disabled
    // Size range:
    //   0-1024:    Tiny allocator
    //   1025-8191: Gap! (Mid starts at 8KB, ACE often disabled)
    //   8KB-32KB:  Mid allocator
    //   32KB-2MB:  ACE (if enabled, otherwise mmap)
    //   2MB+:      mmap
    //
    // Solution: Use mmap for gap when ACE failed (ACE disabled or OOM)
    void* ptr;
    if (size >= threshold) {
        // Large allocation (>= 2MB default): use mmap
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_START(t_mmap);
#endif
        ptr = hak_alloc_mmap_impl(size);
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_END(HKM_CAT_SYSCALL_MMAP, t_mmap);
#endif
    } else if (size >= TINY_MAX_SIZE) {
        // Mid-range allocation (1KB-2MB): try mmap as final fallback
        // This handles the gap when ACE is disabled or failed
        static _Atomic int gap_alloc_count = 0;
        int count = atomic_fetch_add(&gap_alloc_count, 1);
        if (count < 3) {
            fprintf(stderr, "[HAKMEM] INFO: Using mmap for mid-range size=%zu (ACE disabled or failed)\n", size);
        }
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_START(t_mmap);
#endif
        ptr = hak_alloc_mmap_impl(size);
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_END(HKM_CAT_SYSCALL_MMAP, t_mmap);
#endif
    } else {
        // Should never reach here (size <= TINY_MAX_SIZE should be handled by Tiny)
        static _Atomic int oom_count = 0;
        int count = atomic_fetch_add(&oom_count, 1);
        if (count < 10) {
            fprintf(stderr, "[HAKMEM] OOM: Unexpected allocation path for size=%zu, returning NULL\n", size);
            fprintf(stderr, "[HAKMEM] (OOM count: %d) This should not happen!\n", count + 1);
        }
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_START(t_malloc);
        HKM_TIME_END(HKM_CAT_FALLBACK_MALLOC, t_malloc); // Keep timing for compatibility
#endif
        errno = ENOMEM;
        return NULL;
    }

    if (!ptr) return NULL;
    if (g_evo_sample_mask > 0) { hak_evo_record_size(size); }

    AllocHeader* hdr = (AllocHeader*)((char*)ptr - HEADER_SIZE);
    if (hdr->magic != HAKMEM_MAGIC) {
        fprintf(stderr, "[hakmem] ERROR: Invalid magic in allocated header!\n");
        return ptr;
    }
    hdr->alloc_site = site_id;
    hdr->class_bytes = (size >= threshold) ? threshold : 0;

#if HAKMEM_DEBUG_TIMING
    HKM_TIME_END(HKM_CAT_HAK_ALLOC, t0);
#endif
    return ptr;
}
#endif // HAK_ALLOC_API_INC_H