hakmem/core/box/hak_alloc_api.inc.h
Moe Charm (CI) 48fadea590 Phase 7-1.1: Fix 1024B crash (header validation + malloc fallback)
Fixed critical bugs preventing Phase 7 from working with 1024B allocations.

## Bug Fixes (by Task Agent Ultrathink)

1. **Header Validation Missing in Release Builds**
   - `core/tiny_region_id.h:73-97` - Removed the `#if !HAKMEM_BUILD_RELEASE` guard
   - The magic byte and class_idx are now validated in every build, preventing SEGVs when Mid/Large pointers reach the Tiny path (see the sketch after this list)

2. **1024B Malloc Fallback Missing**
   - `core/box/hak_alloc_api.inc.h:35-49` - Added a direct fallback to malloc
   - When Phase 7 rejects a 1024B request (it would need a header), the allocator skips Mid/ACE and routes straight to malloc (full code in the file below)
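
Fix 1 in sketch form: a minimal illustration of the now-unconditional header check. All names here (`TinyHdr`, `TINY_MAGIC`, `TINY_NUM_CLASSES`, `tiny_hdr_validate`) are placeholders, not the actual identifiers in `core/tiny_region_id.h`.

```c
/* Sketch only — illustrative names, not the real tiny_region_id.h code. */
#include <stdbool.h>
#include <stdint.h>

#define TINY_MAGIC       0xA5  /* placeholder magic byte */
#define TINY_NUM_CLASSES 8     /* placeholder class count */

typedef struct {
    uint8_t magic;      /* written when a Tiny block is handed out */
    uint8_t class_idx;  /* Tiny size-class index */
} TinyHdr;

/* Before the fix this body was wrapped in `#if !HAKMEM_BUILD_RELEASE`, so
 * release builds skipped it; a Mid/Large pointer with a garbage header could
 * then be classified as Tiny and SEGV. The check now runs in every build. */
static inline bool tiny_hdr_validate(const TinyHdr* hdr) {
    if (hdr->magic != TINY_MAGIC) return false;            /* not a Tiny block */
    if (hdr->class_idx >= TINY_NUM_CLASSES) return false;  /* corrupt class index */
    return true;
}
```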

## Test Results

| Test | Result |
|------|--------|
| 128B, 512B, 1023B (Tiny) | +39%~+436%  |
| 1024B only (100 allocs) | 100% success  |
| Mixed 128B+1024B (200) | 100% success  |
| bench_random_mixed 1024B | Still crashes  |
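
For context, a minimal sketch of the kind of check behind the "1024B only" and "Mixed 128B+1024B" rows, assuming `hak_alloc()`/`hak_free()` entry points and a `hakmem.h` public header (hypothetical names; substitute the project's real API):

```c
/* Sketch: hak_alloc()/hak_free() and hakmem.h are assumed names, not verified. */
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include "hakmem.h"

int main(void) {
    void* ptrs[200];

    /* Mixed 128B + 1024B pattern: 128B stays on the Tiny fast path,
     * 1024B exercises the Phase 7 malloc fallback. */
    for (int i = 0; i < 200; i++) {
        size_t sz = (i % 2 == 0) ? 1024 : 128;
        ptrs[i] = hak_alloc(sz);
        assert(ptrs[i] != NULL);
        memset(ptrs[i], 0xAB, sz);  /* touch the memory to catch bad mappings */
    }
    for (int i = 0; i < 200; i++) {
        hak_free(ptrs[i]);
    }

    printf("mixed 128B/1024B: 200/200 allocations OK\n");
    return 0;
}
```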

## Known Issue

`bench_random_mixed` with 1024B still crashes (intermittent SEGV).
Simple tests pass, which suggests the problem lies in more complex allocation patterns.
Investigation pending.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
Co-Authored-By: Task Agent Ultrathink
2025-11-08 03:35:07 +09:00


// hak_alloc_api.inc.h — Box: hak_alloc_at() implementation
#ifndef HAK_ALLOC_API_INC_H
#define HAK_ALLOC_API_INC_H

__attribute__((always_inline))
inline void* hak_alloc_at(size_t size, hak_callsite_t site) {
#if HAKMEM_DEBUG_TIMING
    HKM_TIME_START(t0);
#endif
    if (!g_initialized) hak_init();
    uintptr_t site_id = (uintptr_t)site;

    // Tiny path: sizes up to TINY_MAX_SIZE try the tiny allocator first.
    if (__builtin_expect(size <= TINY_MAX_SIZE, 1)) {
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_START(t_tiny);
#endif
        void* tiny_ptr = NULL;
#ifdef HAKMEM_TINY_PHASE6_BOX_REFACTOR
        tiny_ptr = hak_tiny_alloc_fast_wrapper(size);
#elif defined(HAKMEM_TINY_PHASE6_ULTRA_SIMPLE)
        tiny_ptr = hak_tiny_alloc_ultra_simple(size);
#elif defined(HAKMEM_TINY_PHASE6_METADATA)
        tiny_ptr = hak_tiny_alloc_metadata(size);
#else
        tiny_ptr = hak_tiny_alloc(size);
#endif
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_END(HKM_CAT_TINY_ALLOC, t_tiny);
#endif
        if (tiny_ptr) { hkm_ace_track_alloc(); return tiny_ptr; }

        // Phase 7: If Tiny rejects size <= TINY_MAX_SIZE (e.g., 1024B needs header),
        // skip Mid/ACE and route directly to malloc fallback
#if HAKMEM_TINY_HEADER_CLASSIDX
        if (size <= TINY_MAX_SIZE) {
            // Tiny rejected this size (likely 1024B), use malloc directly
            static int log_count = 0;
            if (log_count < 3) {
                fprintf(stderr, "[DEBUG] Phase 7: tiny_alloc(%zu) rejected, using malloc fallback\n", size);
                log_count++;
            }
            void* fallback_ptr = hak_alloc_malloc_impl(size);
            if (fallback_ptr) return fallback_ptr;
            // If malloc fails, continue to other fallbacks below
        }
#else
        static int log_count = 0;
        if (log_count < 3) {
            fprintf(stderr, "[DEBUG] tiny_alloc(%zu) returned NULL, falling back\n", size);
            log_count++;
        }
#endif
    }

    hkm_size_hist_record(size);

    // Mid path: mid-range sizes go to the multi-threaded mid allocator.
    if (__builtin_expect(mid_is_in_range(size), 0)) {
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_START(t_mid);
#endif
        void* mid_ptr = mid_mt_alloc(size);
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_END(HKM_CAT_POOL_GET, t_mid);
#endif
        if (mid_ptr) return mid_ptr;
    }

#if HAKMEM_FEATURE_EVOLUTION
    // Evolution sampling: at a rate controlled by g_evo_sample_mask, tick the
    // evolution engine and refresh the cached ELO strategy when it changes.
    if (g_evo_sample_mask > 0) {
        static _Atomic uint64_t tick_counter = 0;
        if ((atomic_fetch_add(&tick_counter, 1) & g_evo_sample_mask) == 0) {
            struct timespec now;
            clock_gettime(CLOCK_MONOTONIC, &now);
            uint64_t now_ns = now.tv_sec * 1000000000ULL + now.tv_nsec;
            if (hak_evo_tick(now_ns)) {
                int new_strategy = hak_elo_select_strategy();
                atomic_store(&g_cached_strategy_id, new_strategy);
            }
        }
    }
#endif

    // Large-allocation threshold: taken from the ELO-selected strategy when
    // learning is enabled, otherwise a fixed default.
    size_t threshold;
    if (HAK_ENABLED_LEARNING(HAKMEM_FEATURE_ELO)) {
        int strategy_id = atomic_load(&g_cached_strategy_id);
        threshold = hak_elo_get_threshold(strategy_id);
    } else {
        threshold = 2097152;  // 2 MiB
    }

    // BigCache: try to reuse a cached large block for this call site.
    if (HAK_ENABLED_CACHE(HAKMEM_FEATURE_BIGCACHE) && size >= threshold) {
        void* cached_ptr = NULL;
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_START(t_bc);
#endif
        if (hak_bigcache_try_get(size, site_id, &cached_ptr)) {
#if HAKMEM_DEBUG_TIMING
            HKM_TIME_END(HKM_CAT_BIGCACHE_GET, t_bc);
#endif
            return cached_ptr;
        }
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_END(HKM_CAT_BIGCACHE_GET, t_bc);
#endif
    }

    // ACE: sizes above Tiny but below the threshold go through the ACE pool.
    if (size > TINY_MAX_SIZE && size < threshold) {
        const FrozenPolicy* pol = hkm_policy_get();
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_START(t_ace);
#endif
        void* l1 = hkm_ace_alloc(size, site_id, pol);
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_END(HKM_CAT_POOL_GET, t_ace);
#endif
        if (l1) return l1;
    }

    // Final fallbacks: mmap at or above the threshold, malloc below it.
    void* ptr;
    if (size >= threshold) {
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_START(t_mmap);
#endif
        ptr = hak_alloc_mmap_impl(size);
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_END(HKM_CAT_SYSCALL_MMAP, t_mmap);
#endif
    } else {
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_START(t_malloc);
#endif
        ptr = hak_alloc_malloc_impl(size);
#if HAKMEM_DEBUG_TIMING
        HKM_TIME_END(HKM_CAT_FALLBACK_MALLOC, t_malloc);
#endif
    }
    if (!ptr) return NULL;
    if (g_evo_sample_mask > 0) { hak_evo_record_size(size); }

    // Record call site and size class in the header just before the returned pointer.
    AllocHeader* hdr = (AllocHeader*)((char*)ptr - HEADER_SIZE);
    if (hdr->magic != HAKMEM_MAGIC) {
        fprintf(stderr, "[hakmem] ERROR: Invalid magic in allocated header!\n");
        return ptr;
    }
    hdr->alloc_site = site_id;
    hdr->class_bytes = (size >= threshold) ? threshold : 0;
#if HAKMEM_DEBUG_TIMING
    HKM_TIME_END(HKM_CAT_HAK_ALLOC, t0);
#endif
    return ptr;
}

#endif // HAK_ALLOC_API_INC_H