Phase 36-37: TinyHotHeap v2 HotBox redesign and C7 current_page policy fixes

- Redefine TinyHotHeap v2 as a per-thread Hot Box with clear boundaries
- Add comprehensive OS statistics tracking for SS allocations
- Implement route-based free handling for TinyHeap v2
- Add C6/C7 debugging and statistics improvements
- Update documentation with implementation guidelines and analysis
- Add new box headers for stats, routing, and front-end management
Author: Moe Charm (CI)
Date: 2025-12-08 21:30:21 +09:00
parent 34a8fd69b6
commit 8f18963ad5
37 changed files with 3205 additions and 167 deletions


@@ -10,8 +10,13 @@
#include <unistd.h>
// Global counters for debugging (non-static for external access)
_Atomic uint64_t g_ss_mmap_count = 0;
_Atomic uint64_t g_final_fallback_mmap_count = 0;
extern _Atomic uint64_t g_ss_mmap_count;
extern _Atomic uint64_t g_final_fallback_mmap_count;
extern _Atomic uint64_t g_ss_os_alloc_calls;
extern _Atomic uint64_t g_ss_os_free_calls;
extern _Atomic uint64_t g_ss_os_madvise_calls;
extern _Atomic uint64_t g_ss_os_huge_alloc_calls;
extern _Atomic uint64_t g_ss_os_huge_fail_calls;
// ============================================================================
// OOM Diagnostics
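The hunk above turns the local mmap counters into extern declarations and introduces five new OS-level counters, which (per the commit message) now live in a dedicated stats box header alongside the ss_os_stats_record_*() helpers called throughout this file. A minimal sketch of what that header plausibly contains follows; the header name, the relaxed-ordering choice, and the HAKMEM_SS_OS_STATS environment gate are assumptions — only the counter and helper names are taken from the diff.

// Sketch of an assumed stats box header (header name hypothetical).
// The counters are declared extern here and defined in one translation unit.
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

extern _Atomic uint64_t g_ss_os_alloc_calls;
extern _Atomic uint64_t g_ss_os_free_calls;
extern _Atomic uint64_t g_ss_os_madvise_calls;
extern _Atomic uint64_t g_ss_os_huge_alloc_calls;
extern _Atomic uint64_t g_ss_os_huge_fail_calls;

// Each recorder is a relaxed atomic increment; ordering does not matter for
// counters that are only read once, at exit, by the stats destructor.
static inline void ss_os_stats_record_alloc(void) {
    atomic_fetch_add_explicit(&g_ss_os_alloc_calls, 1, memory_order_relaxed);
}
static inline void ss_os_stats_record_free(void) {
    atomic_fetch_add_explicit(&g_ss_os_free_calls, 1, memory_order_relaxed);
}
static inline void ss_os_stats_record_madvise(void) {
    atomic_fetch_add_explicit(&g_ss_os_madvise_calls, 1, memory_order_relaxed);
}
static inline void ss_os_stats_record_huge_alloc(void) {
    atomic_fetch_add_explicit(&g_ss_os_huge_alloc_calls, 1, memory_order_relaxed);
}
static inline void ss_os_stats_record_huge_fail(void) {
    atomic_fetch_add_explicit(&g_ss_os_huge_fail_calls, 1, memory_order_relaxed);
}

// Assumed gate: stats reporting toggled by an environment variable
// (the real build may use a compile-time flag instead).
static inline int ss_os_stats_enabled(void) {
    const char* env = getenv("HAKMEM_SS_OS_STATS");  // hypothetical variable name
    return env != NULL && env[0] == '1';
}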
@@ -84,6 +89,55 @@ static void log_superslab_oom_once(size_t ss_size, size_t alloc_size, int err) {
g_hakmem_lock_depth--; // Now safe to restore (all libc calls complete)
}
// ============================================================================
// HugePage (experimental) helper
// ============================================================================
static void* ss_os_acquire_hugepage_try(size_t ss_size, uintptr_t ss_mask, int populate) {
#ifdef MAP_HUGETLB
size_t huge_sz = ss_os_huge_size_bytes();
if (ss_size != huge_sz) {
// For now, only attempt hugepage when requested SuperSlab size matches the HugePage size.
return NULL;
}
int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB;
#ifdef MAP_POPULATE
if (populate) {
flags |= MAP_POPULATE;
}
#endif
#ifdef MAP_HUGE_2MB
// Best-effort: allow the kernel to pick 2MB huge pages explicitly when available.
if (huge_sz == (2ULL << 20)) {
flags |= MAP_HUGE_2MB;
}
#endif
void* ptr = mmap(NULL, huge_sz, PROT_READ | PROT_WRITE, flags, -1, 0);
if (ptr == MAP_FAILED) {
ss_os_stats_record_huge_fail();
return NULL;
}
if (((uintptr_t)ptr & ss_mask) != 0) {
munmap(ptr, huge_sz);
ss_os_stats_record_huge_fail();
return NULL;
}
ss_os_stats_record_huge_alloc();
ss_os_stats_record_alloc();
atomic_fetch_add(&g_ss_mmap_count, 1);
return ptr;
#else
(void)ss_size;
(void)ss_mask;
(void)populate;
return NULL;
#endif
}
// ============================================================================
// OS Acquisition Implementation
// ============================================================================
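ss_os_acquire_hugepage_try() relies on two helpers this diff does not show: ss_os_huge_enabled() and ss_os_huge_size_bytes(). A plausible sketch under stated assumptions (environment-variable opt-in, fixed 2MB HugePage size) is below; the variable name and the hard-coded size are guesses, not the project's actual implementation.

#include <stddef.h>
#include <stdlib.h>

// Assumed opt-in gate for the research-only HugePage path (default OFF).
static inline int ss_os_huge_enabled(void) {
    const char* env = getenv("HAKMEM_SS_HUGEPAGE");  // hypothetical variable name
    return env != NULL && env[0] == '1';
}

// Assumed HugePage size: the common 2MB page. A real implementation might
// read Hugepagesize from /proc/meminfo or accept a runtime override.
static inline size_t ss_os_huge_size_bytes(void) {
    return (size_t)2 << 20;
}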
@@ -94,6 +148,14 @@ void* ss_os_acquire(uint8_t size_class, size_t ss_size, uintptr_t ss_mask, int p
(void)size_class; // Used only for logging in debug builds
// Experimental HugePage path (research-only, default OFF)
if (ss_os_huge_enabled()) {
void* huge = ss_os_acquire_hugepage_try(ss_size, ss_mask, populate);
if (huge != NULL) {
return huge;
}
}
#ifdef MAP_ALIGNED_SUPER
int map_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_ALIGNED_SUPER;
#ifdef MAP_POPULATE
@@ -107,6 +169,7 @@ void* ss_os_acquire(uint8_t size_class, size_t ss_size, uintptr_t ss_mask, int p
-1, 0);
if (ptr != MAP_FAILED) {
atomic_fetch_add(&g_ss_mmap_count, 1);
ss_os_stats_record_alloc();
if (((uintptr_t)ptr & ss_mask) == 0) {
// Successfully got aligned pointer from OS
return ptr;
@@ -140,6 +203,7 @@ void* ss_os_acquire(uint8_t size_class, size_t ss_size, uintptr_t ss_mask, int p
-1, 0);
if (raw != MAP_FAILED) {
uint64_t count = atomic_fetch_add(&g_ss_mmap_count, 1) + 1;
ss_os_stats_record_alloc();
#if !HAKMEM_BUILD_RELEASE
if (log_count < 10) {
fprintf(stderr, "[SUPERSLAB_MMAP] #%lu: class=%d size=%zu (total SuperSlab mmaps so far)\n",
@@ -177,6 +241,7 @@ void* ss_os_acquire(uint8_t size_class, size_t ss_size, uintptr_t ss_mask, int p
#ifdef MADV_POPULATE_WRITE
if (populate) {
int ret = madvise(ptr, ss_size, MADV_POPULATE_WRITE);
ss_os_stats_record_madvise();
if (ret != 0) {
// Fallback for kernels that support MADV_POPULATE_WRITE but it fails
// Use explicit page-by-page touching with writes
@@ -195,8 +260,25 @@ void* ss_os_acquire(uint8_t size_class, size_t ss_size, uintptr_t ss_mask, int p
p[i] = 0;
}
p[ss_size - 1] = 0;
ss_os_stats_record_madvise();
}
#endif
return ptr;
}
static void ss_os_stats_destructor(void) __attribute__((destructor));
static void ss_os_stats_destructor(void) {
if (!ss_os_stats_enabled()) {
return;
}
fprintf(stderr,
"[SS_OS_STATS] alloc=%llu free=%llu madvise=%llu mmap_total=%llu fallback_mmap=%llu huge_alloc=%llu huge_fail=%llu\n",
(unsigned long long)atomic_load_explicit(&g_ss_os_alloc_calls, memory_order_relaxed),
(unsigned long long)atomic_load_explicit(&g_ss_os_free_calls, memory_order_relaxed),
(unsigned long long)atomic_load_explicit(&g_ss_os_madvise_calls, memory_order_relaxed),
(unsigned long long)atomic_load_explicit(&g_ss_mmap_count, memory_order_relaxed),
(unsigned long long)atomic_load_explicit(&g_final_fallback_mmap_count, memory_order_relaxed),
(unsigned long long)atomic_load_explicit(&g_ss_os_huge_alloc_calls, memory_order_relaxed),
(unsigned long long)atomic_load_explicit(&g_ss_os_huge_fail_calls, memory_order_relaxed));
}
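The destructor reports g_ss_os_free_calls, but none of the hunks above record a free; presumably the SuperSlab release path does. As a hedged illustration only, a munmap-based release that keeps that counter consistent could look like the sketch below (function name and signature are assumptions, not the file's actual API).

#include <stddef.h>
#include <sys/mman.h>

// Hypothetical release counterpart: return a SuperSlab region to the OS and
// record the call so the exit-time [SS_OS_STATS] line stays balanced.
static void ss_os_release_region(void* ptr, size_t ss_size) {
    if (ptr == NULL) {
        return;
    }
    munmap(ptr, ss_size);
    ss_os_stats_record_free();
}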