Phase 23 Unified Cache + PageFaultTelemetry generalization: Mid/VM page-fault bottleneck identified
Summary:
- Phase 23 Unified Cache: +30% improvement (Random Mixed 256B: 18.18M → 23.68M ops/s)
- PageFaultTelemetry: Extended to generic buckets (C0-C7, MID, L25, SSM)
- Measurement-driven decision: Mid/VM page-faults (80-100K) >> Tiny (6K) → prioritize Mid/VM optimization
Phase 23 Changes:
1. Unified Cache implementation (core/front/tiny_unified_cache.{c,h})
- Direct SuperSlab carve (TLS SLL bypass)
- Self-contained pop-or-refill pattern (see the sketch after this list)
- ENV: HAKMEM_TINY_UNIFIED_CACHE=1, HAKMEM_TINY_UNIFIED_C{0-7}=128
2. Fast path pruning (tiny_alloc_fast.inc.h, tiny_free_fast_v2.inc.h)
- Unified ON → direct cache access (all intermediate layers skipped)
- Alloc: unified_cache_pop_or_refill() → on miss, fail straight to the slow path
- Free: unified_cache_push() → fall back to the TLS SLL only when the cache is full
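
A minimal sketch of the pop-or-refill/push pair described above. The per-class TLS array layout and the superslab_carve() helper are illustrative assumptions; only the unified_cache_pop_or_refill()/unified_cache_push() names and the 128-slot default come from this commit:

/* Sketch only: layout and superslab_carve() are hypothetical. */
#include <stddef.h>

#define UNIFIED_CAP 128                 /* HAKMEM_TINY_UNIFIED_C{0-7} default */

typedef struct {
    void* slots[UNIFIED_CAP];
    int   top;                          /* number of cached blocks */
} UnifiedCache;

static __thread UnifiedCache g_unified[8];   /* one per class C0-C7 */

/* Assumed helper: carves up to n fresh blocks for class_idx straight
 * from the current SuperSlab (TLS SLL bypass); returns count produced. */
extern int superslab_carve(int class_idx, void** out, int n);

static inline void* unified_cache_pop_or_refill(int class_idx) {
    UnifiedCache* c = &g_unified[class_idx];
    if (c->top == 0) {
        /* Self-contained refill: no intermediate layers consulted. */
        c->top = superslab_carve(class_idx, c->slots, UNIFIED_CAP);
        if (c->top == 0) return NULL;   /* caller fails straight to slow path */
    }
    return c->slots[--c->top];
}

static inline int unified_cache_push(int class_idx, void* blk) {
    UnifiedCache* c = &g_unified[class_idx];
    if (c->top == UNIFIED_CAP) return 0;  /* full: caller falls back to TLS SLL */
    c->slots[c->top++] = blk;
    return 1;
}

Keeping the refill inside the pop is what lets the alloc fast path stay a single call, which in turn enables the layer-skipping described in item 2.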
PageFaultTelemetry Changes:
3. Generic bucket architecture (core/box/pagefault_telemetry_box.{c,h})
- PF_BUCKET_{C0-C7, MID, L25, SSM} for domain-specific measurement (see the sketch after this list)
- Integration: hak_pool_try_alloc(), l25_alloc_new_run(), shared_pool_allocate_superslab_unlocked()
4. Measurement results (Random Mixed 500K / 256B):
- Tiny C2-C7: 2-33 pages touched, high reuse (64 down to 3.8 touches/page)
- SSM: 512 pages (initialization footprint)
- MID/L25: 0 (unused in this workload)
- Mid/Large VM benchmarks: 80-100K page-faults (13-16x higher than Tiny)
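
A minimal sketch of the generic-bucket API named in item 3. The PF_BUCKET_* names and the pagefault_telemetry_touch() call shape match the integration points (see the diff at the bottom of this page); the fixed-size page table is an assumed, non-thread-safe implementation detail:

/* Sketch only: the page table is illustrative, not the actual box. */
#include <stdint.h>
#include <stddef.h>

typedef enum {
    PF_BUCKET_C0, PF_BUCKET_C1, PF_BUCKET_C2, PF_BUCKET_C3,
    PF_BUCKET_C4, PF_BUCKET_C5, PF_BUCKET_C6, PF_BUCKET_C7,
    PF_BUCKET_MID, PF_BUCKET_L25, PF_BUCKET_SSM,
    PF_BUCKET__COUNT
} PfBucket;

#define PF_TABLE_SLOTS 4096            /* tracks up to ~4K distinct pages/bucket */

typedef struct {
    uintptr_t pages[PF_TABLE_SLOTS];   /* page-aligned addresses, 0 = empty */
    uint64_t  distinct_pages;
    uint64_t  touches;
} PfBucketStats;

static PfBucketStats g_pf[PF_BUCKET__COUNT];

void pagefault_telemetry_touch(PfBucket b, void* addr) {
    PfBucketStats* s = &g_pf[b];
    uintptr_t page = (uintptr_t)addr & ~(uintptr_t)4095;
    s->touches++;
    /* Open-addressing insert; a repeat page counts as reuse, not a new page. */
    for (size_t i = (page >> 12) & (PF_TABLE_SLOTS - 1), n = 0;
         n < PF_TABLE_SLOTS; i = (i + 1) & (PF_TABLE_SLOTS - 1), n++) {
        if (s->pages[i] == page) return;     /* already seen */
        if (s->pages[i] == 0) {              /* first touch of this page */
            s->pages[i] = page;
            s->distinct_pages++;
            return;
        }
    }
    /* Table full: distinct count saturates; touches keep accumulating. */
}

touches / distinct_pages yields the touches-per-page reuse figures quoted in item 4.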
Ring Cache Enhancements:
5. Hot Ring Cache (core/front/tiny_ring_cache.{c,h})
- ENV: HAKMEM_TINY_HOT_RING_ENABLE=1, HAKMEM_TINY_HOT_RING_C{0-7}=size (parsed as in the sketch below)
- Conditional compilation cleanup
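
A hypothetical sketch of the per-class ENV parsing at init time; the variable names are from this commit, the helper itself is illustrative:

/* Sketch only: reads HAKMEM_TINY_HOT_RING_* into per-class sizes. */
#include <stdio.h>
#include <stdlib.h>

static int g_hot_ring_enabled;
static int g_hot_ring_size[8];       /* per-class ring capacity, C0-C7 */

static void hot_ring_read_env(void) {
    const char* e = getenv("HAKMEM_TINY_HOT_RING_ENABLE");
    g_hot_ring_enabled = (e && e[0] == '1');
    if (!g_hot_ring_enabled) return;
    for (int c = 0; c < 8; c++) {
        char name[32];
        snprintf(name, sizeof name, "HAKMEM_TINY_HOT_RING_C%d", c);
        const char* v = getenv(name);
        g_hot_ring_size[c] = v ? atoi(v) : 0;   /* 0 = class disabled */
    }
}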
Documentation:
6. Analysis reports
- RANDOM_MIXED_BOTTLENECK_ANALYSIS.md: Page-fault breakdown
- RANDOM_MIXED_SUMMARY.md: Phase 23 summary
- RING_CACHE_ACTIVATION_GUIDE.md: Ring cache usage
- CURRENT_TASK.md: Updated with Phase 23 results and Phase 24 plan
Next Steps (Phase 24):
- Target: Mid/VM PageArena/HotSpanBox (page-fault reduction 80-100K → 30-40K)
- Tiny SSM optimization deferred (low ROI; ~6K page-faults is already near-optimal)
- Expected improvement: +30-50% for Mid/Large workloads
Generated with Claude Code
Co-Authored-By: Claude <noreply@anthropic.com>
Diff (excerpt) — PF_BUCKET_MID telemetry wired into hak_pool_try_alloc():

@@ -2,6 +2,8 @@
 #ifndef POOL_API_INC_H
 #define POOL_API_INC_H
 
+#include "pagefault_telemetry_box.h" // Box PageFaultTelemetry (PF_BUCKET_MID)
+
 void* hak_pool_try_alloc(size_t size, uintptr_t site_id) {
   // Debug: IMMEDIATE output to verify function is called
   static int first_call = 1;
@@ -52,10 +54,12 @@ void* hak_pool_try_alloc(size_t size, uintptr_t site_id) {
       void* raw = (void*)tlsb;
       AllocHeader* hdr = (AllocHeader*)raw;
       mid_set_header(hdr, g_class_sizes[class_idx], site_id);
+      void* user0 = (char*)raw + HEADER_SIZE;
       mid_page_inuse_inc(raw);
       t_pool_rng ^= t_pool_rng << 13; t_pool_rng ^= t_pool_rng >> 17; t_pool_rng ^= t_pool_rng << 5;
       if ((t_pool_rng & ((1u<<g_count_sample_exp)-1u)) == 0u) g_pool.hits[class_idx]++;
-      return (char*)raw + HEADER_SIZE;
+      pagefault_telemetry_touch(PF_BUCKET_MID, user0);
+      return user0;
     }
   } else { HKM_TIME_END(HKM_CAT_TC_DRAIN, t_tc_drain); }
 }
@@ -70,9 +74,11 @@ void* hak_pool_try_alloc(size_t size, uintptr_t site_id) {
       void* raw = (void*)tlsb;
       AllocHeader* hdr = (AllocHeader*)raw;
       mid_set_header(hdr, g_class_sizes[class_idx], site_id);
+      void* user1 = (char*)raw + HEADER_SIZE;
       t_pool_rng ^= t_pool_rng << 13; t_pool_rng ^= t_pool_rng >> 17; t_pool_rng ^= t_pool_rng << 5;
       if ((t_pool_rng & ((1u<<g_count_sample_exp)-1u)) == 0u) g_pool.hits[class_idx]++;
-      return (char*)raw + HEADER_SIZE;
+      pagefault_telemetry_touch(PF_BUCKET_MID, user1);
+      return user1;
     }
   }
   if (g_tls_bin[class_idx].lo_head) {
@@ -83,10 +89,12 @@ void* hak_pool_try_alloc(size_t size, uintptr_t site_id) {
     HKM_TIME_END(HKM_CAT_POOL_TLS_LIFO_POP, t_lifo_pop0);
     void* raw = (void*)b; AllocHeader* hdr = (AllocHeader*)raw;
     mid_set_header(hdr, g_class_sizes[class_idx], site_id);
+    void* user2 = (char*)raw + HEADER_SIZE;
     mid_page_inuse_inc(raw);
     t_pool_rng ^= t_pool_rng << 13; t_pool_rng ^= t_pool_rng >> 17; t_pool_rng ^= t_pool_rng << 5;
     if ((t_pool_rng & ((1u<<g_count_sample_exp)-1u)) == 0u) g_pool.hits[class_idx]++;
-    return (char*)raw + HEADER_SIZE;
+    pagefault_telemetry_touch(PF_BUCKET_MID, user2);
+    return user2;
   }
 
   // Compute shard only when we need to access shared structures
@@ -231,9 +239,11 @@ void* hak_pool_try_alloc(size_t size, uintptr_t site_id) {
     else if (ap->page && ap->count > 0 && ap->bump < ap->end) { takeb = (PoolBlock*)(void*)ap->bump; ap->bump += (HEADER_SIZE + g_class_sizes[class_idx]); ap->count--; if (ap->bump >= ap->end || ap->count==0){ ap->page=NULL; ap->count=0; } }
     void* raw2 = (void*)takeb; AllocHeader* hdr2 = (AllocHeader*)raw2;
     mid_set_header(hdr2, g_class_sizes[class_idx], site_id);
+    void* user3 = (char*)raw2 + HEADER_SIZE;
     mid_page_inuse_inc(raw2);
     g_pool.hits[class_idx]++;
-    return (char*)raw2 + HEADER_SIZE;
+    pagefault_telemetry_touch(PF_BUCKET_MID, user3);
+    return user3;
   }
   HKM_TIME_START(t_refill);
   struct timespec ts_rf; int rf = hkm_prof_begin(&ts_rf);
@@ -266,8 +276,10 @@ void* hak_pool_try_alloc(size_t size, uintptr_t site_id) {
 
   void* raw = (void*)take; AllocHeader* hdr = (AllocHeader*)raw;
   mid_set_header(hdr, g_class_sizes[class_idx], site_id);
+  void* user4 = (char*)raw + HEADER_SIZE;
   mid_page_inuse_inc(raw);
-  return (char*)raw + HEADER_SIZE;
+  pagefault_telemetry_touch(PF_BUCKET_MID, user4);
+  return user4;
 }
 
 void hak_pool_free(void* ptr, size_t size, uintptr_t site_id) {
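
The t_pool_rng lines in the hunks above are xorshift32; combined with the power-of-two mask they bump the hits[] counters only about once per 2^g_count_sample_exp allocations, keeping counter maintenance off the common path. The idiom in isolation (the seed value here is an arbitrary assumption):

/* Sampling idiom from the diff: xorshift32 plus a power-of-two mask.
 * With exp = 6, roughly 1 in 64 calls returns true. */
#include <stdint.h>

static __thread uint32_t t_pool_rng = 0x9E3779B9u;  /* any nonzero seed */

static inline int sample_one_in_pow2(unsigned exp) {
    t_pool_rng ^= t_pool_rng << 13;
    t_pool_rng ^= t_pool_rng >> 17;
    t_pool_rng ^= t_pool_rng << 5;
    return (t_pool_rng & ((1u << exp) - 1u)) == 0u;
}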