Phase 23 Unified Cache + PageFaultTelemetry generalization: Mid/VM page-fault bottleneck identified
Summary:
- Phase 23 Unified Cache: +30% improvement (Random Mixed 256B: 18.18M → 23.68M ops/s)
- PageFaultTelemetry: Extended to generic buckets (C0-C7, MID, L25, SSM)
- Measurement-driven decision: Mid/VM page-faults (80-100K) >> Tiny (6K) → prioritize Mid/VM optimization
Phase 23 Changes:
1. Unified Cache implementation (core/front/tiny_unified_cache.{c,h})
- Direct SuperSlab carve (TLS SLL bypass)
- Self-contained pop-or-refill pattern (sketched just after this list)
- ENV: HAKMEM_TINY_UNIFIED_CACHE=1, HAKMEM_TINY_UNIFIED_C{0-7}=128
2. Fast path pruning (tiny_alloc_fast.inc.h, tiny_free_fast_v2.inc.h)
- Unified ON → direct cache access (skip all intermediate layers)
- Alloc: unified_cache_pop_or_refill() → immediate fail to slow
- Free: unified_cache_push() → fallback to SLL only if full
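
A minimal sketch of the pop-or-refill shape from (1)-(2); struct, field, and
helper names below are illustrative stand-ins, not the exact
tiny_unified_cache.h API:

    typedef struct {
        void* slot[128];  // default capacity per HAKMEM_TINY_UNIFIED_C{0-7}=128
        int   top;        // number of cached blocks
    } unified_cache_sketch_t;

    int unified_cache_refill_sketch(unified_cache_sketch_t* uc); // returns blocks carved

    // Alloc fast path: pop; on miss, refill once straight from a SuperSlab
    // carve (no TLS SLL hop); if that also fails, the caller goes slow.
    static inline void* unified_alloc_sketch(unified_cache_sketch_t* uc) {
        if (uc->top > 0) return uc->slot[--uc->top];
        if (unified_cache_refill_sketch(uc) > 0) return uc->slot[--uc->top];
        return NULL;
    }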
PageFaultTelemetry Changes:
3. Generic bucket architecture (core/box/pagefault_telemetry_box.{c,h})
- PF_BUCKET_{C0-C7, MID, L25, SSM} for domain-specific measurement (enum shape sketched after this list)
- Integration: hak_pool_try_alloc(), l25_alloc_new_run(), shared_pool_allocate_superslab_unlocked()
4. Measurement results (Random Mixed 500K / 256B):
- Tiny C2-C7: 2-33 pages, high reuse (64 down to 3.8 touches/page as pages grow)
- SSM: 512 pages (initialization footprint)
- MID/L25: 0 (unused in this workload)
- Mid/Large VM benchmarks: 80-100K page-faults (13-16x higher than Tiny)
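
A sketch of the generic bucket space from (3), under the assumption that it
reduces to a small enum (the shipped names live in pagefault_telemetry_box.h):

    typedef enum {
        PF_BUCKET_C0 = 0,   /* Tiny classes C0..C7 occupy 0..7 (C1..C6 elided) */
        PF_BUCKET_C7 = 7,
        PF_BUCKET_MID,      /* Mid allocator pages */
        PF_BUCKET_L25,      /* L25 run pages */
        PF_BUCKET_SS_META,  /* SuperSlab metadata ("SSM" in dumps) */
        PF_BUCKET_MAX
    } pf_bucket_t;

For the figures in (4), touches/page = touches / approx_pages, where
approx_pages is the popcount of a per-bucket 1024-bit bloom; e.g. 128 touches
landing on 2 distinct pages report 64.0 touches/page. Hash collisions can only
merge bits, so approx_pages is a lower bound and touches/page an upper bound.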
Ring Cache Enhancements:
5. Hot Ring Cache (core/front/tiny_ring_cache.{c,h})
- ENV: HAKMEM_TINY_HOT_RING_ENABLE=1, HAKMEM_TINY_HOT_RING_C{0-7}=size (parsing sketched below)
- Conditional compilation cleanup
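
Per-class ring sizes are plain integers in the environment; a hedged sketch of
how HAKMEM_TINY_HOT_RING_C{0-7} could be read (the helper name is hypothetical):

    static int hot_ring_size_sketch(int cls, int dflt) {
        char key[32];
        snprintf(key, sizeof(key), "HAKMEM_TINY_HOT_RING_C%d", cls);
        const char* v = getenv(key);
        return (v && *v) ? atoi(v) : dflt;   // fall back to built-in default
    }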
Documentation:
6. Analysis reports
- RANDOM_MIXED_BOTTLENECK_ANALYSIS.md: Page-fault breakdown
- RANDOM_MIXED_SUMMARY.md: Phase 23 summary
- RING_CACHE_ACTIVATION_GUIDE.md: Ring cache usage
- CURRENT_TASK.md: Updated with Phase 23 results and Phase 24 plan
Next Steps (Phase 24):
- Target: Mid/VM PageArena/HotSpanBox (page-fault reduction 80-100K → 30-40K)
- Tiny SSM optimization deferred (low ROI, ~6K page-faults already optimal)
- Expected improvement: +30-50% for Mid/Large workloads
Generated with Claude Code
Co-Authored-By: Claude <noreply@anthropic.com>
// pagefault_telemetry_box.c - Box PageFaultTelemetry implementation

#include "pagefault_telemetry_box.h"

#include "../hakmem_tiny_stats_api.h" // For macros / flags
#include "../hakmem_stats_master.h" // Phase 4d: Master stats control

#include <stdio.h>
#include <stdlib.h>

// Per-thread state
__thread uint64_t g_pf_bloom[PF_BUCKET_MAX][16] = {{0}};
__thread uint64_t g_pf_touch[PF_BUCKET_MAX] = {0};
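
// ---------------------------------------------------------------------------
// Illustrative sketch (assumption, not the shipped recording API): how a page
// touch would fold into the per-bucket 1024-bit bloom above. The real helper
// is expected to live in pagefault_telemetry_box.h; the name and hash mix
// below are hypothetical.
// ---------------------------------------------------------------------------
static inline __attribute__((unused)) void pf_touch_sketch(int bucket, const void* addr) {
    uintptr_t page = (uintptr_t)addr >> 12;               // 4KiB page number (assumed page size)
    uint64_t h = (uint64_t)page * 0x9E3779B97F4A7C15ull;  // Fibonacci-style mix
    unsigned bit = (unsigned)(h >> 54);                   // top 10 bits -> 0..1023
    g_pf_bloom[bucket][bit >> 6] |= 1ull << (bit & 63u);  // set one of 16x64 bits
    g_pf_touch[bucket]++;                                 // exact touch count
}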

// Enable flag (cached)
int pagefault_telemetry_enabled(void) {
    static int g_enabled = -1;
    if (__builtin_expect(g_enabled == -1, 0)) {
        const char* env = getenv("HAKMEM_TINY_PAGEFAULT_TELEMETRY");
        g_enabled = (env && *env && *env != '0') ? 1 : 0;
    }
    return g_enabled;
}

// Dump helper
void pagefault_telemetry_dump(void) {
    if (!pagefault_telemetry_enabled()) {
        return;
    }

    if (!hak_stats_check("HAKMEM_TINY_PAGEFAULT_DUMP", "pagefault")) {
        return;
    }

    fprintf(stderr, "\n========== Box PageFaultTelemetry: Tiny Page Touch Stats ==========\n");
    fprintf(stderr, "Note: pages ~= popcount(1024-bit bloom); collisions → lower-bound approximation\n\n");
    fprintf(stderr, "%-5s %12s %12s %12s\n", "Bucket", "touches", "approx_pages", "touches/page");
    fprintf(stderr, "------|------------|------------|------------\n");

    for (int b = 0; b < PF_BUCKET_MAX; b++) {
        uint64_t touches = g_pf_touch[b];
        if (touches == 0) {
            continue;
        }

        uint64_t bits = 0;
        for (int w = 0; w < 16; w++) {
            bits += (uint64_t)__builtin_popcountll(g_pf_bloom[b][w]);
        }

        double pages = (double)bits;
        double tpp = pages > 0.0 ? (double)touches / pages : 0.0;

        const char* name = NULL;
        char buf[8];
        if (b < PF_BUCKET_TINY_LIMIT) {
            snprintf(buf, sizeof(buf), "C%d", b);
            name = buf;
        } else if (b == PF_BUCKET_MID) {
            name = "MID";
        } else if (b == PF_BUCKET_L25) {
            name = "L25";
        } else if (b == PF_BUCKET_SS_META) {
            name = "SSM";
        } else {
            snprintf(buf, sizeof(buf), "X%d", b);
            name = buf;
        }

        fprintf(stderr, "%-5s %12llu %12llu %12.1f\n",
                name,
                (unsigned long long)touches,
                (unsigned long long)bits,
                tpp);
    }

    fprintf(stderr, "===============================================================\n\n");
}
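
// Usage note (assumed workflow): enable recording and dump via the env knobs
// above, e.g. HAKMEM_TINY_PAGEFAULT_TELEMETRY=1 HAKMEM_TINY_PAGEFAULT_DUMP=1
// before a bench run; read "approx_pages" as a lower bound on distinct pages
// touched, so "touches/page" is an upper bound on reuse.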

// Auto-dump at process exit (destructor; expected to fire once in bench-style runs)
static void pagefault_telemetry_atexit(void) __attribute__((destructor));
static void pagefault_telemetry_atexit(void) {
    pagefault_telemetry_dump();
}