Summary:
- Phase 23 Unified Cache: +30% improvement (Random Mixed 256B: 18.18M → 23.68M ops/s)
- PageFaultTelemetry: Extended to generic buckets (C0-C7, MID, L25, SSM)
- Measurement-driven decision: Mid/VM page-faults (80-100K) >> Tiny (6K) → prioritize Mid/VM optimization
Phase 23 Changes:
1. Unified Cache implementation (core/front/tiny_unified_cache.{c,h})
- Direct SuperSlab carve (TLS SLL bypass)
- Self-contained pop-or-refill pattern (sketched after this list)
- ENV: HAKMEM_TINY_UNIFIED_CACHE=1, HAKMEM_TINY_UNIFIED_C{0-7}=128
2. Fast path pruning (tiny_alloc_fast.inc.h, tiny_free_fast_v2.inc.h)
- Unified ON → direct cache access (skip all intermediate layers)
- Alloc: unified_cache_pop_or_refill() → on miss, fail straight to the slow path
- Free: unified_cache_push() → fall back to the SLL only when the cache is full
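A minimal sketch of the pop-or-refill pattern for reviewers who have not read core/front/tiny_unified_cache.{c,h}. Only the function names and ENV knobs come from this change; the struct layout and the superslab_carve_batch() helper are illustrative assumptions.

#include <stdint.h>
#include <stddef.h>

#define UCACHE_CAP 128  /* per-class capacity, set via HAKMEM_TINY_UNIFIED_C{0-7} */

typedef struct {
    void*    slots[UCACHE_CAP];
    uint32_t count;
} UnifiedCache;

static __thread UnifiedCache g_ucache[8];  /* one per tiny class C0-C7 */

/* Hypothetical helper: carve up to n fresh blocks of class cls straight
 * from the SuperSlab (the "direct carve, TLS SLL bypass" in this change). */
extern uint32_t superslab_carve_batch(int cls, void** out, uint32_t n);

static inline void* unified_cache_pop_or_refill(int cls) {
    UnifiedCache* c = &g_ucache[cls];
    if (c->count == 0) {
        c->count = superslab_carve_batch(cls, c->slots, UCACHE_CAP);
        if (c->count == 0) return NULL;  /* immediate fail -> slow path */
    }
    return c->slots[--c->count];
}

static inline int unified_cache_push(int cls, void* p) {
    UnifiedCache* c = &g_ucache[cls];
    if (c->count == UCACHE_CAP) return 0;  /* full -> caller falls back to TLS SLL */
    c->slots[c->count++] = p;
    return 1;
}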
PageFaultTelemetry Changes:
3. Generic bucket architecture (core/box/pagefault_telemetry_box.{c,h})
- PF_BUCKET_{C0-C7, MID, L25, SSM} for domain-specific measurement (counter sketch after this list)
- Integration: hak_pool_try_alloc(), l25_alloc_new_run(), shared_pool_allocate_superslab_unlocked()
4. Measurement results (Random Mixed 500K / 256B):
- Tiny C2-C7: 2-33 pages, high reuse (3.8-64 touches per page)
- SSM: 512 pages (initialization footprint)
- MID/L25: 0 (unused in this workload)
- Mid/Large VM benchmarks: 80-100K page-faults (13-16x higher than Tiny)
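A sketch of the generic bucket counters, assuming a simple atomic-counter layout. Only the PF_BUCKET_* names and the listed call sites come from this change; pf_record_pages() is a hypothetical recording hook.

#include <stdatomic.h>
#include <stdint.h>

typedef enum {
    PF_BUCKET_C0, PF_BUCKET_C1, PF_BUCKET_C2, PF_BUCKET_C3,
    PF_BUCKET_C4, PF_BUCKET_C5, PF_BUCKET_C6, PF_BUCKET_C7,
    PF_BUCKET_MID, PF_BUCKET_L25, PF_BUCKET_SSM,
    PF_BUCKET_COUNT
} pf_bucket_t;

static _Atomic uint64_t g_pf_pages[PF_BUCKET_COUNT];
static _Atomic uint64_t g_pf_touches[PF_BUCKET_COUNT];

/* Called from e.g. hak_pool_try_alloc() or l25_alloc_new_run() when a domain
 * maps fresh pages; touches/pages gives the reuse ratio reported above. */
static inline void pf_record_pages(pf_bucket_t b, uint64_t pages, uint64_t touches) {
    atomic_fetch_add_explicit(&g_pf_pages[b], pages, memory_order_relaxed);
    atomic_fetch_add_explicit(&g_pf_touches[b], touches, memory_order_relaxed);
}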
Ring Cache Enhancements:
5. Hot Ring Cache (core/front/tiny_ring_cache.{c,h})
- ENV: HAKMEM_TINY_HOT_RING_ENABLE=1, HAKMEM_TINY_HOT_RING_C{0-7}=size (parsing example after this list)
- Conditional compilation cleanup
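Illustrative only: how these knobs might be read at startup. The variable names are from this change; the parsing code is an assumption.

#include <stdio.h>
#include <stdlib.h>

static int hot_ring_enabled(void) {
    const char* v = getenv("HAKMEM_TINY_HOT_RING_ENABLE");
    return v && v[0] == '1';
}

static long hot_ring_capacity(int cls) {  /* cls in [0, 7] */
    char name[32];
    snprintf(name, sizeof name, "HAKMEM_TINY_HOT_RING_C%d", cls);
    const char* v = getenv(name);
    return v ? strtol(v, NULL, 10) : 0;   /* 0 = keep the built-in default */
}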
Documentation:
6. Analysis reports
- RANDOM_MIXED_BOTTLENECK_ANALYSIS.md: Page-fault breakdown
- RANDOM_MIXED_SUMMARY.md: Phase 23 summary
- RING_CACHE_ACTIVATION_GUIDE.md: Ring cache usage
- CURRENT_TASK.md: Updated with Phase 23 results and Phase 24 plan
Next Steps (Phase 24):
- Target: Mid/VM PageArena/HotSpanBox (page-fault reduction 80-100K → 30-40K)
- Tiny SSM optimization deferred (low ROI, ~6K page-faults already optimal)
- Expected improvement: +30-50% for Mid/Large workloads
Generated with Claude Code
Co-Authored-By: Claude <noreply@anthropic.com>
// front_gate_classifier.c - Box FG: Pointer Classification Implementation

// CRITICAL: Box FG requires header-based classification
// Ensure HEADER_MAGIC and HEADER_CLASS_MASK are available
#ifndef HAKMEM_TINY_HEADER_CLASSIDX
#define HAKMEM_TINY_HEADER_CLASSIDX 1
#endif

#include <stdio.h>    // For fprintf in debug
#include <stdlib.h>   // For abort in debug
#include <sys/mman.h> // For mincore() in Step 3 safety check
#include "front_gate_classifier.h"
#include "../tiny_region_id.h" // Must come before hakmem_tiny_superslab.h for HEADER_MAGIC
#include "../hakmem_tiny_superslab.h"
#include "../superslab/superslab_inline.h" // For ss_slabs_capacity
#include "../hakmem_build_flags.h"
#include "../hakmem_internal.h"    // AllocHeader, HAKMEM_MAGIC, HEADER_SIZE, hak_is_memory_readable
#include "../hakmem_tiny_config.h" // For TINY_NUM_CLASSES, SLAB_SIZE
#include "../hakmem_super_registry.h" // For hak_super_lookup (Box REG)

#ifdef HAKMEM_POOL_TLS_PHASE1
#include "../pool_tls_registry.h" // Safer pool pointer lookup (no header deref)
#endif

// ========== Debug Stats ==========

#if !HAKMEM_BUILD_RELEASE
__thread uint64_t g_classify_header_hit = 0;
__thread uint64_t g_classify_headerless_hit = 0;
__thread uint64_t g_classify_pool_hit = 0;
__thread uint64_t g_classify_mid_large_hit = 0;
__thread uint64_t g_classify_unknown_hit = 0;

void front_gate_print_stats(void) {
    uint64_t total = g_classify_header_hit + g_classify_headerless_hit +
                     g_classify_pool_hit + g_classify_mid_large_hit +
                     g_classify_unknown_hit;
    if (total == 0) return;

    fprintf(stderr, "\n========== Front Gate Classification Stats ==========\n");
    fprintf(stderr, "Header (C0-C6):   %lu (%.2f%%)\n",
            g_classify_header_hit, 100.0 * g_classify_header_hit / total);
    fprintf(stderr, "Headerless (C7):  %lu (%.2f%%)\n",
            g_classify_headerless_hit, 100.0 * g_classify_headerless_hit / total);
    fprintf(stderr, "Pool TLS:         %lu (%.2f%%)\n",
            g_classify_pool_hit, 100.0 * g_classify_pool_hit / total);
    fprintf(stderr, "Mid-Large (MMAP): %lu (%.2f%%)\n",
            g_classify_mid_large_hit, 100.0 * g_classify_mid_large_hit / total);
    fprintf(stderr, "Unknown:          %lu (%.2f%%)\n",
            g_classify_unknown_hit, 100.0 * g_classify_unknown_hit / total);
    fprintf(stderr, "Total:            %lu\n", total);
    fprintf(stderr, "======================================================\n");
}

static void __attribute__((destructor)) front_gate_stats_destructor(void) {
    front_gate_print_stats();
}
#endif

// ========== Safe Header Probe ==========

// Try to read the 1-byte header at ptr-1 (safe conditions only)
// Returns: class_idx (0-7) on success, -1 on failure
//
// Safety conditions:
//   1. Same page: (ptr & 0xFFF) >= 1 → header won't cross a page boundary
//   2. Valid magic: (header & 0xF0) == HEADER_MAGIC (0xa0)
//   3. Valid class: class_idx in range [0, 7]
//
// Performance: 2-3 cycles (L1 cache hit)
static inline int safe_header_probe(void* ptr) {
    // Reject obviously invalid/sentinel-sized pointers (defense-in-depth)
    if ((uintptr_t)ptr < 4096) {
        return -1;
    }
    // Safety check: header must be in the same page as ptr
    uintptr_t offset_in_page = (uintptr_t)ptr & 0xFFF;
    if (offset_in_page == 0) {
        // ptr is page-aligned → header would be on the previous page (unsafe)
        return -1;
    }

    // Safe to read header (same page guaranteed)
    uint8_t* header_ptr = (uint8_t*)ptr - 1;
    uint8_t header = *header_ptr;

    // Validate magic
    if ((header & 0xF0) != HEADER_MAGIC) {
        return -1; // Not a Tiny header
    }

    // Extract class index
    int class_idx = header & HEADER_CLASS_MASK;

    // Phase E1-CORRECT: Validate class range (all classes 0-7 valid)
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES) {
        return -1; // Invalid class
    }

    return class_idx;
}

// ========== Registry Lookup ==========

// Look up a pointer in the SuperSlab registry (fallback when the header probe fails)
// Returns: classification result with SuperSlab + class_idx + slab_idx
//
// Performance: 50-100 cycles (hash lookup + validation)
static inline ptr_classification_t registry_lookup(void* ptr) {
    ptr_classification_t result = {
        .kind = PTR_KIND_UNKNOWN,
        .class_idx = -1,
        .ss = NULL,
        .slab_idx = -1
    };

    // Query SuperSlab registry
    struct SuperSlab* ss = hak_super_lookup(ptr);
    if (!ss || ss->magic != SUPERSLAB_MAGIC) {
        // Not in Tiny registry
        return result;
    }

    // Found SuperSlab - determine slab index from ptr-1 (block base)
    result.ss = ss;

    uintptr_t ptr_addr = (uintptr_t)ptr;
    uintptr_t ss_addr = (uintptr_t)ss;
    if (ptr_addr <= ss_addr) {
        result.kind = PTR_KIND_UNKNOWN;
        return result;
    }

    // Use the block base for the slab index to stay consistent with the free paths
    uintptr_t base_addr = ptr_addr - 1;
    size_t offset = base_addr - ss_addr;
    int slab_idx = (int)(offset / SLAB_SIZE);
    if (slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) {
        result.kind = PTR_KIND_UNKNOWN;
        return result;
    }

    result.slab_idx = slab_idx;
    TinySlabMeta* meta = &ss->slabs[slab_idx];
    int cls = (meta->class_idx < TINY_NUM_CLASSES) ? (int)meta->class_idx : -1;
    result.class_idx = cls;

    if (cls == 7) {
        // 1KB headerless tiny
        result.kind = PTR_KIND_TINY_HEADERLESS;
    } else if (cls >= 0) {
        // Other tiny classes with a 1-byte header
        result.kind = PTR_KIND_TINY_HEADER;
    } else {
        result.kind = PTR_KIND_UNKNOWN;
    }

    return result;
}

// ========== Pool TLS Probe ==========

#ifdef HAKMEM_POOL_TLS_PHASE1
// Registry-based Pool TLS probe (no memory deref)
static inline int is_pool_tls_reg(void* ptr) {
    pid_t tid = 0; int cls = -1;
    return pool_reg_lookup(ptr, &tid, &cls);
}
#endif

// ========== Front Gate Entry Point ==========

ptr_classification_t classify_ptr(void* ptr) {
    ptr_classification_t result = {
        .kind = PTR_KIND_UNKNOWN,
        .class_idx = -1,
        .ss = NULL,
        .slab_idx = -1
    };

    if (!ptr) return result;
    // Early guard: reject small-integer/sentinel values so the ptr-1 probe cannot fault
    if ((uintptr_t)ptr < 4096) {
        result.kind = PTR_KIND_UNKNOWN;
        return result;
    }

    // ========== FAST PATH: Header-Based Classification ==========
    // Performance: 2-5 cycles (vs 50-100 cycles for a registry lookup)
    // Rationale: Tiny (0xa0) and Pool TLS (0xb0) use distinct magic bytes
    //
    // Safety checks:
    //   1. Same-page guard: header must be in the same page as ptr
    //   2. Magic validation: distinguish Tiny/Pool/Unknown
    //
    uintptr_t offset_in_page = (uintptr_t)ptr & 0xFFF;
    if (offset_in_page >= 1) {
        // Safe to read the header (won't cross a page boundary)
        uint8_t header = *((uint8_t*)ptr - 1);
        uint8_t magic = header & 0xF0;

        // Fast path: Tiny allocation (magic = 0xa0)
        if (magic == HEADER_MAGIC) { // HEADER_MAGIC = 0xa0
            int class_idx = header & HEADER_CLASS_MASK;
            if (class_idx >= 0 && class_idx < TINY_NUM_CLASSES) {
                result.kind = PTR_KIND_TINY_HEADER;
                result.class_idx = class_idx;
#if !HAKMEM_BUILD_RELEASE
                g_classify_header_hit++;
#endif
                return result;
            }
        }

#ifdef HAKMEM_POOL_TLS_PHASE1
        // Fast path: Pool TLS allocation (magic = 0xb0)
        if (magic == 0xb0) { // POOL_MAGIC
            result.kind = PTR_KIND_POOL_TLS;
#if !HAKMEM_BUILD_RELEASE
            g_classify_pool_hit++;
#endif
            return result;
        }
#endif
    }

    // ========== SLOW PATH: Registry Lookup (Fallback) ==========
    // Used when:
    //   - ptr is page-aligned (offset_in_page == 0)
    //   - the magic doesn't match Tiny/Pool (0xa0/0xb0)
    //   - headerless allocations (C7 1KB class, if present)
    //

#ifdef HAKMEM_POOL_TLS_PHASE1
    // Check Pool TLS registry (for page-aligned pointers)
    if (is_pool_tls_reg(ptr)) {
        result.kind = PTR_KIND_POOL_TLS;
#if !HAKMEM_BUILD_RELEASE
        g_classify_pool_hit++;
#endif
        return result;
    }
#endif

    // Registry lookup for Tiny (header or headerless)
    result = registry_lookup(ptr);
    if (result.kind == PTR_KIND_TINY_HEADERLESS) {
#if !HAKMEM_BUILD_RELEASE
        g_classify_headerless_hit++;
#endif
        return result;
    }
    if (result.kind == PTR_KIND_TINY_HEADER) {
#if !HAKMEM_BUILD_RELEASE
        g_classify_header_hit++;
#endif
        return result;
    }

    // Check for a Mid-Large allocation with an AllocHeader (MMAP/POOL/L25_POOL)
    // The AllocHeader is placed before the user pointer (user_ptr - HEADER_SIZE)
    //
    // Safety check: need at least HEADER_SIZE (40 bytes) before ptr to read the AllocHeader.
    // If ptr is too close to the page start, skip this check (avoid SEGV).
    uintptr_t offset_in_page_for_hdr = (uintptr_t)ptr & 0xFFF;
    if (offset_in_page_for_hdr >= HEADER_SIZE) {
        // Safe to read the AllocHeader (won't cross a page boundary)
        AllocHeader* hdr = hak_header_from_user(ptr);
        if (hak_header_validate(hdr)) {
            // Valid HAKMEM header found
            if (hdr->method == ALLOC_METHOD_MMAP ||
                hdr->method == ALLOC_METHOD_POOL ||
                hdr->method == ALLOC_METHOD_L25_POOL) {
                result.kind = PTR_KIND_MID_LARGE;
                result.ss = NULL;
#if !HAKMEM_BUILD_RELEASE
                g_classify_mid_large_hit++;
#endif
                return result;
            }
        }
    }

    // Unknown pointer (external allocation or Mid/Large)
    // Let the free wrapper handle Mid/Large registry lookups
    result.kind = PTR_KIND_UNKNOWN;
#if !HAKMEM_BUILD_RELEASE
    g_classify_unknown_hit++;
#endif

    return result;
}
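
Usage sketch (not part of the file above): how a free() wrapper might consume classify_ptr(). The PTR_KIND_* values and ptr_classification_t fields come from front_gate_classifier.h; the dispatch helpers declared here are hypothetical stand-ins for the real targets.

/* Hypothetical dispatch targets, for illustration only */
extern void tiny_free_with_header(void* p, int class_idx);
extern void tiny_free_headerless(void* p, struct SuperSlab* ss, int slab_idx);
extern void pool_tls_free(void* p);
extern void mid_large_free(void* p);
extern void external_free_fallback(void* p);

void hak_free_sketch(void* p) {
    ptr_classification_t c = classify_ptr(p);
    switch (c.kind) {
    case PTR_KIND_TINY_HEADER:      // 1-byte header: 0xa0 | class_idx
        tiny_free_with_header(p, c.class_idx);
        break;
    case PTR_KIND_TINY_HEADERLESS:  // C7 1KB class, located via the registry
        tiny_free_headerless(p, c.ss, c.slab_idx);
        break;
    case PTR_KIND_POOL_TLS:
        pool_tls_free(p);
        break;
    case PTR_KIND_MID_LARGE:
        mid_large_free(p);
        break;
    default:                        // PTR_KIND_UNKNOWN: defer to the wrapper's fallback
        external_free_fallback(p);
        break;
    }
}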