// front_gate_classifier.c - Box FG: Pointer Classification Implementation

// CRITICAL: Box FG requires header-based classification

// Ensure HEADER_MAGIC and HEADER_CLASS_MASK are available
#ifndef HAKMEM_TINY_HEADER_CLASSIDX
#define HAKMEM_TINY_HEADER_CLASSIDX 1
#endif

#include <stdio.h>    // For fprintf in debug
#include <stdlib.h>   // For abort in debug
#include <sys/mman.h> // For mincore() in Step 3 safety check

#include "front_gate_classifier.h"
#include "../tiny_region_id.h"             // Must come before hakmem_tiny_superslab.h for HEADER_MAGIC
#include "../hakmem_tiny_superslab.h"
#include "../superslab/superslab_inline.h" // For ss_slabs_capacity
#include "../hakmem_build_flags.h"
#include "../hakmem_internal.h"            // AllocHeader, HAKMEM_MAGIC, HEADER_SIZE, hak_is_memory_readable
#include "../hakmem_tiny_config.h"         // For TINY_NUM_CLASSES, SLAB_SIZE
#include "../hakmem_super_registry.h"      // For hak_super_lookup (Box REG)

#ifdef HAKMEM_POOL_TLS_PHASE1
#include "../pool_tls_registry.h"          // Safer pool pointer lookup (no header deref)
#endif

// ========== Debug Stats ==========

#if !HAKMEM_BUILD_RELEASE
__thread uint64_t g_classify_header_hit = 0;
__thread uint64_t g_classify_headerless_hit = 0;
__thread uint64_t g_classify_pool_hit = 0;
__thread uint64_t g_classify_mid_large_hit = 0;
__thread uint64_t g_classify_unknown_hit = 0;

void front_gate_print_stats(void) {
    uint64_t total = g_classify_header_hit + g_classify_headerless_hit +
                     g_classify_pool_hit + g_classify_mid_large_hit +
                     g_classify_unknown_hit;
    if (total == 0) return;

    fprintf(stderr, "\n========== Front Gate Classification Stats ==========\n");
    fprintf(stderr, "Header (C0-C6): %lu (%.2f%%)\n",
            g_classify_header_hit, 100.0 * g_classify_header_hit / total);
    fprintf(stderr, "Headerless (C7): %lu (%.2f%%)\n",
            g_classify_headerless_hit, 100.0 * g_classify_headerless_hit / total);
    fprintf(stderr, "Pool TLS: %lu (%.2f%%)\n",
            g_classify_pool_hit, 100.0 * g_classify_pool_hit / total);
    fprintf(stderr, "Mid-Large (MMAP): %lu (%.2f%%)\n",
            g_classify_mid_large_hit, 100.0 * g_classify_mid_large_hit / total);
    fprintf(stderr, "Unknown: %lu (%.2f%%)\n",
            g_classify_unknown_hit, 100.0 * g_classify_unknown_hit / total);
    fprintf(stderr, "Total: %lu\n", total);
    fprintf(stderr, "======================================================\n");
}

static void __attribute__((destructor)) front_gate_stats_destructor(void) {
    front_gate_print_stats();
}
#endif
// ========== Safe Header Probe ==========

// Try to read 1-byte header at ptr-1 (safe conditions only)
// Returns: class_idx (0-7) on success, -1 on failure
//
// Safety conditions:
//   1. Same page: (ptr & 0xFFF) >= 1 → header won't cross page boundary
//   2. Valid magic: (header & 0xF0) == HEADER_MAGIC (0xa0)
//   3. Valid class: class_idx in range [0, 7]
//
// Performance: 2-3 cycles (L1 cache hit)
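//
// Worked example (illustrative; assumes HEADER_MAGIC == 0xa0 and HEADER_CLASS_MASK == 0x07,
// as the C0-C7 range above implies — the actual definitions live in the included tiny headers):
//   header byte 0xa3 → (0xa3 & 0xF0) == 0xa0 (Tiny magic), 0xa3 & 0x07 == 3 → class_idx 3
//   header byte 0x5c → (0x5c & 0xF0) != 0xa0 → not a Tiny header, probe returns -1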
static inline int safe_header_probe(void* ptr) {
    // Reject obviously invalid/sentinel-sized pointers (defense-in-depth)
    if ((uintptr_t)ptr < 4096) {
        return -1;
    }

    // Safety check: header must be in same page as ptr
    uintptr_t offset_in_page = (uintptr_t)ptr & 0xFFF;
    if (offset_in_page == 0) {
        // ptr is page-aligned → header would be on previous page (unsafe)
        return -1;
    }

    // Safe to read header (same page guaranteed)
    uint8_t* header_ptr = (uint8_t*)ptr - 1;
    uint8_t header = *header_ptr;

    // Validate magic
    if ((header & 0xF0) != HEADER_MAGIC) {
        return -1; // Not a Tiny header
    }

    // Extract class index
    int class_idx = header & HEADER_CLASS_MASK;
    // Phase E1-CORRECT: Validate class range (all classes 0-7 valid)
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES) {
        return -1; // Invalid class
    }

    return class_idx;
}

// ========== Registry Lookup ==========

// Lookup pointer in SuperSlab registry (fallback when header probe fails)
// Returns: classification result with SuperSlab + class_idx + slab_idx
//
// Performance: 50-100 cycles (hash lookup + validation)
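//
// Worked example (illustrative; assumes SLAB_SIZE == 64 KiB — the actual value is
// defined in hakmem_tiny_config.h): for a user pointer at ss_base + 0x20040, the
// block base is ptr - 1 = ss_base + 0x2003F, so slab_idx = 0x2003F / 0x10000 = 2,
// and slabs[2].class_idx supplies the size class.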
static inline ptr_classification_t registry_lookup(void* ptr) {
    ptr_classification_t result = {
        .kind = PTR_KIND_UNKNOWN,
        .class_idx = -1,
        .ss = NULL,
        .slab_idx = -1
    };

    // Query SuperSlab registry
    struct SuperSlab* ss = hak_super_lookup(ptr);
    if (!ss || ss->magic != SUPERSLAB_MAGIC) {
        // Not in Tiny registry
        return result;
    }

    // Found SuperSlab - determine slab index from ptr-1 (block base)
    result.ss = ss;

    uintptr_t ptr_addr = (uintptr_t)ptr;
    uintptr_t ss_addr = (uintptr_t)ss;
    if (ptr_addr <= ss_addr) {
        result.kind = PTR_KIND_UNKNOWN;
        return result;
    }

    // Use block base for slab index to be consistent with free paths
    uintptr_t base_addr = ptr_addr - 1;
    size_t offset = base_addr - ss_addr;
    int slab_idx = (int)(offset / SLAB_SIZE);
    if (slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) {
        result.kind = PTR_KIND_UNKNOWN;
        return result;
    }

    result.slab_idx = slab_idx;
    TinySlabMeta* meta = &ss->slabs[slab_idx];
    int cls = (meta->class_idx < TINY_NUM_CLASSES) ? (int)meta->class_idx : -1;
    result.class_idx = cls;

    if (cls == 7) {
        // 1KB headerless tiny
        result.kind = PTR_KIND_TINY_HEADERLESS;
    } else if (cls >= 0) {
        // Other tiny classes with 1-byte header
        result.kind = PTR_KIND_TINY_HEADER;
    } else {
        result.kind = PTR_KIND_UNKNOWN;
    }

    return result;
}
// ========== Pool TLS Probe ==========

#ifdef HAKMEM_POOL_TLS_PHASE1
// Registry-based Pool TLS probe (no memory deref)
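// Unlike the Tiny probe, this path never dereferences ptr-1; it only consults the
// Pool TLS registry, so it is safe even for page-aligned or foreign pointers.
// The tid/class outputs of pool_reg_lookup() are discarded; only membership matters here.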
static inline int is_pool_tls_reg(void* ptr) {
    pid_t tid = 0; int cls = -1;
    return pool_reg_lookup(ptr, &tid, &cls);
}
#endif

// ========== Front Gate Entry Point ==========
ptr_classification_t classify_ptr(void* ptr) {
    ptr_classification_t result = {
        .kind = PTR_KIND_UNKNOWN,
        .class_idx = -1,
        .ss = NULL,
        .slab_idx = -1
    };

    if (!ptr) return result;
    // Early guard: reject non-canonical tiny integers to avoid ptr-1 probe crashes
    if ((uintptr_t)ptr < 4096) {
        result.kind = PTR_KIND_UNKNOWN;
        return result;
    }

    // ========== FAST PATH: Header-Based Classification ==========
    // Performance: 2-5 cycles (vs 50-100 cycles for registry lookup)
    // Rationale: Tiny (0xa0) and Pool TLS (0xb0) use distinct magic bytes
    //
    // Safety checks:
    //   1. Same-page guard: header must be in same page as ptr
    //   2. Magic validation: distinguish Tiny/Pool/Unknown
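    //
    // Dispatch summary (magic = upper nibble of the byte at ptr-1):
    //   0xa0 → Tiny candidate, confirmed against the SuperSlab registry below
    //   0xb0 → Pool TLS (when HAKMEM_POOL_TLS_PHASE1 is enabled)
    //   anything else → fall through to the slow-path registry probes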
    uintptr_t offset_in_page = (uintptr_t)ptr & 0xFFF;
    if (offset_in_page >= 1) {
        // Safe to read header (won't cross page boundary)
        uint8_t header = *((uint8_t*)ptr - 1);
        uint8_t magic = header & 0xF0;

        // Fast path: Tiny allocation (magic = 0xa0) — guarded by SuperSlab registry
        if (magic == HEADER_MAGIC) { // HEADER_MAGIC = 0xa0
            int class_idx = header & HEADER_CLASS_MASK;
            if (class_idx >= 0 && class_idx < TINY_NUM_CLASSES) {
                SuperSlab* ss = hak_super_lookup(ptr);
                if (ss && ss->magic == SUPERSLAB_MAGIC) {
                    result.kind = PTR_KIND_TINY_HEADER;
                    result.class_idx = class_idx;
                    result.ss = ss;
#if !HAKMEM_BUILD_RELEASE
                    g_classify_header_hit++;
#endif
                    return result;
                } else {
                    // Not registered in any SuperSlab → outside hakmem; do not treat as Tiny.
                    result.kind = PTR_KIND_UNKNOWN;
                    return result;
                }
            }
        }

#ifdef HAKMEM_POOL_TLS_PHASE1
        // Fast path: Pool TLS allocation (magic = 0xb0)
        if (magic == 0xb0) { // POOL_MAGIC
            result.kind = PTR_KIND_POOL_TLS;
#if !HAKMEM_BUILD_RELEASE
            g_classify_pool_hit++;
#endif
            return result;
        }
#endif
    }

    // ========== SLOW PATH: Registry Lookup (Fallback) ==========
    // Used when:
    //   - ptr is page-aligned (offset_in_page == 0)
    //   - magic doesn't match Tiny/Pool (0xa0/0xb0)
    //   - Headerless allocations (C7 1KB class, if exists)
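    //
    // Example: a headerless C7 (1KB) block handed back exactly at a page boundary has
    // no readable header byte in the same page, so it can only be classified here via
    // hak_super_lookup() on the SuperSlab registry.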
#ifdef HAKMEM_POOL_TLS_PHASE1
    // Check Pool TLS registry (for page-aligned pointers)
    if (is_pool_tls_reg(ptr)) {
        result.kind = PTR_KIND_POOL_TLS;
#if !HAKMEM_BUILD_RELEASE
        g_classify_pool_hit++;
#endif
        return result;
    }
#endif

    // Registry lookup for Tiny (header or headerless)
    result = registry_lookup(ptr);
    if (result.kind == PTR_KIND_TINY_HEADERLESS) {
#if !HAKMEM_BUILD_RELEASE
        g_classify_headerless_hit++;
#endif
        return result;
    }
    if (result.kind == PTR_KIND_TINY_HEADER) {
#if !HAKMEM_BUILD_RELEASE
        g_classify_header_hit++;
#endif
        return result;
    }
    // Check for Mid-Large allocation with AllocHeader (MMAP/POOL/L25_POOL)
    // AllocHeader is placed before user pointer (user_ptr - HEADER_SIZE)
    //
    // Safety check: Need at least HEADER_SIZE (40 bytes) before ptr to read AllocHeader
    // If ptr is too close to page start, skip this check (avoid SEGV)
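    //
    // Layout sketch (HEADER_SIZE is 40 bytes per the comment above):
    //   [AllocHeader ... 40 bytes ...][user data ...]
    //   ^ hdr = user_ptr - HEADER_SIZE ^ user_ptr (what the caller holds)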
    uintptr_t offset_in_page_for_hdr = (uintptr_t)ptr & 0xFFF;
    if (offset_in_page_for_hdr >= HEADER_SIZE) {
        // Safe to read AllocHeader (won't cross page boundary)
        AllocHeader* hdr = hak_header_from_user(ptr);
        if (hak_header_validate(hdr)) {
            // Valid HAKMEM header found
            if (hdr->method == ALLOC_METHOD_MMAP ||
                hdr->method == ALLOC_METHOD_POOL ||
                hdr->method == ALLOC_METHOD_L25_POOL) {
                result.kind = PTR_KIND_MID_LARGE;
                result.ss = NULL;
#if !HAKMEM_BUILD_RELEASE
                g_classify_mid_large_hit++;
#endif
                return result;
            }
        }
    }

    // Unknown pointer (external allocation or Mid/Large)
    // Let free wrapper handle Mid/Large registry lookups
    result.kind = PTR_KIND_UNKNOWN;
#if !HAKMEM_BUILD_RELEASE
    g_classify_unknown_hit++;
#endif

    return result;
}
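
// Illustrative caller sketch (not part of this translation unit; hak_free_tiny_fast,
// pool_tls_free, and hak_free_mid_large are hypothetical names — the real dispatch
// lives in the free wrapper):
//
//   ptr_classification_t c = classify_ptr(p);
//   switch (c.kind) {
//   case PTR_KIND_TINY_HEADER:
//   case PTR_KIND_TINY_HEADERLESS: hak_free_tiny_fast(p, &c); break; // uses c.ss / c.class_idx
//   case PTR_KIND_POOL_TLS:        pool_tls_free(p);          break;
//   case PTR_KIND_MID_LARGE:       hak_free_mid_large(p);     break;
//   default: /* unknown: defer to the wrapper's fallback path */ break;
//   }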