Summary:
- Phase 23 Unified Cache: +30% improvement (Random Mixed 256B: 18.18M → 23.68M ops/s)
- PageFaultTelemetry: Extended to generic buckets (C0-C7, MID, L25, SSM)
- Measurement-driven decision: Mid/VM page-faults (80-100K) >> Tiny (6K) → prioritize Mid/VM optimization
Phase 23 Changes:
1. Unified Cache implementation (core/front/tiny_unified_cache.{c,h})
- Direct SuperSlab carve (TLS SLL bypass)
- Self-contained pop-or-refill pattern
- ENV: HAKMEM_TINY_UNIFIED_CACHE=1, HAKMEM_TINY_UNIFIED_C{0-7}=128
2. Fast path pruning (tiny_alloc_fast.inc.h, tiny_free_fast_v2.inc.h)
- Unified ON → direct cache access (skip all intermediate layers)
- Alloc: unified_cache_pop_or_refill() → immediate fail to slow
- Free: unified_cache_push() → fallback to SLL only if full
PageFaultTelemetry Changes:
3. Generic bucket architecture (core/box/pagefault_telemetry_box.{c,h})
- PF_BUCKET_{C0-C7, MID, L25, SSM} for domain-specific measurement
- Integration: hak_pool_try_alloc(), l25_alloc_new_run(), shared_pool_allocate_superslab_unlocked()
4. Measurement results (Random Mixed 500K / 256B):
- Tiny C2-C7: 2-33 pages, high reuse (64-3.8 touches/page)
- SSM: 512 pages (initialization footprint)
- MID/L25: 0 (unused in this workload)
- Mid/Large VM benchmarks: 80-100K page-faults (13-16x higher than Tiny)
Ring Cache Enhancements:
5. Hot Ring Cache (core/front/tiny_ring_cache.{c,h})
- ENV: HAKMEM_TINY_HOT_RING_ENABLE=1, HAKMEM_TINY_HOT_RING_C{0-7}=size
- Conditional compilation cleanup
Documentation:
6. Analysis reports
- RANDOM_MIXED_BOTTLENECK_ANALYSIS.md: Page-fault breakdown
- RANDOM_MIXED_SUMMARY.md: Phase 23 summary
- RING_CACHE_ACTIVATION_GUIDE.md: Ring cache usage
- CURRENT_TASK.md: Updated with Phase 23 results and Phase 24 plan
Next Steps (Phase 24):
- Target: Mid/VM PageArena/HotSpanBox (page-fault reduction 80-100K → 30-40K)
- Tiny SSM optimization deferred (low ROI, ~6K page-faults already optimal)
- Expected improvement: +30-50% for Mid/Large workloads
Generated with Claude Code
Co-Authored-By: Claude <noreply@anthropic.com>
264 lines
9.0 KiB
C
264 lines
9.0 KiB
C
// tiny_ring_cache.h - Phase 21-1: Array-based hot cache (C2/C3/C5)
|
|
//
|
|
// Goal: Eliminate pointer chasing in TLS SLL by using ring buffer
|
|
// Target: +15-20% performance (54.4M → 62-65M ops/s)
|
|
//
|
|
// Design (ChatGPT feedback):
|
|
// - Ring → SLL → SuperSlab (3-layer hierarchy)
|
|
// - Ring size: 128 slots (ENV: 64/128/256 A/B test)
|
|
// - C2/C3/C5 only (hot classes; C2/C3 cover 33-128B)
|
|
// - Replaces UltraHot (Phase 19-3: +12.9% by removing UltraHot)
|
|
//
|
|
// Performance:
|
|
// - Alloc: 1-2 instructions (array access, no pointer chasing)
|
|
// - Free: 1-2 instructions (array write, no pointer chasing)
|
|
// - vs TLS SLL: 3 mem accesses → 2 mem accesses, 1 cache miss → 0
|
|
//
|
|
// ENV Variables:
|
|
//   HAKMEM_TINY_HOT_RING_ENABLE=1   # Enable Ring cache (default: 1, ON; set 0 to disable)
|
|
// HAKMEM_TINY_HOT_RING_C2=128 # C2 ring size (default: 128)
|
|
// HAKMEM_TINY_HOT_RING_C3=128 # C3 ring size (default: 128)
|
|
// HAKMEM_TINY_HOT_RING_CASCADE=1 # Enable SLL → Ring refill (default: 0)
|
|
|
|
#ifndef HAK_FRONT_TINY_RING_CACHE_H
|
|
#define HAK_FRONT_TINY_RING_CACHE_H
|
|
|
|
#include <stdint.h>
|
|
#include <stdlib.h>
|
|
#include <stdio.h>
|
|
#include "../hakmem_build_flags.h"
|
|
|
|
// ============================================================================
|
|
// Ring Buffer Structure
|
|
// ============================================================================
|
|
|
|
// Per-thread ring buffer of cached BASE pointers for one size class.
// head == tail means empty; push leaves one slot unused so that
// (tail + 1) == head unambiguously means full (see ring_cache_push).
typedef struct {
    void** slots;        // Dynamic array (allocated at init, power-of-2 size)
    uint16_t head;       // Pop index (consumer side)
    uint16_t tail;       // Push index (producer side)
    uint16_t capacity;   // Ring size (power of 2 for fast modulo: & (capacity-1))
    uint16_t mask;       // capacity - 1 (precomputed for fast modulo)
} TinyRingCache;
|
|
|
|
// ============================================================================
|
|
// External TLS Variables (defined in tiny_ring_cache.c)
|
|
// ============================================================================
|
|
|
|
extern __thread TinyRingCache g_ring_cache_c2;
|
|
extern __thread TinyRingCache g_ring_cache_c3;
|
|
extern __thread TinyRingCache g_ring_cache_c5;
|
|
|
|
// ============================================================================
|
|
// Metrics (Phase 21-1-E, optional for Phase 21-1-C)
|
|
// ============================================================================
|
|
|
|
#if !HAKMEM_BUILD_RELEASE
|
|
extern __thread uint64_t g_ring_cache_hit[8]; // Alloc hits
|
|
extern __thread uint64_t g_ring_cache_miss[8]; // Alloc misses
|
|
extern __thread uint64_t g_ring_cache_push[8]; // Free pushes
|
|
extern __thread uint64_t g_ring_cache_full[8]; // Free full (fallback to SLL)
|
|
extern __thread uint64_t g_ring_cache_refill[8]; // Refill count (SLL → Ring)
|
|
#endif
|
|
|
|
// ============================================================================
|
|
// ENV Control (cached, lazy init)
|
|
// ============================================================================
|
|
|
|
// Master switch for the Ring cache, cached after the first lookup.
// Default is ON; the only way to turn it off is HAKMEM_TINY_HOT_RING_ENABLE=0
// (any other value, or an unset variable, leaves it enabled).
static inline int ring_cache_enabled(void) {
    static int g_enable = -1;   // -1 = not yet resolved
    if (__builtin_expect(g_enable == -1, 0)) {
        const char* env = getenv("HAKMEM_TINY_HOT_RING_ENABLE");
        if (env != NULL && env[0] == '0') {
            g_enable = 0;       // Explicitly disabled
        } else {
            g_enable = 1;       // DEFAULT: ON (set ENV=0 to disable)
        }
#if !HAKMEM_BUILD_RELEASE
        if (g_enable) {
            fprintf(stderr, "[Ring-INIT] ring_cache_enabled() = %d\n", g_enable);
            fflush(stderr);
        }
#endif
    }
    return g_enable;
}
|
|
|
|
// C2 ring capacity (ENV: HAKMEM_TINY_HOT_RING_C2, default: 128).
// The value is clamped to [32, 256] and rounded up to a power of 2 so the
// ring can index with & (capacity - 1). Resolved once, then cached.
static inline size_t ring_capacity_c2(void) {
    static size_t g_cap = 0;    // 0 = not yet resolved
    if (__builtin_expect(g_cap == 0, 0)) {
        const char* e = getenv("HAKMEM_TINY_HOT_RING_C2");
        if (e && *e) {
            // strtol instead of atoi: atoi is UB on overflow, and a negative
            // value cast to size_t would wrap to a huge number and clamp to
            // the MAXIMUM (256) instead of the minimum. With strtol, any
            // non-positive or garbage input falls through to the 32 floor.
            long v = strtol(e, NULL, 10);
            g_cap = (v > 0) ? (size_t)v : 0;
        } else {
            g_cap = 128;        // Default: 128
        }

        // Clamp to the supported range [32, 256]
        if (g_cap < 32) g_cap = 32;
        if (g_cap > 256) g_cap = 256;

        // Round up to the next power of 2 (fast modulo via mask)
        size_t pow2 = 32;
        while (pow2 < g_cap) pow2 *= 2;
        g_cap = pow2;

#if !HAKMEM_BUILD_RELEASE
        fprintf(stderr, "[Ring-INIT] C2 capacity = %zu (power of 2)\n", g_cap);
        fflush(stderr);
#endif
    }
    return g_cap;
}
|
|
|
|
// C3 ring capacity (ENV: HAKMEM_TINY_HOT_RING_C3, default: 128).
// The value is clamped to [32, 256] and rounded up to a power of 2 so the
// ring can index with & (capacity - 1). Resolved once, then cached.
static inline size_t ring_capacity_c3(void) {
    static size_t g_cap = 0;    // 0 = not yet resolved
    if (__builtin_expect(g_cap == 0, 0)) {
        const char* e = getenv("HAKMEM_TINY_HOT_RING_C3");
        if (e && *e) {
            // strtol instead of atoi: atoi is UB on overflow, and a negative
            // value cast to size_t would wrap to a huge number and clamp to
            // the MAXIMUM (256) instead of the minimum. With strtol, any
            // non-positive or garbage input falls through to the 32 floor.
            long v = strtol(e, NULL, 10);
            g_cap = (v > 0) ? (size_t)v : 0;
        } else {
            g_cap = 128;        // Default: 128
        }

        // Clamp to the supported range [32, 256]
        if (g_cap < 32) g_cap = 32;
        if (g_cap > 256) g_cap = 256;

        // Round up to the next power of 2 (fast modulo via mask)
        size_t pow2 = 32;
        while (pow2 < g_cap) pow2 *= 2;
        g_cap = pow2;

#if !HAKMEM_BUILD_RELEASE
        fprintf(stderr, "[Ring-INIT] C3 capacity = %zu (power of 2)\n", g_cap);
        fflush(stderr);
#endif
    }
    return g_cap;
}
|
|
|
|
// C5 ring capacity (ENV: HAKMEM_TINY_HOT_RING_C5, default: 128).
// The value is clamped to [32, 256] and rounded up to a power of 2 so the
// ring can index with & (capacity - 1). Resolved once, then cached.
static inline size_t ring_capacity_c5(void) {
    static size_t g_cap = 0;    // 0 = not yet resolved
    if (__builtin_expect(g_cap == 0, 0)) {
        const char* e = getenv("HAKMEM_TINY_HOT_RING_C5");
        if (e && *e) {
            // strtol instead of atoi: atoi is UB on overflow, and a negative
            // value cast to size_t would wrap to a huge number and clamp to
            // the MAXIMUM (256) instead of the minimum. With strtol, any
            // non-positive or garbage input falls through to the 32 floor.
            long v = strtol(e, NULL, 10);
            g_cap = (v > 0) ? (size_t)v : 0;
        } else {
            g_cap = 128;        // Default: 128
        }

        // Clamp to the supported range [32, 256]
        if (g_cap < 32) g_cap = 32;
        if (g_cap > 256) g_cap = 256;

        // Round up to the next power of 2 (fast modulo via mask)
        size_t pow2 = 32;
        while (pow2 < g_cap) pow2 *= 2;
        g_cap = pow2;

#if !HAKMEM_BUILD_RELEASE
        fprintf(stderr, "[Ring-INIT] C5 capacity = %zu (power of 2)\n", g_cap);
        fflush(stderr);
#endif
    }
    return g_cap;
}
|
|
|
|
// Cascade (SLL -> Ring refill) switch, cached after the first lookup.
// Default is OFF; set HAKMEM_TINY_HOT_RING_CASCADE to any non-empty value
// not starting with '0' to enable.
static inline int ring_cascade_enabled(void) {
    static int g_enable = -1;   // -1 = not yet resolved
    if (__builtin_expect(g_enable == -1, 0)) {
        const char* env = getenv("HAKMEM_TINY_HOT_RING_CASCADE");
        g_enable = 0;           // DEFAULT: OFF
        if (env != NULL && env[0] != '\0' && env[0] != '0') {
            g_enable = 1;
        }
#if !HAKMEM_BUILD_RELEASE
        if (g_enable) {
            fprintf(stderr, "[Ring-INIT] ring_cascade_enabled() = %d\n", g_enable);
            fflush(stderr);
        }
#endif
    }
    return g_enable;
}
|
|
|
|
// ============================================================================
|
|
// Init/Shutdown Forward Declarations (needed by pop/push)
|
|
// ============================================================================
|
|
|
|
void ring_cache_init(void);
|
|
void ring_cache_shutdown(void);
|
|
void ring_cache_print_stats(void);
|
|
|
|
// ============================================================================
|
|
// Ultra-Fast Pop/Push (1-2 instructions)
|
|
// ============================================================================
|
|
|
|
// Pop from ring (alloc fast path)
|
|
// Returns: BASE pointer (caller must convert to USER with +1)
|
|
static inline void* ring_cache_pop(int class_idx) {
|
|
// Fast path: Ring disabled or wrong class → return NULL immediately
|
|
if (__builtin_expect(!ring_cache_enabled(), 0)) return NULL;
|
|
if (__builtin_expect(class_idx != 2 && class_idx != 3 && class_idx != 5, 0)) return NULL;
|
|
|
|
TinyRingCache* ring = (class_idx == 2) ? &g_ring_cache_c2 :
|
|
(class_idx == 3) ? &g_ring_cache_c3 : &g_ring_cache_c5;
|
|
|
|
// Lazy init check (once per thread)
|
|
if (__builtin_expect(ring->slots == NULL, 0)) {
|
|
ring_cache_init(); // First call in this thread
|
|
// Re-check after init (may fail if allocation failed)
|
|
if (ring->slots == NULL) return NULL;
|
|
}
|
|
|
|
// Empty check
|
|
if (__builtin_expect(ring->head == ring->tail, 0)) {
|
|
#if !HAKMEM_BUILD_RELEASE
|
|
g_ring_cache_miss[class_idx]++;
|
|
#endif
|
|
return NULL; // Empty
|
|
}
|
|
|
|
// Pop from head (consumer)
|
|
void* base = ring->slots[ring->head];
|
|
ring->head = (ring->head + 1) & ring->mask; // Fast modulo (power of 2)
|
|
|
|
#if !HAKMEM_BUILD_RELEASE
|
|
g_ring_cache_hit[class_idx]++;
|
|
#endif
|
|
|
|
return base; // Return BASE pointer
|
|
}
|
|
|
|
// Push to ring (free fast path)
|
|
// Input: BASE pointer (caller must pass BASE, not USER)
|
|
// Returns: 1=SUCCESS, 0=FULL
|
|
static inline int ring_cache_push(int class_idx, void* base) {
|
|
// Fast path: Ring disabled or wrong class → return 0 (not handled)
|
|
if (__builtin_expect(!ring_cache_enabled(), 0)) return 0;
|
|
if (__builtin_expect(class_idx != 2 && class_idx != 3 && class_idx != 5, 0)) return 0;
|
|
|
|
TinyRingCache* ring = (class_idx == 2) ? &g_ring_cache_c2 :
|
|
(class_idx == 3) ? &g_ring_cache_c3 : &g_ring_cache_c5;
|
|
|
|
// Lazy init check (once per thread)
|
|
if (__builtin_expect(ring->slots == NULL, 0)) {
|
|
ring_cache_init(); // First call in this thread
|
|
// Re-check after init (may fail if allocation failed)
|
|
if (ring->slots == NULL) return 0;
|
|
}
|
|
|
|
uint16_t next_tail = (ring->tail + 1) & ring->mask;
|
|
|
|
// Full check (leave 1 slot empty to distinguish full/empty)
|
|
if (__builtin_expect(next_tail == ring->head, 0)) {
|
|
#if !HAKMEM_BUILD_RELEASE
|
|
g_ring_cache_full[class_idx]++;
|
|
#endif
|
|
return 0; // Full
|
|
}
|
|
|
|
// Push to tail (producer)
|
|
ring->slots[ring->tail] = base;
|
|
ring->tail = next_tail;
|
|
|
|
#if !HAKMEM_BUILD_RELEASE
|
|
g_ring_cache_push[class_idx]++;
|
|
#endif
|
|
|
|
return 1; // SUCCESS
|
|
}
|
|
|
|
// ============================================================================
|
|
// Refill from TLS SLL (cascade, Phase 21-1-C)
|
|
// ============================================================================
|
|
|
|
// Forward declaration (defined in tiny_ring_cache.c)
|
|
int ring_refill_from_sll(int class_idx, int target_count);
|
|
|
|
#endif // HAK_FRONT_TINY_RING_CACHE_H
|