## Summary
Implemented Front-Direct architecture with complete SLL bypass:
- Direct SuperSlab → FastCache refill (1-hop, bypasses SLL)
- SLL-free allocation/free paths when Front-Direct enabled
- Legacy path sealing (SLL inline opt-in, SFC cascade ENV-only)
## New Modules
- core/refill/ss_refill_fc.h (236 lines): Standard SS→FC refill entry point
- Remote drain → Freelist → Carve priority
- Header restoration for C1-C6 (NOT C0/C7)
- ENV: HAKMEM_TINY_P0_DRAIN_THRESH, HAKMEM_TINY_P0_NO_DRAIN
- core/front/fast_cache.h: FastCache (L1) type definition
- core/front/quick_slot.h: QuickSlot (L0) type definition
## Allocation Path (core/tiny_alloc_fast.inc.h)
- Added s_front_direct_alloc TLS flag (lazy ENV check)
- SLL pop guarded by: g_tls_sll_enable && !s_front_direct_alloc
- Refill dispatch:
- Front-Direct: ss_refill_fc_fill() → fastcache_pop() (1-hop)
- Legacy: sll_refill_batch_from_ss() → SLL → FC (2-hop, A/B only)
- SLL inline pop sealed (requires HAKMEM_TINY_INLINE_SLL=1 opt-in)
## Free Path (core/hakmem_tiny_free.inc, core/hakmem_tiny_fastcache.inc.h)
- FC priority: Try fastcache_push() first (same-thread free)
- tiny_fast_push() bypass: Returns 0 when s_front_direct_free || !g_tls_sll_enable
- Fallback: Magazine/slow path (safe, bypasses SLL)
## Legacy Sealing
- SFC cascade: Default OFF (ENV-only via HAKMEM_TINY_SFC_CASCADE=1)
- Deleted: core/hakmem_tiny_free.inc.bak, core/pool_refill_legacy.c.bak
- Documentation: ss_refill_fc_fill() promoted as CANONICAL refill entry
## ENV Controls
- HAKMEM_TINY_FRONT_DIRECT=1: Enable Front-Direct (SS→FC direct)
- HAKMEM_TINY_P0_DIRECT_FC_ALL=1: Same as above (alt name)
- HAKMEM_TINY_REFILL_BATCH=1: Enable batch refill (also enables Front-Direct)
- HAKMEM_TINY_SFC_CASCADE=1: Enable SFC cascade (default OFF)
- HAKMEM_TINY_INLINE_SLL=1: Enable inline SLL pop (default OFF, requires AGGRESSIVE_INLINE)
## Benchmarks (Front-Direct Enabled)
```bash
ENV: HAKMEM_BENCH_FAST_FRONT=1 HAKMEM_TINY_FRONT_DIRECT=1
HAKMEM_TINY_REFILL_BATCH=1 HAKMEM_TINY_P0_DIRECT_FC_ALL=1
HAKMEM_TINY_REFILL_COUNT_HOT=256 HAKMEM_TINY_REFILL_COUNT_MID=96
HAKMEM_TINY_BUMP_CHUNK=256
bench_random_mixed (16-1040B random, 200K iter):
256 slots: 1.44M ops/s (STABLE, 0 SEGV)
128 slots: 1.44M ops/s (STABLE, 0 SEGV)
bench_fixed_size (fixed size, 200K iter):
256B: 4.06M ops/s (has debug logs, expected >10M without logs)
128B: similar (also limited by debug logging)
```
## Verification
- TRACE_RING test (10K iter): **0 SLL events** detected ✅
- Complete SLL bypass confirmed when Front-Direct=1
- Stable execution: 200K iterations × multiple sizes, 0 SEGV
## Next Steps
- Disable debug logs in hak_alloc_api.inc.h (call_num 14250-14280 range)
- Re-benchmark with clean Release build (target: 10-15M ops/s)
- 128/256B shortcut path optimization (FC hit rate improvement)
Co-Authored-By: ChatGPT <chatgpt@openai.com>
Suggested-By: ultrathink
215 lines · 8.0 KiB · C
// hakmem_tiny_fastcache.inc.h
// Phase 2D-1: Hot-path inline functions — fast cache and quick slot operations
//
// This file contains fast cache and quick slot inline functions, extracted
// from hakmem_tiny.c to improve maintainability and reduce the main file
// size by approximately 53 lines.
//
// Functions handled here:
// - tiny_fast_pop/push: fast TLS cache operations (originally lines 377-404)
// - fastcache_pop/push: frontend fast cache (originally lines 873-888)
// - quick_pop: quick slot pop operation (originally lines 892-896)

#ifndef HAKMEM_TINY_FASTCACHE_INC_H
#define HAKMEM_TINY_FASTCACHE_INC_H

#include "hakmem_tiny.h"
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdatomic.h>
#include "tiny_remote.h"           // TINY_REMOTE_SENTINEL detection
#include "box/tiny_next_ptr_box.h" // tiny_next_read()/tiny_next_write()

// --- External TLS fast-path state (defined in the main translation unit) ---
extern int g_fast_enable;
extern uint16_t g_fast_cap[TINY_NUM_CLASSES];
extern __thread void* g_fast_head[TINY_NUM_CLASSES];
extern __thread uint16_t g_fast_count[TINY_NUM_CLASSES];

// Fast cache capacity (entries per class)
#define TINY_FASTCACHE_CAP 128

// Quick slot capacity (entries per class)
#define QUICK_CAP 6

// --- External variable declarations ---
// Note: TinyFastCache and TinyQuickSlot types must be defined before
// including this file.
extern int g_fastcache_enable;
extern __thread TinyFastCache g_fast_cache[TINY_NUM_CLASSES];
extern int g_quick_enable;
extern __thread TinyQuickSlot g_tls_quick[TINY_NUM_CLASSES];

// Statistics counters (indexed by size class)
extern unsigned long long g_free_via_fastcache[];
extern unsigned long long g_fast_push_hits[];
extern unsigned long long g_fast_push_full[];
extern unsigned long long g_fast_push_disabled[];
extern unsigned long long g_fast_push_zero_cap[];
static int g_fast_debug_mode = -1;
|
|
static int g_fast_debug_limit = 8;
|
|
static _Atomic int g_fast_debug_seen[TINY_NUM_CLASSES];
|
|
|
|
static inline void tiny_fast_debug_log(int class_idx, const char* event, uint16_t count, uint16_t cap) {
|
|
if (__builtin_expect(g_fast_debug_mode == -1, 0)) {
|
|
const char* e = getenv("HAKMEM_TINY_FAST_DEBUG");
|
|
g_fast_debug_mode = (e && atoi(e) != 0) ? 1 : 0;
|
|
const char* limit_env = getenv("HAKMEM_TINY_FAST_DEBUG_MAX");
|
|
if (limit_env && *limit_env) {
|
|
int v = atoi(limit_env);
|
|
if (v > 0) g_fast_debug_limit = v;
|
|
}
|
|
}
|
|
if (!g_fast_debug_mode) return;
|
|
int limit = g_fast_debug_limit;
|
|
if (limit <= 0) limit = 8;
|
|
int seen = atomic_fetch_add_explicit(&g_fast_debug_seen[class_idx], 1, memory_order_relaxed);
|
|
if (seen < limit) {
|
|
fprintf(stderr, "[FASTDBG] class=%d event=%s count=%u cap=%u\n",
|
|
class_idx, event, (unsigned)count, (unsigned)cap);
|
|
}
|
|
}
|
|
|
|
// Tracepoint macros (no-op if not defined)
|
|
#ifndef HAK_TP1
|
|
#define HAK_TP1(name, idx) do { (void)(idx); } while(0)
|
|
#endif
|
|
|
|
// Basic fast cache operations
|
|
static inline __attribute__((always_inline)) void* tiny_fast_pop(int class_idx) {
|
|
if (!g_fast_enable) return NULL;
|
|
uint16_t cap = g_fast_cap[class_idx];
|
|
if (cap == 0) return NULL;
|
|
void* head = g_fast_head[class_idx];
|
|
if (!head) return NULL;
|
|
// Phase 7: header-aware next pointer (C0-C6: base+1, C7: base)
|
|
#if HAKMEM_TINY_HEADER_CLASSIDX
|
|
// Phase E1-CORRECT: ALL classes have 1-byte header, next ptr at offset 1
|
|
const size_t next_offset = 1;
|
|
#else
|
|
const size_t next_offset = 0;
|
|
#endif
|
|
// Phase E1-CORRECT: Use Box API for next pointer read (ALL classes: base+1)
|
|
#include "box/tiny_next_ptr_box.h"
|
|
void* next = tiny_next_read(class_idx, head);
|
|
g_fast_head[class_idx] = next;
|
|
uint16_t count = g_fast_count[class_idx];
|
|
if (count > 0) {
|
|
g_fast_count[class_idx] = (uint16_t)(count - 1);
|
|
} else {
|
|
g_fast_count[class_idx] = 0;
|
|
}
|
|
// Phase E1-CORRECT: Return BASE pointer; caller (HAK_RET_ALLOC) performs BASE→USER
|
|
return head;
|
|
}
|
|
|
|
static inline __attribute__((always_inline)) int tiny_fast_push(int class_idx, void* ptr) {
|
|
// NEW: Check Front-Direct/SLL-OFF bypass (priority check before any work)
|
|
static __thread int s_front_direct_free = -1;
|
|
if (__builtin_expect(s_front_direct_free == -1, 0)) {
|
|
const char* e = getenv("HAKMEM_TINY_FRONT_DIRECT");
|
|
s_front_direct_free = (e && *e && *e != '0') ? 1 : 0;
|
|
}
|
|
|
|
// If Front-Direct OR SLL disabled, bypass tiny_fast (which uses TLS SLL)
|
|
extern int g_tls_sll_enable;
|
|
if (__builtin_expect(s_front_direct_free || !g_tls_sll_enable, 0)) {
|
|
return 0; // Bypass TLS SLL entirely → route to magazine/slow path
|
|
}
|
|
|
|
// ✅ CRITICAL FIX: Prevent sentinel-poisoned nodes from entering fast cache
|
|
// Remote free operations can write SENTINEL to node->next, which eventually
|
|
// propagates through freelist → TLS list → fast cache. If we push such a node,
|
|
// the next pop will try to dereference the sentinel → SEGV!
|
|
if (__builtin_expect((uintptr_t)ptr == TINY_REMOTE_SENTINEL, 0)) {
|
|
static __thread int sentinel_ptr_logged = 0;
|
|
if (sentinel_ptr_logged < 5) {
|
|
fprintf(stderr, "[FAST_PUSH_SENTINEL] cls=%d ptr=%p BLOCKED (ptr is sentinel)!\n",
|
|
class_idx, ptr);
|
|
sentinel_ptr_logged++;
|
|
}
|
|
return 0; // Reject push
|
|
}
|
|
|
|
// ✅ CRITICAL FIX #2: Also check if node's NEXT pointer is sentinel (defense-in-depth)
|
|
// This catches nodes that have sentinel in their next field (from remote free)
|
|
void* next_check = tiny_next_read(class_idx, ptr);
|
|
if (__builtin_expect((uintptr_t)next_check == TINY_REMOTE_SENTINEL, 0)) {
|
|
static __thread int sentinel_next_logged = 0;
|
|
if (sentinel_next_logged < 5) {
|
|
fprintf(stderr, "[FAST_PUSH_NEXT_SENTINEL] cls=%d ptr=%p next=%p BLOCKED (next is sentinel)!\n",
|
|
class_idx, ptr, next_check);
|
|
sentinel_next_logged++;
|
|
}
|
|
return 0; // Reject push
|
|
}
|
|
|
|
if (!g_fast_enable) {
|
|
g_fast_push_disabled[class_idx]++;
|
|
tiny_fast_debug_log(class_idx, "disabled", 0, 0);
|
|
return 0;
|
|
}
|
|
uint16_t cap = g_fast_cap[class_idx];
|
|
if (cap == 0) {
|
|
g_fast_push_zero_cap[class_idx]++;
|
|
tiny_fast_debug_log(class_idx, "zero_cap", g_fast_count[class_idx], cap);
|
|
return 0;
|
|
}
|
|
uint16_t count = g_fast_count[class_idx];
|
|
if (count >= cap) {
|
|
g_fast_push_full[class_idx]++;
|
|
tiny_fast_debug_log(class_idx, "full", count, cap);
|
|
return 0;
|
|
}
|
|
// Phase 7: header-aware next pointer (C0-C6: base+1, C7: base)
|
|
#if HAKMEM_TINY_HEADER_CLASSIDX
|
|
// Phase E1-CORRECT: ALL classes have 1-byte header, next ptr at offset 1
|
|
const size_t next_offset2 = 1;
|
|
#else
|
|
const size_t next_offset2 = 0;
|
|
#endif
|
|
// Phase E1-CORRECT: Use Box API for next pointer write (ALL classes: base+1)
|
|
#include "box/tiny_next_ptr_box.h"
|
|
tiny_next_write(class_idx, ptr, g_fast_head[class_idx]);
|
|
g_fast_head[class_idx] = ptr;
|
|
g_fast_count[class_idx] = (uint16_t)(count + 1);
|
|
g_fast_push_hits[class_idx]++;
|
|
tiny_fast_debug_log(class_idx, "hit", (uint16_t)(count + 1), cap);
|
|
return 1;
|
|
}
|
|
|
|
// Frontend fast cache operations
|
|
static inline void* fastcache_pop(int class_idx) {
|
|
TinyFastCache* fc = &g_fast_cache[class_idx];
|
|
if (__builtin_expect(fc->top > 0, 1)) {
|
|
void* base = fc->items[--fc->top];
|
|
// ✅ FIX #16: Return BASE pointer (not USER)
|
|
// FastCache stores base pointers. Caller will apply HAK_RET_ALLOC
|
|
// which does BASE → USER conversion via tiny_region_id_write_header
|
|
return base;
|
|
}
|
|
return NULL;
|
|
}
|
|
|
|
static inline int fastcache_push(int class_idx, void* ptr) {
|
|
TinyFastCache* fc = &g_fast_cache[class_idx];
|
|
if (__builtin_expect(fc->top < TINY_FASTCACHE_CAP, 1)) {
|
|
fc->items[fc->top++] = ptr;
|
|
g_free_via_fastcache[class_idx]++;
|
|
return 1;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
// Quick slot pop operation
|
|
static inline void* quick_pop(int class_idx) {
|
|
TinyQuickSlot* qs = &g_tls_quick[class_idx];
|
|
if (__builtin_expect(qs->top > 0, 1)) {
|
|
void* p = qs->items[--qs->top];
|
|
HAK_TP1(quick_pop, class_idx);
|
|
return p;
|
|
}
|
|
return NULL;
|
|
}
|
|
|
|
#endif // HAKMEM_TINY_FASTCACHE_INC_H
|