hakmem/core/hakmem_tiny_fastcache.inc.h
Moe Charm (CI) 1bbfb53925 Implement Phantom typing for Tiny FastCache layer
Refactor FastCache and TLS cache APIs to use Phantom types (hak_base_ptr_t)
for compile-time type safety, preventing BASE/USER pointer confusion.

Changes:
1. core/hakmem_tiny_fastcache.inc.h:
   - fastcache_pop() returns hak_base_ptr_t instead of void*
   - fastcache_push() accepts hak_base_ptr_t instead of void*

2. core/hakmem_tiny.c:
   - Updated forward declarations to match new signatures

3. core/tiny_alloc_fast.inc.h, core/hakmem_tiny_alloc.inc:
   - Alloc paths now use hak_base_ptr_t for cache operations
   - BASE->USER conversion via HAK_RET_ALLOC macro

4. core/hakmem_tiny_refill.inc.h, core/refill/ss_refill_fc.h:
   - Refill paths properly handle BASE pointer types
   - Fixed: Removed unnecessary HAK_BASE_FROM_RAW() in ss_refill_fc.h line 176

5. core/hakmem_tiny_free.inc, core/tiny_free_magazine.inc.h:
   - Free paths convert USER->BASE before cache push
   - USER->BASE conversion via HAK_USER_TO_BASE or ptr_user_to_base()

6. core/hakmem_tiny_legacy_slow_box.inc:
   - Legacy path properly wraps pointers for cache API

Benefits:
- Type safety at compile time (enforced in debug builds)
- Zero runtime overhead (the phantom wrapper exists only in debug builds; release builds typedef hak_base_ptr_t to void*; see the sketch below)
- All BASE->USER conversions verified via Task analysis
- Prevents pointer type confusion bugs
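
For illustration, a minimal sketch of how such a phantom type can be declared. The exact hakmem definitions are not shown in this change, so treat the bodies below as assumptions; only the names hak_base_ptr_t, HAK_BASE_FROM_RAW, HAK_BASE_TO_RAW, and HAKMEM_BUILD_RELEASE appear in the code:

    #if !HAKMEM_BUILD_RELEASE
    /* Debug: a distinct struct type, so passing a raw/USER pointer where a
     * BASE pointer is expected fails to compile. */
    typedef struct { void* p; } hak_base_ptr_t;
    #define HAK_BASE_FROM_RAW(raw) ((hak_base_ptr_t){ (raw) })
    #define HAK_BASE_TO_RAW(b)     ((b).p)
    #else
    /* Release: plain typedef, zero overhead. */
    typedef void* hak_base_ptr_t;
    #define HAK_BASE_FROM_RAW(raw) (raw)
    #define HAK_BASE_TO_RAW(b)     (b)
    #endif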

Testing:
- Build: SUCCESS (all 9 files)
- Smoke test: PASS (sh8bench runs to completion)
- Conversion path verification: 3/3 paths correct

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-04 11:05:06 +09:00


// hakmem_tiny_fastcache.inc.h
// Phase 2D-1: Hot-path inline functions - Fast cache and quick slot operations
//
// This file contains fast cache and quick slot inline functions.
// These functions are extracted from hakmem_tiny.c to improve maintainability and
// reduce the main file size by approximately 53 lines.
//
// Functions handle:
// - tiny_fast_pop/push: Fast TLS cache operations (lines 377-404)
// - fastcache_pop/push: Frontend fast cache (lines 873-888)
// - quick_pop: Quick slot pop operation (lines 892-896)
#ifndef HAKMEM_TINY_FASTCACHE_INC_H
#define HAKMEM_TINY_FASTCACHE_INC_H
#include "hakmem_tiny.h"
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdatomic.h>
#include "tiny_remote.h" // For TINY_REMOTE_SENTINEL detection
#include "box/tiny_next_ptr_box.h" // For tiny_next_read(class_idx, )
// External TLS variables
extern int g_fast_enable;
extern uint16_t g_fast_cap[TINY_NUM_CLASSES];
extern __thread void* g_fast_head[TINY_NUM_CLASSES];
extern __thread uint16_t g_fast_count[TINY_NUM_CLASSES];
// Fast cache capacity
#define TINY_FASTCACHE_CAP 128
// Quick slot capacity
#define QUICK_CAP 6
// External variable declarations
// Note: TinyFastCache and TinyQuickSlot types must be defined before including this file
extern int g_fastcache_enable;
extern __thread TinyFastCache g_fast_cache[TINY_NUM_CLASSES];
extern int g_quick_enable;
extern __thread TinyQuickSlot g_tls_quick[TINY_NUM_CLASSES];
extern unsigned long long g_free_via_fastcache[];
extern unsigned long long g_fast_push_hits[];
extern unsigned long long g_fast_push_full[];
extern unsigned long long g_fast_push_disabled[];
extern unsigned long long g_fast_push_zero_cap[];
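// For reference, a hypothetical sketch of what these TLS cache types could
// look like, inferred from how they are used below (a `top` index into a
// fixed `items` array); the authoritative definitions live in the including
// translation unit and may differ:
//
//   typedef struct { void* items[TINY_FASTCACHE_CAP]; uint32_t top; } TinyFastCache;
//   typedef struct { void* items[QUICK_CAP];          uint32_t top; } TinyQuickSlot;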
#if !HAKMEM_BUILD_RELEASE
static int g_fast_debug_mode = -1;
static int g_fast_debug_limit = 8;
static _Atomic int g_fast_debug_seen[TINY_NUM_CLASSES];
static inline void tiny_fast_debug_log(int class_idx, const char* event, uint16_t count, uint16_t cap) {
    if (__builtin_expect(g_fast_debug_mode == -1, 0)) {
        const char* e = getenv("HAKMEM_TINY_FAST_DEBUG");
        g_fast_debug_mode = (e && atoi(e) != 0) ? 1 : 0;
        const char* limit_env = getenv("HAKMEM_TINY_FAST_DEBUG_MAX");
        if (limit_env && *limit_env) {
            int v = atoi(limit_env);
            if (v > 0) g_fast_debug_limit = v;
        }
    }
    if (!g_fast_debug_mode) return;
    int limit = g_fast_debug_limit;
    if (limit <= 0) limit = 8;
    int seen = atomic_fetch_add_explicit(&g_fast_debug_seen[class_idx], 1, memory_order_relaxed);
    if (seen < limit) {
        fprintf(stderr, "[FASTDBG] class=%d event=%s count=%u cap=%u\n",
                class_idx, event, (unsigned)count, (unsigned)cap);
    }
}
#else
static inline void tiny_fast_debug_log(int class_idx, const char* event, uint16_t count, uint16_t cap) {
    (void)class_idx; (void)event; (void)count; (void)cap;
}
#endif
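// Usage example (based on the getenv() calls above):
//   HAKMEM_TINY_FAST_DEBUG=1 HAKMEM_TINY_FAST_DEBUG_MAX=16 ./app
// logs up to 16 [FASTDBG] events per size class to stderr.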
// Tracepoint macros (no-op if not defined)
#ifndef HAK_TP1
#define HAK_TP1(name, idx) do { (void)(idx); } while(0)
#endif
// Basic fast cache operations
// NOTE: These APIs conceptually operate on BASE pointers.
// Interfaces use hak_base_ptr_t for type-safety; storage remains void*.
static inline __attribute__((always_inline)) hak_base_ptr_t tiny_fast_pop(int class_idx) {
    if (!g_fast_enable) return HAK_BASE_FROM_RAW(NULL);
    uint16_t cap = g_fast_cap[class_idx];
    if (cap == 0) return HAK_BASE_FROM_RAW(NULL);
    void* head = g_fast_head[class_idx];
    if (!head) return HAK_BASE_FROM_RAW(NULL);
    // Phase E1-CORRECT: header-aware next pointer. With HAKMEM_TINY_HEADER_CLASSIDX,
    // ALL classes carry a 1-byte header and the next pointer sits at base+1;
    // otherwise it sits at base+0. The Box API encapsulates this offset.
    void* next = tiny_next_read(class_idx, head);
    g_fast_head[class_idx] = next;
    uint16_t count = g_fast_count[class_idx];
    if (count > 0) {
        g_fast_count[class_idx] = (uint16_t)(count - 1);
    } else {
        g_fast_count[class_idx] = 0;
    }
    // Phase E1-CORRECT: Return BASE pointer; caller (HAK_RET_ALLOC) performs BASE→USER
    return HAK_BASE_FROM_RAW(head);
}
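// A plausible sketch (an assumption, not the actual implementation) of the Box
// helpers used here, given the "next ptr at offset 1" comments above; memcpy
// is used because base+1 is not pointer-aligned:
//   void* tiny_next_read(int cls, void* base) {
//       void* next; memcpy(&next, (uint8_t*)base + 1, sizeof next); return next;
//   }
//   void tiny_next_write(int cls, void* base, void* next) {
//       memcpy((uint8_t*)base + 1, &next, sizeof next);
//   }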
static inline __attribute__((always_inline)) int tiny_fast_push(int class_idx, hak_base_ptr_t base) {
    void* ptr = HAK_BASE_TO_RAW(base);
    // NEW: Check Front-Direct/SLL-OFF bypass (priority check before any work)
    static __thread int s_front_direct_free = -1;
    if (__builtin_expect(s_front_direct_free == -1, 0)) {
        const char* e = getenv("HAKMEM_TINY_FRONT_DIRECT");
        s_front_direct_free = (e && *e && *e != '0') ? 1 : 0;
    }
    // If Front-Direct OR SLL disabled, bypass tiny_fast (which uses the TLS SLL)
    extern int g_tls_sll_enable;
    if (__builtin_expect(s_front_direct_free || !g_tls_sll_enable, 0)) {
        return 0; // Bypass TLS SLL entirely → route to magazine/slow path
    }
    // ✅ CRITICAL FIX: Prevent sentinel-poisoned nodes from entering the fast cache.
    // Remote free operations can write SENTINEL to node->next, which eventually
    // propagates through freelist → TLS list → fast cache. If we push such a node,
    // the next pop will try to dereference the sentinel → SEGV!
    if (__builtin_expect((uintptr_t)ptr == TINY_REMOTE_SENTINEL, 0)) {
        static __thread int sentinel_ptr_logged = 0;
        if (sentinel_ptr_logged < 5) {
            fprintf(stderr, "[FAST_PUSH_SENTINEL] cls=%d ptr=%p BLOCKED (ptr is sentinel)!\n",
                    class_idx, ptr);
            sentinel_ptr_logged++;
        }
        return 0; // Reject push
    }
    // ✅ CRITICAL FIX #2: Also reject nodes whose NEXT pointer is the sentinel
    // (defense-in-depth against remote-freed nodes carrying a poisoned next field).
    void* next_check = tiny_next_read(class_idx, ptr);
    if (__builtin_expect((uintptr_t)next_check == TINY_REMOTE_SENTINEL, 0)) {
        static __thread int sentinel_next_logged = 0;
        if (sentinel_next_logged < 5) {
            fprintf(stderr, "[FAST_PUSH_NEXT_SENTINEL] cls=%d ptr=%p next=%p BLOCKED (next is sentinel)!\n",
                    class_idx, ptr, next_check);
            sentinel_next_logged++;
        }
        return 0; // Reject push
    }
    if (!g_fast_enable) {
        g_fast_push_disabled[class_idx]++;
        tiny_fast_debug_log(class_idx, "disabled", 0, 0);
        return 0;
    }
    uint16_t cap = g_fast_cap[class_idx];
    if (cap == 0) {
        g_fast_push_zero_cap[class_idx]++;
        tiny_fast_debug_log(class_idx, "zero_cap", g_fast_count[class_idx], cap);
        return 0;
    }
    uint16_t count = g_fast_count[class_idx];
    if (count >= cap) {
        g_fast_push_full[class_idx]++;
        tiny_fast_debug_log(class_idx, "full", count, cap);
        return 0;
    }
    // Phase E1-CORRECT: header-aware next-pointer write via the Box API
    // (see tiny_next_ptr_box.h; ALL classes: base+1 when headers are enabled)
    tiny_next_write(class_idx, ptr, g_fast_head[class_idx]);
    g_fast_head[class_idx] = ptr;
    g_fast_count[class_idx] = (uint16_t)(count + 1);
    g_fast_push_hits[class_idx]++;
    tiny_fast_debug_log(class_idx, "hit", (uint16_t)(count + 1), cap);
    return 1;
}
// Frontend fast cache operations
static inline hak_base_ptr_t fastcache_pop(int class_idx) {
    TinyFastCache* fc = &g_fast_cache[class_idx];
    if (__builtin_expect(fc->top > 0, 1)) {
        void* base = fc->items[--fc->top];
        // ✅ FIX #16: Return BASE pointer (not USER).
        // FastCache stores BASE pointers; the caller applies HAK_RET_ALLOC,
        // which does the BASE → USER conversion via tiny_region_id_write_header.
        return HAK_BASE_FROM_RAW(base);
    }
    return HAK_BASE_FROM_RAW(NULL);
}
static inline int fastcache_push(int class_idx, hak_base_ptr_t base) {
    void* ptr = HAK_BASE_TO_RAW(base);
    TinyFastCache* fc = &g_fast_cache[class_idx];
    if (__builtin_expect(fc->top < TINY_FASTCACHE_CAP, 1)) {
        fc->items[fc->top++] = ptr;
        g_free_via_fastcache[class_idx]++;
        return 1;
    }
    return 0;
}
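// Illustrative caller pattern (hypothetical sketch; the real call sites are in
// tiny_alloc_fast.inc.h and hakmem_tiny_free.inc, and the macro arities may differ):
//   alloc path:
//     hak_base_ptr_t b = fastcache_pop(cls);
//     if (HAK_BASE_TO_RAW(b)) HAK_RET_ALLOC(...);       // BASE → USER, return to app
//   free path:
//     fastcache_push(cls, HAK_USER_TO_BASE(user_ptr));  // USER → BASE before caching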
// Quick slot pop operation
static inline void* quick_pop(int class_idx) {
    TinyQuickSlot* qs = &g_tls_quick[class_idx];
    if (__builtin_expect(qs->top > 0, 1)) {
        void* p = qs->items[--qs->top];
        HAK_TP1(quick_pop, class_idx);
        return p;
    }
    return NULL;
}
#endif // HAKMEM_TINY_FASTCACHE_INC_H