Front Gate: registry-first classification (no ptr-1 deref); Pool TLS via registry to avoid unsafe header reads.
TLS-SLL: splice head normalization, remove false misalignment guard, drop heuristic normalization; add carve/splice debug logs.
Refill: add one-shot sanity checks (range/stride) at P0 and non-P0 boundaries (debug-only).
Infra: provide ptr_trace_dump_now stub in release to fix linking.
Verified: bench_fixed_size_hakmem 200000 1024 128 passes (Debug/Release), no SEGV.

This commit is contained in:
Moe Charm (CI)
2025-11-11 01:00:37 +09:00
parent 8aabee4392
commit a97005f50e
5 changed files with 103 additions and 46 deletions

View File

@ -17,7 +17,7 @@
#include "../hakmem_super_registry.h" // For hak_super_lookup (Box REG)
#ifdef HAKMEM_POOL_TLS_PHASE1
#include "../pool_tls.h" // For POOL_MAGIC
#include "../pool_tls_registry.h" // Safer pool pointer lookup (no header deref)
#endif
// ========== Debug Stats ==========
@ -158,19 +158,10 @@ static inline ptr_classification_t registry_lookup(void* ptr) {
// ========== Pool TLS Probe ==========
#ifdef HAKMEM_POOL_TLS_PHASE1
// Check if pointer has Pool TLS magic (0xb0)
// Returns: 1 if Pool TLS, 0 otherwise
static inline int is_pool_tls(void* ptr) {
// Same safety check as header probe
uintptr_t offset_in_page = (uintptr_t)ptr & 0xFFF;
if (offset_in_page == 0) {
return 0; // Page-aligned, skip header read
}
uint8_t* header_ptr = (uint8_t*)ptr - 1;
uint8_t header = *header_ptr;
return (header & 0xF0) == POOL_MAGIC;
// Pool TLS membership probe that consults the pool registry only —
// never reads bytes adjacent to the pointer, so it is safe on
// arbitrary (even unmapped-neighbor) addresses.
static inline int is_pool_tls_reg(void* ptr) {
    pid_t owner_tid = 0;
    int owner_cls = -1;
    return pool_reg_lookup(ptr, &owner_tid, &owner_cls);
}
#endif
@ -191,35 +182,9 @@ ptr_classification_t classify_ptr(void* ptr) {
return result;
}
// Step 1: Try safe header probe (C0-C6 fast path: 5-10 cycles)
// Skip header probe on 1KB-aligned pointers to avoid misclassifying C7/headerless
int class_idx = -1;
if (((uintptr_t)ptr & 0x3FF) != 0) {
class_idx = safe_header_probe(ptr);
}
if (class_idx >= 0) {
// Header found - C0-C6 with header
// Additional safety: verify pointer belongs to a SuperSlab region.
// This avoids rare false positives where random header bytes look like 0xA0.
struct SuperSlab* ss_chk = hak_super_lookup(ptr);
if (!ss_chk) {
// Not in Tiny registry; treat as UNKNOWN and continue
// (fall back to later checks)
} else {
result.kind = PTR_KIND_TINY_HEADER;
result.class_idx = class_idx;
result.ss = ss_chk;
#if !HAKMEM_BUILD_RELEASE
g_classify_header_hit++;
#endif
return result;
}
}
// Step 2: Check Pool TLS (before Registry to avoid false positives)
// Step 1: Check Pool TLS via registry (no pointer deref)
#ifdef HAKMEM_POOL_TLS_PHASE1
if (is_pool_tls(ptr)) {
if (is_pool_tls_reg(ptr)) {
result.kind = PTR_KIND_POOL_TLS;
#if !HAKMEM_BUILD_RELEASE
@ -229,7 +194,7 @@ ptr_classification_t classify_ptr(void* ptr) {
}
#endif
// Step 3: Fallback to Registry lookup (C7 headerless or header failed)
// Step 2: Registry lookup for Tiny (header or headerless)
result = registry_lookup(ptr);
if (result.kind == PTR_KIND_TINY_HEADERLESS) {
#if !HAKMEM_BUILD_RELEASE
@ -237,8 +202,14 @@ ptr_classification_t classify_ptr(void* ptr) {
#endif
return result;
}
if (result.kind == PTR_KIND_TINY_HEADER) {
#if !HAKMEM_BUILD_RELEASE
g_classify_header_hit++;
#endif
return result;
}
// Step 4: Not Tiny or Pool - return UNKNOWN
// Step 3: Not Tiny or Pool - return UNKNOWN
// Caller should check AllocHeader (16-byte) or delegate to system free
result.kind = PTR_KIND_UNKNOWN;

View File

@ -13,7 +13,7 @@ core/box/front_gate_classifier.o: core/box/front_gate_classifier.c \
core/box/../superslab/superslab_inline.h \
core/box/../hakmem_build_flags.h core/box/../hakmem_tiny_config.h \
core/box/../hakmem_super_registry.h core/box/../hakmem_tiny_superslab.h \
core/box/../pool_tls.h
core/box/../pool_tls_registry.h
core/box/front_gate_classifier.h:
core/box/../tiny_region_id.h:
core/box/../hakmem_build_flags.h:
@ -35,4 +35,4 @@ core/box/../hakmem_build_flags.h:
core/box/../hakmem_tiny_config.h:
core/box/../hakmem_super_registry.h:
core/box/../hakmem_tiny_superslab.h:
core/box/../pool_tls.h:
core/box/../pool_tls_registry.h:

View File

@ -23,6 +23,8 @@
#include "hakmem_tiny_magazine.h"
#include "hakmem_tiny_tls_list.h"
#include "tiny_box_geometry.h" // Box 3: Geometry & Capacity Calculator
#include "hakmem_super_registry.h" // For hak_super_lookup (Debug validation)
#include "superslab/superslab_inline.h" // For slab_index_for/ss_slabs_capacity (Debug validation)
#include "box/tls_sll_box.h" // Box TLS-SLL: Safe SLL operations API
#include <stdint.h>
#include <pthread.h>
@ -97,6 +99,46 @@ static inline int ultra_sll_cap_for_class(int class_idx);
// Note: tiny_small_mags_init_once and tiny_mag_init_if_needed are declared in hakmem_tiny_magazine.h
static void eventq_push(int class_idx, uint32_t size);
// Debug-only: Validate that a base node belongs to the expected Tiny SuperSlab and is stride-aligned
#if !HAKMEM_BUILD_RELEASE
// Debug-only guard: abort with a tagged diagnostic unless `node` is a
// plausible free-list entry of Tiny size class `class_idx`.  `where`
// names the call site in the log line.  Checks, in order: pointer is not
// in the first page, it maps to a registered SuperSlab, the slab's class
// matches (class 7 is rejected outright), the slab index is in range,
// the address lies inside the slab's usable byte range, and the offset
// from the slab base is stride-aligned.
static inline void tiny_debug_validate_node_base(int class_idx, void* node, const char* where) {
// Addresses below one page are NULL-ish garbage, never valid nodes.
if ((uintptr_t)node < 4096) {
fprintf(stderr, "[SLL_NODE_SMALL] %s: node=%p cls=%d\n", where, node, class_idx);
abort();
}
// Node must belong to a SuperSlab known to the Tiny registry.
SuperSlab* ss = hak_super_lookup(node);
if (!ss) {
fprintf(stderr, "[SLL_NODE_UNKNOWN] %s: node=%p cls=%d\n", where, node, class_idx);
abort();
}
// Owning class must equal the expected class; class 7 (headerless C7)
// is presumably never routed through this SLL path — TODO confirm.
int ocls = ss->size_class;
if (ocls == 7 || ocls != class_idx) {
fprintf(stderr, "[SLL_NODE_CLASS_MISMATCH] %s: node=%p cls=%d owner_cls=%d\n", where, node, class_idx, ocls);
abort();
}
// Slab index derived from the address must be within this SuperSlab.
int slab_idx = slab_index_for(ss, node);
if (slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) {
fprintf(stderr, "[SLL_NODE_SLAB_OOB] %s: node=%p slab_idx=%d cap=%d\n", where, node, slab_idx, ss_slabs_capacity(ss));
abort();
}
// Geometry for the owning slab: base address, usable span, object stride.
uint8_t* base = tiny_slab_base_for_geometry(ss, slab_idx);
size_t usable = tiny_usable_bytes_for_slab(slab_idx);
size_t stride = tiny_stride_for_class(ocls);
uintptr_t a = (uintptr_t)node;
// Node must fall inside [base, base + usable).
if (a < (uintptr_t)base || a >= (uintptr_t)base + usable) {
fprintf(stderr, "[SLL_NODE_RANGE] %s: node=%p base=%p usable=%zu\n", where, node, base, usable);
abort();
}
// Offset from the slab base must be an exact multiple of the stride,
// i.e. the pointer is an object base, not an interior address.
size_t off = (size_t)(a - (uintptr_t)base);
if (off % stride != 0) {
fprintf(stderr, "[SLL_NODE_MISALIGNED] %s: node=%p off=%zu stride=%zu base=%p\n", where, node, off, stride, base);
abort();
}
}
#else
// Release build: no-op stub so call sites compile without per-site #if guards.
static inline void tiny_debug_validate_node_base(int class_idx, void* node, const char* where) { (void)class_idx; (void)node; (void)where; }
#endif
// Fast cache refill and take operation
static inline void* tiny_fast_refill_and_take(int class_idx, TinyTLSList* tls) {
void* direct = tiny_fast_pop(class_idx);
@ -151,6 +193,10 @@ static inline int quick_refill_from_sll(int class_idx) {
// CRITICAL: Use Box TLS-SLL API to avoid race condition (rbp=0xa0 SEGV)
void* head = NULL;
if (!tls_sll_pop(class_idx, &head)) break;
// One-shot validation for the first pop
#if !HAKMEM_BUILD_RELEASE
do { static _Atomic int once = 0; int exp = 0; if (atomic_compare_exchange_strong(&once, &exp, 1)) { tiny_debug_validate_node_base(class_idx, head, "quick_refill_from_sll"); } } while (0);
#endif
qs->items[qs->top++] = head;
room--; filled++;
}
@ -419,6 +465,10 @@ static inline int frontend_refill_fc(int class_idx) {
while (need > 0) {
void* h = NULL;
if (!tls_sll_pop(class_idx, &h)) break;
// One-shot validation for the first pop into FastCache
#if !HAKMEM_BUILD_RELEASE
do { static _Atomic int once_fc = 0; int exp2 = 0; if (atomic_compare_exchange_strong(&once_fc, &exp2, 1)) { tiny_debug_validate_node_base(class_idx, h, "frontend_refill_fc"); } } while (0);
#endif
fc->items[fc->top++] = h;
need--; filled++;
if (fc->top >= TINY_FASTCACHE_CAP) break;

View File

@ -311,6 +311,39 @@ static inline int sll_refill_batch_from_ss(int class_idx, int max_take) {
TinyRefillChain carve;
trc_linear_carve(slab_base, bs, meta, batch, class_idx, &carve);
// One-shot sanity: validate first few nodes are within the slab and stride-aligned
#if !HAKMEM_BUILD_RELEASE
do {
static _Atomic int g_once = 0;
int exp = 0;
if (atomic_compare_exchange_strong(&g_once, &exp, 1)) {
uintptr_t base_chk = (uintptr_t)(tls->slab_base ? tls->slab_base : tiny_slab_base_for(tls->ss, tls->slab_idx));
uintptr_t limit_chk = base_chk + tiny_usable_bytes_for_slab(tls->slab_idx);
void* node = carve.head;
for (int i = 0; i < 3 && node; i++) {
uintptr_t a = (uintptr_t)node;
if (!(a >= base_chk && a < limit_chk)) {
fprintf(stderr, "[P0_SANITY_FAIL] out_of_range cls=%d node=%p base=%p limit=%p bs=%zu\n",
class_idx, node, (void*)base_chk, (void*)limit_chk, bs);
abort();
}
size_t off = (size_t)(a - base_chk);
if ((off % bs) != 0) {
fprintf(stderr, "[P0_SANITY_FAIL] misaligned cls=%d node=%p off=%zu bs=%zu base=%p\n",
class_idx, node, off, bs, (void*)base_chk);
abort();
}
#if HAKMEM_TINY_HEADER_CLASSIDX
const size_t next_off = (class_idx == 7) ? 0 : 1;
#else
const size_t next_off = 0;
#endif
node = *(void**)((uint8_t*)node + next_off);
}
}
} while (0);
#endif
trc_splice_to_sll(class_idx, &carve, &g_tls_sll_head[class_idx], &g_tls_sll_count[class_idx]);
// FIX: Update SuperSlab active counter (was missing!)
ss_active_add(tls->ss, batch);

View File

@ -107,4 +107,7 @@ static inline void ptr_trace_dump_now(const char* reason) { (void)reason; }
#define PTR_NEXT_READ(tag, cls, node, off, out_var) \
((out_var) = *(void**)((uint8_t*)(node) + (off)))
// Always provide a stub for release builds so callers can link
// Release-build no-op stub: lets callers of ptr_trace_dump_now() link
// when HAKMEM_PTR_TRACE is disabled.
// NOTE(review): an identical stub appears earlier in this header — confirm
// the two definitions live in mutually exclusive preprocessor branches,
// otherwise this is a redefinition error.
static inline void ptr_trace_dump_now(const char* reason) { (void)reason; }
#endif // HAKMEM_PTR_TRACE