// File: hakmem/core/box/tls_sll_box.h (340 lines, 10 KiB, C)
// tls_sll_box.h - Box TLS-SLL: Single-Linked List API (Unified Box version)
//
// Goal:
// - Single authoritative Box for TLS SLL operations.
// - All next pointer layout is decided by tiny_next_ptr_box.h (Box API).
// - Callers pass BASE pointers only; no local next_offset arithmetic.
// - Compatible with existing ptr_trace PTR_NEXT_* macros (off is logging-only).
//
// Invariants:
// - g_tiny_class_sizes[cls] is TOTAL stride (including 1-byte header when enabled).
// - For HEADER_CLASSIDX != 0, tiny_nextptr.h encodes:
// class 0: next_off = 0
// class 1-6: next_off = 1
// class 7: next_off = 0
// Callers MUST NOT duplicate this logic.
// - TLS SLL stores BASE pointers only.
// - Box provides: push / pop / splice with capacity & integrity checks.
#ifndef TLS_SLL_BOX_H
#define TLS_SLL_BOX_H
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include "../hakmem_tiny_config.h"
#include "../hakmem_build_flags.h"
#include "../tiny_remote.h"
#include "../tiny_region_id.h"
#include "../hakmem_tiny_integrity.h"
#include "../ptr_track.h"
#include "../ptr_trace.h"
#include "tiny_next_ptr_box.h"
// External TLS SLL state (defined in hakmem_tiny.c or equivalent)
extern __thread void* g_tls_sll_head[TINY_NUM_CLASSES];
extern __thread uint32_t g_tls_sll_count[TINY_NUM_CLASSES];
extern int g_tls_sll_class_mask; // bit i=1 → SLL allowed for class i
// ========== Debug guard ==========
#if !HAKMEM_BUILD_RELEASE
static inline void tls_sll_debug_guard(int class_idx, void* base, const char* where)
{
    (void)class_idx;
    // Anything inside the first page cannot be a valid heap BASE pointer;
    // everything else passes without side effects.
    if ((uintptr_t)base >= 4096) {
        return;
    }
    fprintf(stderr,
            "[TLS_SLL_GUARD] %s: suspicious ptr=%p cls=%d\n",
            where, base, class_idx);
    abort();
}
#else
static inline void tls_sll_debug_guard(int class_idx, void* base, const char* where)
{
    // Release builds: the guard compiles to nothing.
    (void)where;
    (void)base;
    (void)class_idx;
}
#endif
// Normalize helper: callers are required to pass BASE already, so this is an
// identity function today. It exists as a documented hook for future
// hardening (e.g. translating user pointers back to BASE).
static inline void* tls_sll_normalize_base(int class_idx, void* node)
{
    (void)class_idx; // unused until real normalization is needed
    void* base = node;
    return base;
}
// ========== Push ==========
//
// Push a BASE pointer onto the TLS singly-linked freelist for `class_idx`.
//
// Parameters:
//   class_idx - tiny size-class index (bounds-checked via HAK_CHECK_CLASS_IDX)
//   ptr       - BASE pointer of the block (callers must NOT pass user pointers)
//   capacity  - 0 = SLL disabled; > 1<<20 = "unbounded" sentinel; otherwise
//               the maximum list depth
//
// Returns true when the node was linked in; false when rejected (class masked
// out, capacity reached, NULL ptr, or header-magic mismatch in safe mode).
// On a false return the caller still owns `ptr` and must take the slow path.
static inline bool tls_sll_push(int class_idx, void* ptr, uint32_t capacity)
{
    HAK_CHECK_CLASS_IDX(class_idx, "tls_sll_push");
    // Class mask gate (narrow triage): bit i of g_tls_sll_class_mask must be
    // set for class i to use the SLL at all; otherwise reject the push.
    if (__builtin_expect(((g_tls_sll_class_mask & (1u << class_idx)) == 0), 0)) {
        return false;
    }
    // Capacity semantics:
    // - capacity == 0 → disabled (reject)
    // - capacity > 1<<20 → treat as "unbounded" sentinel (no limit)
    if (capacity == 0) {
        return false;
    }
    const uint32_t kCapacityHardMax = (1u << 20);
    const int unlimited = (capacity > kCapacityHardMax);
    if (!ptr) {
        return false;
    }
    // Base pointer only (callers must pass BASE; this is a no-op by design).
    ptr = tls_sll_normalize_base(class_idx, ptr);
#if !HAKMEM_BUILD_RELEASE
    // Minimal range guard before we touch the node's memory.
    if (!validate_ptr_range(ptr, "tls_sll_push_base")) {
        fprintf(stderr,
                "[TLS_SLL_PUSH] FATAL invalid BASE ptr cls=%d base=%p\n",
                class_idx, ptr);
        abort();
    }
#endif
    // Capacity check BEFORE any writes, so a rejected push has zero side
    // effects on the node or the list.
    uint32_t cur = g_tls_sll_count[class_idx];
    if (!unlimited && cur >= capacity) {
        return false;
    }
#if HAKMEM_TINY_HEADER_CLASSIDX
    // Header handling for header classes (class != 0,7); classes 0 and 7
    // store the next pointer at offset 0 and carry no header byte.
    // Safe mode (HAKMEM_TINY_SLL_SAFEHEADER=1): never overwrite header; reject on magic mismatch.
    // Default mode: restore expected header.
    if (class_idx != 0 && class_idx != 7) {
        // Env flag cached in a function-local static; the first-use race is
        // presumably benign since every writer stores the same value — but it
        // is shared per-TU, not per-thread (NOTE(review): confirm intent).
        static int g_sll_safehdr = -1;
        if (__builtin_expect(g_sll_safehdr == -1, 0)) {
            const char* e = getenv("HAKMEM_TINY_SLL_SAFEHEADER");
            g_sll_safehdr = (e && *e && *e != '0') ? 1 : 0;
        }
        uint8_t* b = (uint8_t*)ptr;
        uint8_t expected = (uint8_t)(HEADER_MAGIC | (class_idx & HEADER_CLASS_MASK));
        if (g_sll_safehdr) {
            // Safe mode: verify only the magic high nibble, never write.
            uint8_t got = *b;
            if ((got & 0xF0u) != HEADER_MAGIC) {
                // Reject push silently (fall back to slow path at caller)
                return false;
            }
        } else {
            // Default mode: unconditionally restore the expected header byte.
            PTR_TRACK_TLS_PUSH(ptr, class_idx);
            PTR_TRACK_HEADER_WRITE(ptr, expected);
            *b = expected;
        }
    }
#endif
    tls_sll_debug_guard(class_idx, ptr, "push");
#if !HAKMEM_BUILD_RELEASE
    // Optional double-free detection: scan a bounded prefix (at most 64
    // nodes) of the list for `ptr` before linking it in.
    {
        void* scan = g_tls_sll_head[class_idx];
        uint32_t scanned = 0;
        const uint32_t limit = (g_tls_sll_count[class_idx] < 64)
            ? g_tls_sll_count[class_idx]
            : 64;
        while (scan && scanned < limit) {
            if (scan == ptr) {
                fprintf(stderr,
                        "[TLS_SLL_PUSH] FATAL double-free: cls=%d ptr=%p already in SLL\n",
                        class_idx, ptr);
                ptr_trace_dump_now("double_free");
                abort();
            }
            void* next;
            PTR_NEXT_READ("tls_sll_scan", class_idx, scan, 0, next);
            scan = next;
            scanned++;
        }
    }
#endif
    // Link new node to current head via Box API (offset is handled inside tiny_nextptr).
    PTR_NEXT_WRITE("tls_push", class_idx, ptr, 0, g_tls_sll_head[class_idx]);
    g_tls_sll_head[class_idx] = ptr;
    g_tls_sll_count[class_idx] = cur + 1;
    return true;
}
// ========== Pop ==========
//
// Pop one BASE pointer from the TLS SLL for `class_idx`.
//
// Returns true and stores the BASE pointer into *out on success; false when
// the class is masked out, the list is empty, or a corruption fail-safe
// fired. The popped node's next link is cleared before it is handed back.
static inline bool tls_sll_pop(int class_idx, void** out)
{
    HAK_CHECK_CLASS_IDX(class_idx, "tls_sll_pop");
    // Class mask gate: if disallowed, behave as empty.
    if (__builtin_expect(((g_tls_sll_class_mask & (1u << class_idx)) == 0), 0)) {
        return false;
    }
    // Diagnostics-only counter; does not affect control flow.
    atomic_fetch_add(&g_integrity_check_class_bounds, 1);
    void* base = g_tls_sll_head[class_idx];
    if (!base) {
        return false;
    }
    // Sentinel guard: the remote-free sentinel must never appear in the TLS
    // SLL. If it does, drop the whole list (fail-safe) instead of
    // dereferencing it.
    if (__builtin_expect((uintptr_t)base == TINY_REMOTE_SENTINEL, 0)) {
        g_tls_sll_head[class_idx] = NULL;
        g_tls_sll_count[class_idx] = 0;
#if !HAKMEM_BUILD_RELEASE
        fprintf(stderr,
                "[TLS_SLL_POP] Remote sentinel detected at head; SLL reset (cls=%d)\n",
                class_idx);
#endif
        return false;
    }
#if !HAKMEM_BUILD_RELEASE
    // Range guard before reading through `base`.
    if (!validate_ptr_range(base, "tls_sll_pop_base")) {
        fprintf(stderr,
                "[TLS_SLL_POP] FATAL invalid BASE ptr cls=%d base=%p\n",
                class_idx, base);
        abort();
    }
#endif
    tls_sll_debug_guard(class_idx, base, "pop");
#if HAKMEM_TINY_HEADER_CLASSIDX
    // Header validation for header-classes (class != 0,7): the 1-byte header
    // must equal HEADER_MAGIC | class. Debug builds abort with a trace dump;
    // release builds drop the whole list as a fail-safe and report empty.
    if (class_idx != 0 && class_idx != 7) {
        uint8_t got = *(uint8_t*)base;
        uint8_t expect = (uint8_t)(HEADER_MAGIC | (class_idx & HEADER_CLASS_MASK));
        PTR_TRACK_TLS_POP(base, class_idx);
        PTR_TRACK_HEADER_READ(base, got);
        if (__builtin_expect(got != expect, 0)) {
#if !HAKMEM_BUILD_RELEASE
            fprintf(stderr,
                    "[TLS_SLL_POP] CORRUPTED HEADER cls=%d base=%p got=0x%02x expect=0x%02x\n",
                    class_idx, base, got, expect);
            ptr_trace_dump_now("header_corruption");
            abort();
#else
            // In release, fail-safe: drop list.
            g_tls_sll_head[class_idx] = NULL;
            g_tls_sll_count[class_idx] = 0;
            return false;
#endif
        }
    }
#endif
    // Read next via Box API (offset handled inside tiny_nextptr).
    void* next;
    PTR_NEXT_READ("tls_pop", class_idx, base, 0, next);
#if !HAKMEM_BUILD_RELEASE
    if (next && !validate_ptr_range(next, "tls_sll_pop_next")) {
        fprintf(stderr,
                "[TLS_SLL_POP] FATAL invalid next ptr cls=%d base=%p next=%p\n",
                class_idx, base, next);
        ptr_trace_dump_now("next_corruption");
        abort();
    }
#endif
    g_tls_sll_head[class_idx] = next;
    // Underflow guard in case count ever drifted out of sync with the list.
    if (g_tls_sll_count[class_idx] > 0) {
        g_tls_sll_count[class_idx]--;
    }
    // Clear next inside popped node to avoid stale-chain issues.
    tiny_next_write(class_idx, base, NULL);
    *out = base;
    return true;
}
// ========== Splice ==========
//
// Splice a pre-linked chain of BASE pointers into TLS SLL head.
// chain_head is BASE; links are via Box API-compatible next layout.
// Returns number of nodes actually moved (<= capacity remaining).
static inline uint32_t tls_sll_splice(int class_idx,
void* chain_head,
uint32_t count,
uint32_t capacity)
{
HAK_CHECK_CLASS_IDX(class_idx, "tls_sll_splice");
if (!chain_head || count == 0 || capacity == 0) {
return 0;
}
uint32_t cur = g_tls_sll_count[class_idx];
if (cur >= capacity) {
return 0;
}
uint32_t room = capacity - cur;
uint32_t to_move = (count < room) ? count : room;
// Traverse chain up to to_move, validate, and find tail.
void* tail = chain_head;
uint32_t moved = 1;
tls_sll_debug_guard(class_idx, chain_head, "splice_head");
#if HAKMEM_TINY_HEADER_CLASSIDX
// Restore header defensively on each node we touch.
{
uint8_t* b = (uint8_t*)chain_head;
uint8_t expected = (uint8_t)(HEADER_MAGIC | (class_idx & HEADER_CLASS_MASK));
*b = expected;
}
#endif
while (moved < to_move) {
tls_sll_debug_guard(class_idx, tail, "splice_traverse");
void* next;
PTR_NEXT_READ("tls_splice_trav", class_idx, tail, 0, next);
if (!next) {
break;
}
#if HAKMEM_TINY_HEADER_CLASSIDX
{
uint8_t* b = (uint8_t*)next;
uint8_t expected = (uint8_t)(HEADER_MAGIC | (class_idx & HEADER_CLASS_MASK));
*b = expected;
}
#endif
tail = next;
moved++;
}
// Link tail to existing head and install new head.
tls_sll_debug_guard(class_idx, tail, "splice_tail");
PTR_NEXT_WRITE("tls_splice_link", class_idx, tail, 0, g_tls_sll_head[class_idx]);
g_tls_sll_head[class_idx] = chain_head;
g_tls_sll_count[class_idx] = cur + moved;
return moved;
}
#endif // TLS_SLL_BOX_H