// tiny_guard_box.h - Unified Safety Guards for Tiny Allocator
// Purpose: Centralized validation for TLS push/drain/recycle operations
// License: MIT
// Date: 2025-11-30 (Phase 9-3 unification)
//
// Box Theory Principles:
//   - Single Responsibility: All tiny allocator safety validations
//   - Clear Contract: true = safe to proceed, false = reject operation
//   - Observable: HAKMEM_TINY_GUARD=1 enables all guards + logging
//   - Composable: Called from tls_sll_box.h, tls_sll_drain_box.h, slab_recycling_box.h
//
// Phase 9-3 Unification:
//   Previously scattered across:
//   - tls_sll_guard_box.h (TLS push validation)
//   - tls_sll_drain_box.h (drain assertions)
//   - slab_recycling_box.h (recycle checks)
//   Now unified under the single HAKMEM_TINY_GUARD environment variable.
//
// Guards:
//   1. TLS Push Guard: Prevent cross-slab pointer contamination
//   2. TLS Drain Guard: Validate meta->used == 0 before drain
//   3. Slab Recycle Guard: Validate EMPTY state before recycle

#ifndef HAKMEM_TINY_GUARD_BOX_H
#define HAKMEM_TINY_GUARD_BOX_H

#include "../hakmem_tiny_superslab_internal.h"
#include "../hakmem_shared_pool.h"
#include "../superslab/superslab_inline.h"
#include "tiny_geometry_box.h"  // Phase 9-3: Unified pointer arithmetic

#include <stdio.h>      // fprintf
#include <stdlib.h>     // getenv
#include <stdatomic.h>  // atomic_load_explicit, atomic_fetch_add_explicit
#include <stdbool.h>
#include <stdint.h>

// ========== TLS SLL Push Guard ==========
//
// Validates that a pointer belongs to the expected SuperSlab/slab/class
// before allowing a TLS push. Prevents cross-slab contamination.
//
// Parameters:
//   class_idx:    Expected class index for this TLS freelist
//   ptr:          Pointer to validate (BASE pointer)
//   ss_out:       (Optional) Output parameter for the validated SuperSlab
//   slab_idx_out: (Optional) Output parameter for the validated slab index
//
// Returns:
//   true  = safe to push (all checks passed)
//   false = reject push (caller should use slow path / remote push)
//
// Guard Logic:
//   1. SuperSlab lookup: Verify pointer belongs to a valid SuperSlab
//   2. Slab index lookup: Find which slab within the SuperSlab contains the pointer
//   3. Class validation: Verify the slab's class_idx matches the expected class
//   4. State validation: Verify the slab is ACTIVE (not EMPTY/UNUSED)
//
// Performance:
//   - Debug builds: always enabled
//   - Release builds: disabled by default; enable via HAKMEM_TINY_GUARD=1
//     (cost when disabled is one cached per-thread flag check)
//
// Counters (debug builds only):
//   - g_guard_no_ss:          Pointer not in any SuperSlab
//   - g_guard_no_slab:        Pointer not in any slab (invalid address range)
//   - g_guard_class_mismatch: Slab class doesn't match expected class
//   - g_guard_not_active:     Slab state is not ACTIVE

static inline bool tls_sll_push_guard(int class_idx, void* ptr,
                                      SuperSlab** ss_out, int* slab_idx_out) {
    // Step 0: Guard enable/disable check (cached per thread)
    static __thread int s_guard_enabled = -1;
    if (__builtin_expect(s_guard_enabled == -1, 0)) {
#if !HAKMEM_BUILD_RELEASE
        s_guard_enabled = 1;  // Always on in debug builds
#else
        const char* e = getenv("HAKMEM_TINY_GUARD");
        s_guard_enabled = (e && *e && *e != '0') ? 1 : 0;
#endif
    }
    if (!s_guard_enabled) {
        return true;  // Guard disabled, always pass
    }

    // Step 1: SuperSlab lookup (use unified geometry API)
    SuperSlab* ss = SS_FROM_PTR(ptr);
    if (!ss) {
        // Not in any SuperSlab - reject push
#if !HAKMEM_BUILD_RELEASE
        static _Atomic uint32_t g_guard_no_ss = 0;
        if (atomic_fetch_add_explicit(&g_guard_no_ss, 1, memory_order_relaxed) < 10) {
            fprintf(stderr, "[TLS_SLL_GUARD_NO_SS] cls=%d ptr=%p (rejecting push)\n",
                    class_idx, ptr);
        }
#endif
        return false;
    }

    // Step 2: Find slab index (use unified geometry API)
    int slab_idx = SLAB_IDX_FROM_PTR(ss, ptr);
    if (slab_idx < 0) {
#if !HAKMEM_BUILD_RELEASE
        static _Atomic uint32_t g_guard_no_slab = 0;
        if (atomic_fetch_add_explicit(&g_guard_no_slab, 1, memory_order_relaxed) < 10) {
            fprintf(stderr, "[TLS_SLL_GUARD_NO_SLAB] cls=%d ptr=%p ss=%p (rejecting push)\n",
                    class_idx, ptr, (void*)ss);
        }
#endif
        return false;
    }

    // Step 3: Validate class match
    TinySlabMeta* meta = &ss->slabs[slab_idx];
    if (meta->class_idx != (uint8_t)class_idx) {
        // Class mismatch - slab was reused for a different class
#if !HAKMEM_BUILD_RELEASE
        static _Atomic uint32_t g_guard_class_mismatch = 0;
        if (atomic_fetch_add_explicit(&g_guard_class_mismatch, 1, memory_order_relaxed) < 10) {
            fprintf(stderr, "[TLS_SLL_GUARD_CLASS_MISMATCH] cls=%d ptr=%p slab_cls=%d ss=%p slab=%d (rejecting push)\n",
                    class_idx, ptr, meta->class_idx, (void*)ss, slab_idx);
        }
#endif
        return false;
    }

    // Step 4: Validate slab is ACTIVE (not EMPTY/UNUSED)
    // Use external function to find the SharedSSMeta for this SuperSlab
    extern SharedSSMeta* sp_find_meta_for_ss(SuperSlab* ss);
    SharedSSMeta* sp_meta = sp_find_meta_for_ss(ss);
    if (sp_meta) {
        SlotState state = atomic_load_explicit(&sp_meta->slots[slab_idx].state,
                                               memory_order_acquire);
        if (state != SLOT_ACTIVE) {
#if !HAKMEM_BUILD_RELEASE
            static _Atomic uint32_t g_guard_not_active = 0;
            if (atomic_fetch_add_explicit(&g_guard_not_active, 1, memory_order_relaxed) < 10) {
                fprintf(stderr, "[TLS_SLL_GUARD_NOT_ACTIVE] cls=%d ptr=%p state=%d ss=%p slab=%d (rejecting push)\n",
                        class_idx, ptr, (int)state, (void*)ss, slab_idx);
            }
#endif
            return false;
        }
    }

    // All checks passed - safe to push
    if (ss_out) *ss_out = ss;
    if (slab_idx_out) *slab_idx_out = slab_idx;
    return true;
}
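// Usage sketch (illustrative, not part of this header's API): how a TLS free
// fast path might gate a push on this guard. tiny_free_fast, tls_push, and
// remote_free_slow below are hypothetical stand-ins for the real call sites
// in tls_sll_box.h.
//
//   static inline void tiny_free_fast(int class_idx, void* base) {
//       SuperSlab* ss = NULL;
//       int slab_idx = -1;
//       if (tls_sll_push_guard(class_idx, base, &ss, &slab_idx)) {
//           tls_push(class_idx, base);          // verified local pointer: fast path
//       } else {
//           remote_free_slow(class_idx, base);  // rejected: fall back to slow path
//       }
//   }
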
// ========== TLS SLL Drain Guard ==========
//
// Validates that the slab is truly empty (meta->used == 0) before draining
// the TLS freelist back to the slab.
//
// Parameters:
//   class_idx: Class index for logging
//   ss:        SuperSlab pointer
//   slab_idx:  Slab index within the SuperSlab
//   meta:      Slab metadata pointer
//
// Returns:
//   true  = safe to drain (used == 0)
//   false = reject drain (used != 0, potential corruption)
//
// Counter (debug builds only):
//   - g_guard_drain_used: Drain attempts with a non-zero used count

static inline bool tiny_guard_drain_check(int class_idx, SuperSlab* ss,
                                          int slab_idx, TinySlabMeta* meta) {
    static __thread int s_guard_enabled = -1;
    if (__builtin_expect(s_guard_enabled == -1, 0)) {
#if !HAKMEM_BUILD_RELEASE
        s_guard_enabled = 1;  // Always on in debug builds
#else
        const char* e = getenv("HAKMEM_TINY_GUARD");
        s_guard_enabled = (e && *e && *e != '0') ? 1 : 0;
#endif
    }
    if (!s_guard_enabled) return true;

    // Check meta->used == 0
    uint16_t used = atomic_load_explicit(&meta->used, memory_order_relaxed);
    if (used != 0) {
#if !HAKMEM_BUILD_RELEASE
        static _Atomic uint32_t g_guard_drain_used = 0;
        if (atomic_fetch_add_explicit(&g_guard_drain_used, 1, memory_order_relaxed) < 10) {
            fprintf(stderr, "[TINY_GUARD_DRAIN_USED] cls=%d ss=%p slab=%d used=%u (expected 0)\n",
                    class_idx, (void*)ss, slab_idx, used);
        }
#endif
        return false;
    }
    return true;
}
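// Usage sketch (illustrative): a drain routine might check emptiness before
// returning TLS freelist nodes to the slab. tls_sll_drain_one_slab is a
// hypothetical stand-in for the real routine in tls_sll_drain_box.h.
//
//   static void tls_sll_drain_one_slab(int class_idx, SuperSlab* ss, int slab_idx) {
//       TinySlabMeta* meta = &ss->slabs[slab_idx];
//       if (!tiny_guard_drain_check(class_idx, ss, slab_idx, meta)) {
//           return;  // used != 0: skip the drain rather than risk corruption
//       }
//       /* ... hand freelist nodes back to the slab ... */
//   }
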
// ========== Slab Recycle Guard ==========
//
// Validates that the slab is truly EMPTY before recycling it to another class.
//
// Parameters:
//   ss:       SuperSlab pointer
//   slab_idx: Slab index within the SuperSlab
//   meta:     Slab metadata pointer
//
// Returns:
//   true  = safe to recycle (used == 0 && capacity > 0)
//   false = reject recycle (invalid state)
//
// Counters (debug builds only):
//   - g_guard_recycle_used:   Recycle attempts with a non-zero used count
//   - g_guard_recycle_no_cap: Recycle attempts with zero capacity

static inline bool tiny_guard_recycle_check(SuperSlab* ss, int slab_idx,
                                            TinySlabMeta* meta) {
    static __thread int s_guard_enabled = -1;
    if (__builtin_expect(s_guard_enabled == -1, 0)) {
#if !HAKMEM_BUILD_RELEASE
        s_guard_enabled = 1;  // Always on in debug builds
#else
        const char* e = getenv("HAKMEM_TINY_GUARD");
        s_guard_enabled = (e && *e && *e != '0') ? 1 : 0;
#endif
    }
    if (!s_guard_enabled) return true;

    // Check used == 0
    uint16_t used = atomic_load_explicit(&meta->used, memory_order_relaxed);
    if (used != 0) {
#if !HAKMEM_BUILD_RELEASE
        static _Atomic uint32_t g_guard_recycle_used = 0;
        if (atomic_fetch_add_explicit(&g_guard_recycle_used, 1, memory_order_relaxed) < 10) {
            fprintf(stderr, "[TINY_GUARD_RECYCLE_USED] ss=%p slab=%d used=%u (expected 0)\n",
                    (void*)ss, slab_idx, used);
        }
#endif
        return false;
    }

    // Check capacity > 0
    if (meta->capacity == 0) {
#if !HAKMEM_BUILD_RELEASE
        static _Atomic uint32_t g_guard_recycle_no_cap = 0;
        if (atomic_fetch_add_explicit(&g_guard_recycle_no_cap, 1, memory_order_relaxed) < 10) {
            fprintf(stderr, "[TINY_GUARD_RECYCLE_NO_CAP] ss=%p slab=%d cap=0\n",
                    (void*)ss, slab_idx);
        }
#endif
        return false;
    }
    return true;
}

// ========== Unified Guard Enable Check ==========
//
// Single source of truth for the guard enable/disable state, exposed for
// callers outside this header. The guard functions above inline the same
// cached check to keep their fast paths self-contained.
//
// Returns:
//   1 = guards enabled
//   0 = guards disabled

static inline int tiny_guard_enabled(void) {
    static __thread int s_guard_enabled = -1;
    if (__builtin_expect(s_guard_enabled == -1, 0)) {
#if !HAKMEM_BUILD_RELEASE
        s_guard_enabled = 1;  // Always on in debug builds
#else
        const char* e = getenv("HAKMEM_TINY_GUARD");
        s_guard_enabled = (e && *e && *e != '0') ? 1 : 0;
#endif
    }
    return s_guard_enabled;
}

#endif  // HAKMEM_TINY_GUARD_BOX_H
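// Usage sketch (illustrative): a recycling path might validate a slab before
// repurposing it for another class. slab_try_recycle is a hypothetical
// stand-in for the real routine in slab_recycling_box.h.
//
//   static bool slab_try_recycle(SuperSlab* ss, int slab_idx, int new_class_idx) {
//       TinySlabMeta* meta = &ss->slabs[slab_idx];
//       if (!tiny_guard_recycle_check(ss, slab_idx, meta)) {
//           return false;  // still in use or zero capacity: leave it alone
//       }
//       /* ... reinitialize slab geometry for new_class_idx ... */
//       return true;
//   }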