hakmem/core/box/tiny_guard_box.h
Phase 9-3: Box Theory refactoring (TLS_SLL_DUP root fix)

Implementation:
- Step 1: TLS SLL Guard Box (meta/class/state cross-check before push)
- Step 2: SP_REBIND_SLOT macro (atomic slab rebind)
- Step 3: Unified Geometry Box (unified pointer-arithmetic API)
- Step 4: Unified Guard Box (single HAKMEM_TINY_GUARD=1 control)

New Files (545 lines):
- core/box/tiny_guard_box.h (277L)
  - TLS push guard (SuperSlab/slab/class/state validation)
  - Recycle guard (EMPTY confirmation)
  - Drain guard (preparation)
  - Unified ENV control: HAKMEM_TINY_GUARD=1
- core/box/tiny_geometry_box.h (174L)
  - BASE_FROM_USER/USER_FROM_BASE conversion
  - SS_FROM_PTR/SLAB_IDX_FROM_PTR lookup
  - PTR_CLASSIFY combined helper
  - Identified 85+ duplicated pointer-arithmetic sites as unification candidates
- core/box/sp_rebind_slot_box.h (94L)
  - SP_REBIND_SLOT macro (geometry + TLS reset + atomic class_map update)
  - Applied at 6 sites (Stage 0/0.5/1/2/3)
  - Debug trace: HAKMEM_SP_REBIND_TRACE=1

Results:
- ✅ TLS_SLL_DUP fully eradicated (0 crashes, 0 guard rejects)
- ✅ Performance improved +5.9% (15.16M → 16.05M ops/s on WS8192)
- ✅ 0 new compiler warnings
- ✅ Box Theory compliant (Single Responsibility, Clear Contract, Observable, Composable)

Test Results:
- Debug build: 10M iterations completed with HAKMEM_TINY_GUARD=1
- Release build: 16.05M ops/s (average of 3 runs)
- Guard reject rate: 0%
- Core dumps: none

Box Theory Compliance:
- Single Responsibility: each box has one job (guard/rebind/geometry)
- Clear Contract: well-defined API boundaries
- Observable: validation controllable via ENV variables
- Composable: usable from every allocation/free path

Performance Impact:
- Release build (guards disabled): no regression (+5.9% improvement)
- Debug build (guards enabled): a few percent overhead (validation cost)

Architecture Improvements:
- Centralized pointer arithmetic (85+ candidate sites for unification)
- Atomicity guaranteed for slab rebind
- Consolidated validation (single ENV control)

Phase 9 Status:
- Performance target (25-30M ops/s): not met (16.05M = 53-64%)
- TLS_SLL_DUP eradication: ✅ achieved
- Code quality: ✅ greatly improved

🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-30 10:48:50 +09:00
// tiny_guard_box.h - Unified Safety Guards for Tiny Allocator
// Purpose: Centralized validation for TLS push/drain/recycle operations
// License: MIT
// Date: 2025-11-30 (Phase 9-3 unification)
//
// Box Theory Principles:
// - Single Responsibility: All tiny allocator safety validations
// - Clear Contract: true = safe to proceed, false = reject operation
// - Observable: HAKMEM_TINY_GUARD=1 enables all guards + logging
// - Composable: Called from tls_sll_box.h, tls_sll_drain_box.h, slab_recycling_box.h
//
// Phase 9-3 Unification:
// Previously scattered across:
// - tls_sll_guard_box.h (TLS push validation)
// - tls_sll_drain_box.h (drain assertions)
// - slab_recycling_box.h (recycle checks)
// Now unified under single HAKMEM_TINY_GUARD environment variable.
//
// Guards:
// 1. TLS Push Guard: Prevent cross-slab pointer contamination
// 2. TLS Drain Guard: Validate meta->used == 0 before drain
// 3. Slab Recycle Guard: Validate EMPTY state before recycle
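//
// Enabling (illustrative note, not part of the original contract beyond what
// is stated above): debug builds turn all guards on unconditionally; release
// builds leave them off until HAKMEM_TINY_GUARD=1 is present in the process
// environment, e.g. (binary name hypothetical):
//   HAKMEM_TINY_GUARD=1 ./your_benchmark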
#ifndef HAKMEM_TINY_GUARD_BOX_H
#define HAKMEM_TINY_GUARD_BOX_H
#include "../hakmem_tiny_superslab_internal.h"
#include "../hakmem_shared_pool.h"
#include "../superslab/superslab_inline.h"
#include "tiny_geometry_box.h" // Phase 9-3: Unified pointer arithmetic
#include <stdatomic.h> // _Atomic counters, atomic_load/fetch_add, memory_order_*
#include <stdbool.h>   // bool
#include <stdint.h>    // uint8_t / uint16_t / uint32_t
#include <stdio.h>
#include <stdlib.h>
// ========== TLS SLL Push Guard ==========
//
// Validates that pointer belongs to the expected SuperSlab/slab/class
// before allowing TLS push. Prevents cross-slab contamination.
//
// Parameters:
// class_idx: Expected class index for this TLS freelist
// ptr: Pointer to validate (BASE pointer)
// ss_out: (Optional) Output parameter for validated SuperSlab
// slab_idx_out: (Optional) Output parameter for validated slab index
//
// Returns:
// true = safe to push (all checks passed)
// false = reject push (caller should use slow path / remote push)
//
// Guard Logic:
// 1. SuperSlab lookup: Verify pointer belongs to a valid SuperSlab
// 2. Slab index lookup: Find which slab within SuperSlab contains pointer
// 3. Class validation: Verify slab's class_idx matches expected class
// 4. State validation: Verify slab is ACTIVE (not EMPTY/UNUSED)
//
// Performance:
// - Debug builds: always enabled
// - Release builds: disabled by default (overhead reduces to one cached
//   per-thread flag check); enable via HAKMEM_TINY_GUARD=1
//
// Counters (debug builds only):
// - g_guard_no_ss: Pointer not in any SuperSlab
// - g_guard_no_slab: Pointer not in any slab (invalid address range)
// - g_guard_class_mismatch: Slab class doesn't match expected class
// - g_guard_not_active: Slab state is not ACTIVE
static inline bool tls_sll_push_guard(int class_idx, void* ptr, SuperSlab** ss_out, int* slab_idx_out)
{
    // Step 0: Guard enable/disable check (cached per thread)
    static __thread int s_guard_enabled = -1;
    if (__builtin_expect(s_guard_enabled == -1, 0)) {
#if !HAKMEM_BUILD_RELEASE
        s_guard_enabled = 1; // Always on in debug builds
#else
        const char* e = getenv("HAKMEM_TINY_GUARD");
        s_guard_enabled = (e && *e && *e != '0') ? 1 : 0;
#endif
    }
    if (!s_guard_enabled) {
        return true; // Guard disabled, always pass
    }

    // Step 1: SuperSlab lookup (use unified geometry API)
    SuperSlab* ss = SS_FROM_PTR(ptr);
    if (!ss) {
        // Not in any SuperSlab - reject push
#if !HAKMEM_BUILD_RELEASE
        static _Atomic uint32_t g_guard_no_ss = 0;
        if (atomic_fetch_add_explicit(&g_guard_no_ss, 1, memory_order_relaxed) < 10) {
            fprintf(stderr, "[TLS_SLL_GUARD_NO_SS] cls=%d ptr=%p (rejecting push)\n",
                    class_idx, ptr);
        }
#endif
        return false;
    }

    // Step 2: Find slab index (use unified geometry API)
    int slab_idx = SLAB_IDX_FROM_PTR(ss, ptr);
    if (slab_idx < 0) {
#if !HAKMEM_BUILD_RELEASE
        static _Atomic uint32_t g_guard_no_slab = 0;
        if (atomic_fetch_add_explicit(&g_guard_no_slab, 1, memory_order_relaxed) < 10) {
            fprintf(stderr, "[TLS_SLL_GUARD_NO_SLAB] cls=%d ptr=%p ss=%p (rejecting push)\n",
                    class_idx, ptr, (void*)ss);
        }
#endif
        return false;
    }

    // Step 3: Validate class match
    TinySlabMeta* meta = &ss->slabs[slab_idx];
    if (meta->class_idx != (uint8_t)class_idx) {
        // Class mismatch - slab was reused for different class
#if !HAKMEM_BUILD_RELEASE
        static _Atomic uint32_t g_guard_class_mismatch = 0;
        if (atomic_fetch_add_explicit(&g_guard_class_mismatch, 1, memory_order_relaxed) < 10) {
            fprintf(stderr, "[TLS_SLL_GUARD_CLASS_MISMATCH] cls=%d ptr=%p slab_cls=%d ss=%p slab=%d (rejecting push)\n",
                    class_idx, ptr, meta->class_idx, (void*)ss, slab_idx);
        }
#endif
        return false;
    }

    // Step 4: Validate slab is ACTIVE (not EMPTY/UNUSED)
    // Use external function to find SharedSSMeta for this SuperSlab
    extern SharedSSMeta* sp_find_meta_for_ss(SuperSlab* ss);
    SharedSSMeta* sp_meta = sp_find_meta_for_ss(ss);
    if (sp_meta) {
        SlotState state = atomic_load_explicit(&sp_meta->slots[slab_idx].state, memory_order_acquire);
        if (state != SLOT_ACTIVE) {
#if !HAKMEM_BUILD_RELEASE
            static _Atomic uint32_t g_guard_not_active = 0;
            if (atomic_fetch_add_explicit(&g_guard_not_active, 1, memory_order_relaxed) < 10) {
                fprintf(stderr, "[TLS_SLL_GUARD_NOT_ACTIVE] cls=%d ptr=%p state=%d ss=%p slab=%d (rejecting push)\n",
                        class_idx, ptr, state, (void*)ss, slab_idx);
            }
#endif
            return false;
        }
    }

    // All checks passed - safe to push
    if (ss_out) *ss_out = ss;
    if (slab_idx_out) *slab_idx_out = slab_idx;
    return true;
}
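
// Usage sketch (illustrative only; tls_sll_push_fast and the TLS freelist
// head are hypothetical caller-side names in tls_sll_box.h, not defined here):
//
//   static inline bool tls_sll_push_fast(int class_idx, void* base) {
//       SuperSlab* ss = NULL;
//       int slab_idx = -1;
//       if (!tls_sll_push_guard(class_idx, base, &ss, &slab_idx)) {
//           return false;            // caller falls back to slow/remote push
//       }
//       // ... link `base` onto the per-class TLS freelist ...
//       return true;
//   }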
// ========== TLS SLL Drain Guard ==========
//
// Validates that slab is truly empty (meta->used == 0) before draining
// TLS freelist back to the slab.
//
// Parameters:
// class_idx: Class index for logging
// ss: SuperSlab pointer
// slab_idx: Slab index within SuperSlab
// meta: Slab metadata pointer
//
// Returns:
// true = safe to drain (used == 0)
// false = reject drain (used != 0, potential corruption)
//
// Counter (debug builds only):
// - g_guard_drain_used: Drain attempts with non-zero used count
static inline bool tiny_guard_drain_check(int class_idx, SuperSlab* ss, int slab_idx, TinySlabMeta* meta)
{
    static __thread int s_guard_enabled = -1;
    if (__builtin_expect(s_guard_enabled == -1, 0)) {
#if !HAKMEM_BUILD_RELEASE
        s_guard_enabled = 1; // Always on in debug builds
#else
        const char* e = getenv("HAKMEM_TINY_GUARD");
        s_guard_enabled = (e && *e && *e != '0') ? 1 : 0;
#endif
    }
    if (!s_guard_enabled) return true;

    // Check meta->used == 0
    uint16_t used = atomic_load_explicit(&meta->used, memory_order_relaxed);
    if (used != 0) {
#if !HAKMEM_BUILD_RELEASE
        static _Atomic uint32_t g_guard_drain_used = 0;
        if (atomic_fetch_add_explicit(&g_guard_drain_used, 1, memory_order_relaxed) < 10) {
            fprintf(stderr, "[TINY_GUARD_DRAIN_USED] cls=%d ss=%p slab=%d used=%u (expected 0)\n",
                    class_idx, (void*)ss, slab_idx, used);
        }
#endif
        return false;
    }
    return true;
}
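
// Usage sketch (illustrative only; the surrounding drain loop lives in
// tls_sll_drain_box.h and the local names shown are hypothetical):
//
//   if (!tiny_guard_drain_check(class_idx, ss, slab_idx, meta)) {
//       return;                      // skip drain; non-zero `used` suggests corruption
//   }
//   // ... return TLS-cached nodes to the slab ...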
// ========== Slab Recycle Guard ==========
//
// Validates that slab is truly EMPTY before recycling to another class.
//
// Parameters:
// ss: SuperSlab pointer
// slab_idx: Slab index within SuperSlab
// meta: Slab metadata pointer
//
// Returns:
// true = safe to recycle (used == 0 && capacity > 0)
// false = reject recycle (invalid state)
//
// Counters (debug builds only):
// - g_guard_recycle_used: Recycle attempts with non-zero used count
// - g_guard_recycle_no_cap: Recycle attempts with zero capacity
static inline bool tiny_guard_recycle_check(SuperSlab* ss, int slab_idx, TinySlabMeta* meta)
{
    static __thread int s_guard_enabled = -1;
    if (__builtin_expect(s_guard_enabled == -1, 0)) {
#if !HAKMEM_BUILD_RELEASE
        s_guard_enabled = 1; // Always on in debug builds
#else
        const char* e = getenv("HAKMEM_TINY_GUARD");
        s_guard_enabled = (e && *e && *e != '0') ? 1 : 0;
#endif
    }
    if (!s_guard_enabled) return true;

    // Check used == 0
    uint16_t used = atomic_load_explicit(&meta->used, memory_order_relaxed);
    if (used != 0) {
#if !HAKMEM_BUILD_RELEASE
        static _Atomic uint32_t g_guard_recycle_used = 0;
        if (atomic_fetch_add_explicit(&g_guard_recycle_used, 1, memory_order_relaxed) < 10) {
            fprintf(stderr, "[TINY_GUARD_RECYCLE_USED] ss=%p slab=%d used=%u (expected 0)\n",
                    (void*)ss, slab_idx, used);
        }
#endif
        return false;
    }

    // Check capacity > 0
    if (meta->capacity == 0) {
#if !HAKMEM_BUILD_RELEASE
        static _Atomic uint32_t g_guard_recycle_no_cap = 0;
        if (atomic_fetch_add_explicit(&g_guard_recycle_no_cap, 1, memory_order_relaxed) < 10) {
            fprintf(stderr, "[TINY_GUARD_RECYCLE_NO_CAP] ss=%p slab=%d cap=0\n",
                    (void*)ss, slab_idx);
        }
#endif
        return false;
    }
    return true;
}
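
// Usage sketch (illustrative only; the recycle path lives in
// slab_recycling_box.h and the rebind step shown is hypothetical here):
//
//   if (!tiny_guard_recycle_check(ss, slab_idx, meta)) {
//       return false;                // slab is not truly EMPTY; leave it untouched
//   }
//   // ... rebind the slab to its new class (e.g. via SP_REBIND_SLOT) ...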
// ========== Unified Guard Enable Check ==========
//
// Shared query for the guard enable/disable state, intended for callers
// outside this header. (The guards above cache the same HAKMEM_TINY_GUARD
// decision in their own per-thread flags to avoid an extra call.)
//
// Returns:
// 1 = guards enabled
// 0 = guards disabled
static inline int tiny_guard_enabled(void)
{
    static __thread int s_guard_enabled = -1;
    if (__builtin_expect(s_guard_enabled == -1, 0)) {
#if !HAKMEM_BUILD_RELEASE
        s_guard_enabled = 1; // Always on in debug builds
#else
        const char* e = getenv("HAKMEM_TINY_GUARD");
        s_guard_enabled = (e && *e && *e != '0') ? 1 : 0;
#endif
    }
    return s_guard_enabled;
}
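
// Usage sketch (illustrative only): gate an optional, ad-hoc consistency
// check behind the same unified switch without paying for it when guards
// are off.
//
//   if (tiny_guard_enabled()) {
//       // e.g. re-verify meta->class_idx before taking a rare slow path
//   }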
#endif // HAKMEM_TINY_GUARD_BOX_H