hakmem/core/superslab/superslab_inline.h
Moe Charm (CI) 25cb7164c7 Comprehensive legacy cleanup and architecture consolidation
Summary of Changes:

MOVED TO ARCHIVE:
- core/hakmem_tiny_legacy_slow_box.inc → archive/
  * Slow path legacy code preserved for reference
  * Superseded by Gatekeeper Box architecture

- core/superslab_allocate.c → archive/superslab_allocate_legacy.c
  * Legacy SuperSlab allocation implementation
  * Functionality integrated into new Box system

- core/superslab_head.c → archive/superslab_head_legacy.c
  * Legacy slab head management
  * Refactored through Box architecture

REMOVED DEAD CODE:
- Eliminated unused allocation policy variants from ss_allocation_box.c
  * Reduced from 127+ lines of conditional logic to a focused implementation
  * Removed: old policy branches, unused allocation strategies
  * Kept: current Box-based allocation path

ADDED NEW INFRASTRUCTURE:
- core/superslab_head_stub.c (41 lines)
  * Minimal stub for backward compatibility
  * Delegates to new architecture

- Enhanced core/superslab_cache.c (75 lines added)
  * Added missing API functions for cache management
  * Proper interface for SuperSlab cache integration

REFACTORED CORE SYSTEMS:
- core/hakmem_super_registry.c
  * Moved registration logic from scattered locations
  * Centralized SuperSlab registry management

- core/hakmem_tiny.c
  * Removed 27 lines of redundant initialization
  * Simplified through Box architecture

- core/hakmem_tiny_alloc.inc
  * Streamlined allocation path to use Gatekeeper
  * Removed legacy decision logic

- core/box/ss_allocation_box.c/h
  * Dramatically simplified allocation policy
  * Removed conditional branches for unused strategies
  * Focused on current Box-based approach

BUILD SYSTEM:
- Updated Makefile for archive structure
- Removed obsolete object file references
- Maintained build compatibility

SAFETY & TESTING:
- All deletions verified: no broken references
- Build verification: RELEASE=0 and RELEASE=1 pass
- Smoke tests: 100% pass rate
- Functional verification: allocation/free intact

Architecture Consolidation:
Before: Multiple overlapping allocation paths with legacy code branches
After:  Single unified path through Gatekeeper Boxes with clear architecture

Benefits:
- Reduced code size and complexity
- Improved maintainability
- Single source of truth for allocation logic
- Better diagnostic/observability hooks
- Foundation for future optimizations

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-04 14:22:48 +09:00


#ifndef SUPERSLAB_INLINE_H
#define SUPERSLAB_INLINE_H
#include "superslab_types.h"
#include "../tiny_box_geometry.h" // Box 3 geometry helpers (stride/base/capacity)
// Forward declaration for unsafe remote drain used by refill/handle paths
// Implemented in hakmem_tiny_superslab.c
void _ss_remote_drain_to_freelist_unsafe(SuperSlab* ss, int slab_idx, TinySlabMeta* meta);
// Optional debug counter (defined in hakmem_tiny_superslab.c)
extern _Atomic uint64_t g_ss_active_dec_calls;
// ========== SuperSlab Lookup Box (Phase 2: Box-ification) ==========
// Purpose: Formalize SuperSlab lookup contracts with clear safety guarantees
//
// Evolution:
// - Phase 12: UNSAFE mask+dereference (5-10 cycles) → 12% crash rate
// - Phase 1b: SAFE registry lookup (50-100 cycles) → 0% crash rate
// - Phase 2: Box-ification - multiple contracts (UNSAFE/SAFE/GUARDED)
//
// Box Pattern Benefits:
// 1. Clear contracts: Each API documents preconditions and guarantees
// 2. Multiple levels: Choose speed vs safety based on context
// 3. Future-proof: Enables optimizations without breaking existing code
//
// APIs:
// - ss_lookup_unsafe() : 5-10 cycles, requires validated pointer
// - ss_lookup_safe() : 50-100 cycles, works with arbitrary pointers
// - ss_lookup_guarded() : 100-200 cycles, adds integrity checks
// - ss_fast_lookup() : Backward compatible (→ ss_lookup_safe)
//
// Note: hak_super_lookup() is implemented in hakmem_super_registry.h as static inline.
// We provide a forward declaration here so that ss_lookup_guarded() can call it
// even in translation units where hakmem_super_registry.h is included later.
static inline SuperSlab* hak_super_lookup(void* ptr);
// ============================================================================
// Contract Level 1: UNSAFE - Fast but dangerous (internal use only)
// ============================================================================
//
// Preconditions:
// - ptr MUST be a valid Tiny allocation pointer (already validated)
// - ptr MUST be within a mapped SuperSlab region
// - Violation of preconditions → SEGFAULT
//
// Use cases:
// - After header magic validation (LARSON_FIX paths)
// - Internal paths where pointer origin is known
//
// Performance: ~5-10 cycles
// Safety: ⚠️ UNSAFE - caller must ensure preconditions
//
static inline SuperSlab* ss_lookup_unsafe(void* ptr)
{
    if (__builtin_expect(!ptr, 0)) return NULL;
    uintptr_t p = (uintptr_t)ptr;

    // Step 1: Mask with minimum SuperSlab size (1MB alignment)
    SuperSlab* ss = (SuperSlab*)(p & ~((uintptr_t)SUPERSLAB_SIZE_MIN - 1u));

    // Step 2: Validate magic (quick reject for non-SuperSlab memory)
    // ⚠️ DANGER: This dereference can SEGFAULT if preconditions not met
    if (__builtin_expect(ss->magic != SUPERSLAB_MAGIC, 0)) {
        return NULL;
    }

    // Step 3: Range check (ptr must be within this SuperSlab)
    size_t ss_size = (size_t)1 << ss->lg_size;
    if (__builtin_expect(p >= (uintptr_t)ss + ss_size, 0)) {
        return NULL;
    }
    return ss;
}
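//
// Worked example (illustrative only; assumes SUPERSLAB_SIZE_MIN == 1 MiB as in the
// Step 1 comment, a 2 MiB SuperSlab (lg_size == 21), and made-up addresses):
//   ptr          = 0x7f3a400137f0
//   Step 1 mask  : 0x7f3a400137f0 & ~0xFFFFF              -> ss = 0x7f3a40000000
//   Step 2 magic : ss->magic == SUPERSLAB_MAGIC           -> plausible SuperSlab
//   Step 3 range : ptr < ss + (1 << 21) = 0x7f3a40200000  -> in range, return ss
// A garbage pointer fails Step 2 or Step 3 -- or faults at Step 2 if the masked
// address is unmapped, which is exactly why this contract is UNSAFE.
//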
// ============================================================================
// Contract Level 2: SAFE - Registry-based (recommended)
// ============================================================================
//
// Preconditions: None (works with arbitrary pointers)
//
// Guarantees:
// - Never dereferences unmapped memory
// - Returns NULL for invalid pointers (stack, heap, garbage, etc.)
// - Thread-safe (lock-free reads)
//
// Use cases:
// - Free paths with arbitrary pointers (hak_tiny_free_fast_v2)
// - External API boundaries
// - Default choice for unknown pointer origin
//
// Performance: ~50-100 cycles (hash table + linear probing)
// Safety: ✓ SAFE - guaranteed crash-free
//
// Note: Implemented as macro to avoid static/extern declaration conflicts
// hak_super_lookup() is defined in hakmem_super_registry.h
#define ss_lookup_safe(ptr) hak_super_lookup(ptr)
// ============================================================================
// Contract Level 3: GUARDED - Full validation (debug builds only)
// ============================================================================
//
// Note: This API is only available in debug builds to avoid circular dependency issues
// In release builds, use ss_lookup_safe() directly
//
#if !HAKMEM_BUILD_RELEASE
static inline SuperSlab* ss_lookup_guarded(void* ptr)
{
    SuperSlab* ss = hak_super_lookup(ptr); // Direct call, not via macro
    if (!ss) return NULL;

    // Debug mode: additional integrity checks
    uint32_t refcount = atomic_load_explicit(&ss->refcount, memory_order_relaxed);
    if (refcount == 0 || refcount > 1000000) {
        fprintf(stderr, "[SS_LOOKUP_GUARDED] WARNING: ptr=%p ss=%p refcount=%u (suspicious)\n",
                ptr, (void*)ss, refcount);
    }
    if (ss->magic != SUPERSLAB_MAGIC) {
        fprintf(stderr, "[SS_LOOKUP_GUARDED] ERROR: ptr=%p ss=%p magic=%llx (corrupted!)\n",
                ptr, (void*)ss, (unsigned long long)ss->magic);
        return NULL;
    }
    return ss;
}
#else
// Release build: ss_lookup_guarded() not available, use ss_lookup_safe() instead
#define ss_lookup_guarded(ptr) ss_lookup_safe(ptr)
#endif
// ============================================================================
// Backward Compatibility
// ============================================================================
// Legacy API: ss_fast_lookup() → ss_lookup_safe()
#define ss_fast_lookup(ptr) ss_lookup_safe(ptr)
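//
// Usage sketch (illustrative only, not part of the API): picking a contract level
// at a free-path boundary. The function name and the ptr_already_validated flag are
// hypothetical; real callers live in paths such as hak_tiny_free_fast_v2.
//
static inline SuperSlab* ss_example_resolve_for_free(void* ptr, int ptr_already_validated)
{
    if (ptr_already_validated) {
        // Header magic (or similar) already proved ptr is a Tiny allocation:
        // the UNSAFE mask+dereference contract is acceptable here (~5-10 cycles).
        return ss_lookup_unsafe(ptr);
    }
    // Unknown origin (external API boundary): only the SAFE registry lookup is
    // guaranteed never to touch unmapped memory (~50-100 cycles).
    return ss_lookup_safe(ptr);
}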
// Return maximum number of slabs for this SuperSlab based on lg_size.
static inline int ss_slabs_capacity(SuperSlab* ss)
{
    if (!ss) return 0;
    size_t ss_size = (size_t)1 << ss->lg_size;
    return (int)(ss_size / SLAB_SIZE);
}
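//
// Example (illustrative; the concrete sizes are assumptions, not definitions from
// this header): with a 2 MiB SuperSlab (lg_size == 21) and SLAB_SIZE == 64 KiB,
// this returns 2 MiB / 64 KiB = 32 slabs.
//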
// Compute slab base pointer for given (ss, slab_idx).
// Box 5 wrapper: delegate to Box 3 canonical geometry helper.
static inline uint8_t* tiny_slab_base_for(SuperSlab* ss, int slab_idx)
{
    if (!ss || slab_idx < 0) {
        return NULL;
    }
    return tiny_slab_base_for_geometry(ss, slab_idx);
}
// Compute slab index for a pointer inside ss.
// Box 5 wrapper: inverse of Box 3 geometry (tiny_slab_base_for_geometry).
// Layout (data regions):
// - Slab 0: [ss + SUPERSLAB_SLAB0_DATA_OFFSET, ss + SLAB_SIZE)
// - Slab 1: [ss + 1*SLAB_SIZE, ss + 2*SLAB_SIZE)
// - Slab k: [ss + k*SLAB_SIZE, ss + (k+1)*SLAB_SIZE)
static inline int slab_index_for(SuperSlab* ss, void* ptr)
{
    if (!ss || !ptr) {
        return -1;
    }
    uintptr_t base = (uintptr_t)ss;
    uintptr_t p = (uintptr_t)ptr;
    size_t ss_size = (size_t)1 << ss->lg_size;

    // Outside overall SuperSlab range
    if (p < base + SUPERSLAB_SLAB0_DATA_OFFSET || p >= base + ss_size) {
        return -1;
    }

    // Slab 0: from first data byte up to the end of first slab
    if (p < base + SLAB_SIZE) {
        return 0;
    }

    // Slabs 1+ use simple SLAB_SIZE spacing from SuperSlab base
    size_t rel = p - base;
    int idx = (int)(rel / SLAB_SIZE);
    if (idx < 0 || idx >= SLABS_PER_SUPERSLAB_MAX) {
        return -1;
    }
    return idx;
}
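//
// Self-check sketch (illustrative only): tiny_slab_base_for() and slab_index_for()
// should be inverses for every valid slab index of a SuperSlab. The helper name is
// hypothetical; nothing in the allocator calls it.
//
static inline int ss_example_geometry_roundtrip_ok(SuperSlab* ss)
{
    int cap = ss_slabs_capacity(ss);
    for (int idx = 0; idx < cap; idx++) {
        uint8_t* base = tiny_slab_base_for(ss, idx);
        if (!base || slab_index_for(ss, base) != idx) {
            return 0; // geometry mismatch
        }
    }
    return 1;
}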
// P1.1: Get class_idx from class_map (out-of-band lookup, avoids reading TinySlabMeta)
// Purpose: Free path optimization - read class_idx without touching cold metadata
// Returns: class_idx (0-7) or 255 if slab is unassigned or invalid
static inline int tiny_get_class_from_ss(SuperSlab* ss, int slab_idx)
{
    if (!ss || slab_idx < 0 || slab_idx >= SLABS_PER_SUPERSLAB_MAX) {
        return 255; // Invalid input
    }
    return (int)ss->class_map[slab_idx];
}
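//
// Free-path sketch (illustrative only): classify an arbitrary pointer without
// touching TinySlabMeta. The function name is hypothetical; the real fast free
// path combines similar steps.
//
static inline int ss_example_class_for_ptr(void* ptr)
{
    SuperSlab* ss = ss_lookup_safe(ptr);            // SAFE contract: never faults
    if (!ss) return -1;                             // not a SuperSlab-backed pointer
    int slab_idx = slab_index_for(ss, ptr);         // geometry only, no metadata read
    if (slab_idx < 0) return -1;                    // header/guard area, not a block
    int cls = tiny_get_class_from_ss(ss, slab_idx); // out-of-band class_map lookup
    return (cls == 255) ? -1 : cls;                 // 255 = slab unassigned/invalid
}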
// Simple ref helpers used by lifecycle paths.
static inline uint32_t superslab_ref_get(SuperSlab* ss)
{
    return ss ? atomic_load_explicit(&ss->refcount, memory_order_acquire) : 0;
}

static inline void superslab_ref_inc(SuperSlab* ss)
{
    if (ss) {
        atomic_fetch_add_explicit(&ss->refcount, 1, memory_order_acq_rel);
    }
}

static inline void superslab_ref_dec(SuperSlab* ss)
{
    if (ss) {
        uint32_t prev = atomic_fetch_sub_explicit(&ss->refcount, 1, memory_order_acq_rel);
        (void)prev; // caller decides when to free; we just provide the primitive
    }
}
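//
// Lifecycle sketch (illustrative only): the ref helpers are deliberately dumb
// primitives. A hypothetical caller takes a reference across a window of use;
// what a zero count means (e.g. eligibility for reclaim) is policy owned elsewhere.
//
static inline void ss_example_pinned_access(SuperSlab* ss)
{
    superslab_ref_inc(ss); // take a reference for the duration of the work
    // ... inspect or mutate this SuperSlab here ...
    superslab_ref_dec(ss); // drop it; does NOT free, the lifecycle owner decides
}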
// Ownership helpers (Box 3)
static inline int ss_owner_try_acquire(TinySlabMeta* m, uint32_t tid)
{
    if (!m) return 0;
    uint8_t want = (uint8_t)((tid >> 8) & 0xFFu);
    uint8_t expected = 0;
    return __atomic_compare_exchange_n(&m->owner_tid_low, &expected, want,
                                       false, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
}

static inline void ss_owner_release(TinySlabMeta* m, uint32_t tid)
{
    if (!m) return;
    uint8_t expected = (uint8_t)((tid >> 8) & 0xFFu);
    (void)__atomic_compare_exchange_n(&m->owner_tid_low, &expected, 0u,
                                      false, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
}

static inline int ss_owner_is_mine(TinySlabMeta* m, uint32_t tid)
{
    if (!m) return 0;
    uint8_t cur = __atomic_load_n(&m->owner_tid_low, __ATOMIC_RELAXED);
    return cur == (uint8_t)((tid >> 8) & 0xFFu);
}
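//
// Ownership sketch (illustrative only): a hypothetical maintenance step that must
// not race with the owning thread. tid is whatever thread id the caller already
// uses with these helpers.
//
static inline int ss_example_try_owned_maintenance(TinySlabMeta* m, uint32_t tid)
{
    if (!ss_owner_try_acquire(m, tid)) {
        return 0;                 // slab already owned by someone else; back off
    }
    // ... exclusive-owner work on this slab's local structures ...
    ss_owner_release(m, tid);     // release with the same tid that acquired
    return 1;
}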
// Active block accounting (saturating dec by 1)
static inline void ss_active_dec_one(SuperSlab* ss)
{
    if (!ss) return;
    atomic_fetch_add_explicit(&g_ss_active_dec_calls, 1, memory_order_relaxed);
    uint32_t cur = atomic_load_explicit(&ss->total_active_blocks, memory_order_relaxed);
    while (cur != 0) {
        if (atomic_compare_exchange_weak_explicit(&ss->total_active_blocks,
                                                  &cur,
                                                  cur - 1u,
                                                  memory_order_acq_rel,
                                                  memory_order_relaxed)) {
            return;
        }
        // cur updated by failed CAS; loop
    }
}
// Remote push helper (Box 2):
// - Enqueue node to per-slab MPSC stack
// - Returns 1 if transition empty->nonempty, otherwise 0
// - Also decrements ss->total_active_blocks once (free completed)
static inline int ss_remote_push(SuperSlab* ss, int slab_idx, void* node)
{
    if (!ss || slab_idx < 0 || slab_idx >= SLABS_PER_SUPERSLAB_MAX || !node) {
        return -1;
    }
    _Atomic uintptr_t* head = &ss->remote_heads[slab_idx];
    uintptr_t old_head;
    uintptr_t new_head;
    int transitioned = 0;
    do {
        old_head = atomic_load_explicit(head, memory_order_acquire);
        // The next pointer is normally handled via tiny_next_ptr_box / tiny_nextptr,
        // but here we simply push onto a singly linked list (the caller decodes it).
        *(uintptr_t*)node = old_head;
        new_head = (uintptr_t)node;
    } while (!atomic_compare_exchange_weak_explicit(
                 head, &old_head, new_head,
                 memory_order_release, memory_order_relaxed));
    transitioned = (old_head == 0) ? 1 : 0;
    atomic_fetch_add_explicit(&ss->remote_counts[slab_idx], 1, memory_order_acq_rel);
    // account active block removal once per free
    ss_active_dec_one(ss);
    return transitioned;
}
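//
// Remote-free sketch (illustrative only): a non-owner thread returning a block.
// The consumer side (the owner draining via _ss_remote_drain_to_freelist_unsafe)
// needs the slab's TinySlabMeta, which this header does not expose, so only the
// producer half is shown. The function name is hypothetical.
//
static inline void ss_example_remote_free(SuperSlab* ss, void* block)
{
    if (!ss || !block) return;
    int slab_idx = slab_index_for(ss, block);
    if (slab_idx < 0) return;                   // not a block inside this SuperSlab
    int first = ss_remote_push(ss, slab_idx, block);
    if (first == 1) {
        // empty -> nonempty transition: the owner has new remote frees to drain on
        // its next refill/handle pass (how it is notified is policy elsewhere).
    }
}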
#endif // SUPERSLAB_INLINE_H