Files
hakmem/core/hakmem_shared_pool.h
Moe Charm (CI) 9b0d746407 Phase 3d-B: TLS Cache Merge - Unified g_tls_sll[] structure (+12-18% expected)
Merge separate g_tls_sll_head[] and g_tls_sll_count[] arrays into unified
TinyTLSSLL struct to improve L1D cache locality. Expected performance gain:
+12-18% from reducing cache line splits (2 loads → 1 load per operation).

Changes:
- core/hakmem_tiny.h: Add TinyTLSSLL type (16B aligned, head+count+pad)
- core/hakmem_tiny.c: Replace separate arrays with g_tls_sll[8]
- core/box/tls_sll_box.h: Update Box API (13 sites) for unified access
- Updated 32+ files: All g_tls_sll_head[i] → g_tls_sll[i].head
- Updated 32+ files: All g_tls_sll_count[i] → g_tls_sll[i].count
- core/hakmem_tiny_integrity.h: Unified canary guards
- core/box/integrity_box.c: Simplified canary validation
- Makefile: Added core/box/tiny_sizeclass_hist_box.o to link

Build:  PASS (10K ops sanity test)
Warnings: Only pre-existing LTO type mismatches (unrelated)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-20 07:32:30 +09:00

158 lines
6.4 KiB
C

#pragma once
#include <stdint.h>
#include <pthread.h>
#include <stdatomic.h>
#include "superslab/superslab_types.h"
// Shared SuperSlab Pool (Phase 12-2 skeleton)
// Multiple tiny size classes share a global set of SuperSlab instances.
// This header exposes the minimal API used by refill/free hot paths in Phase 12.
#ifdef __cplusplus
extern "C" {
#endif
// ============================================================================
// Phase 12: SP-SLOT Box - Per-Slot State Management
// ============================================================================
//
// Problem:
// - Current design: 1 SuperSlab mixes multiple classes (C0-C7)
// - SuperSlab freed only when ALL classes empty (active_slabs==0)
// - Result: SuperSlabs rarely freed, LRU cache unused
//
// Solution:
// - Track each slab slot's state individually (UNUSED/ACTIVE/EMPTY)
// - Maintain per-class free slot lists for reuse
// - Free SuperSlab only when ALL slots empty
//
// Benefits:
// - Empty slabs from one class can be reused by same class immediately
// - Reduces mmap/munmap churn significantly
// - Enables LRU cache for fully empty SuperSlabs
// Slot state for each (SuperSlab, slab_idx) pair.
// Lifecycle: UNUSED -> ACTIVE (claimed for one class via lock-free CAS, see
// SharedSlot.state) -> EMPTY (all blocks returned) -> ACTIVE again on reuse.
typedef enum {
    SLOT_UNUSED = 0, // Never used yet
    SLOT_ACTIVE,     // Assigned to a class (meta->used > 0 or freelist non-empty)
    SLOT_EMPTY       // Was assigned, now empty (meta->used==0, remote==0)
} SlotState;
// Per-slot metadata.
// P0-5: `state` is _Atomic so a slot can be claimed without holding the pool
// mutex (lock-free CAS transition UNUSED -> ACTIVE).
typedef struct {
    _Atomic SlotState state; // Atomic for lock-free CAS (UNUSED→ACTIVE)
    uint8_t class_idx;       // Valid when state != SLOT_UNUSED (0-7)
    uint8_t slab_idx;        // SuperSlab-internal index (0-31)
} SharedSlot;
// Per-SuperSlab metadata for slot management.
// Tracks per-slot assignment so the pool can (a) reuse empty slots for the same
// class immediately and (b) free the whole SuperSlab only when ALL slots are
// empty (see the SP-SLOT rationale at the top of this header).
#define MAX_SLOTS_PER_SS 32 // Typical: 1MB SS has 32 slabs of 32KB each
typedef struct SharedSSMeta {
    _Atomic(SuperSlab*) ss;             // Physical SuperSlab pointer (atomic for lock-free Stage 2)
    SharedSlot slots[MAX_SLOTS_PER_SS]; // Slot state for each slab
    uint8_t active_slots;               // Number of SLOT_ACTIVE slots
    uint8_t total_slots;                // Total available slots (from ss_slabs_capacity)
    struct SharedSSMeta* next;          // For free list linking
} SharedSSMeta;
// ============================================================================
// P0-4: Lock-Free Free Slot List (LIFO Stack)
// ============================================================================
// Free slot node for the lock-free linked list.
// Nodes come from the pre-allocated g_free_node_pool below (the pool exists to
// avoid malloc/free on this path), so pushing/popping a free slot never
// re-enters the general allocator.
typedef struct FreeSlotNode {
    SharedSSMeta* meta;        // Which SuperSlab metadata
    uint8_t slot_idx;          // Which slot within that SuperSlab
    struct FreeSlotNode* next; // Next node in LIFO stack
} FreeSlotNode;
// Lock-free per-class free slot list (LIFO stack with atomic head).
// NOTE(review): a Treiber-style stack with a raw atomic head is ABA-prone when
// nodes are recycled. The fixed node pool limits exposure but does not prove
// absence of ABA — confirm the push/pop implementation in the .c handles node
// reuse safely (e.g. nodes are never returned to a per-class free node list
// while concurrent pops may hold stale pointers).
typedef struct {
    _Atomic(FreeSlotNode*) head; // Atomic stack head pointer
} LockFreeFreeList;
// Node pool for lock-free allocation (avoid malloc/free).
// g_node_alloc_index is presumably a per-class bump index into g_free_node_pool.
// NOTE(review): this header does not show whether the index is range-checked or
// wraps at MAX_FREE_NODES_PER_CLASS — verify in the .c that exhaustion of the
// 4096-node pool is handled rather than indexing out of bounds.
#define MAX_FREE_NODES_PER_CLASS 4096 // Pre-allocated nodes per class (increased for 500K+ iterations)
extern FreeSlotNode g_free_node_pool[TINY_NUM_CLASSES_SS][MAX_FREE_NODES_PER_CLASS];
extern _Atomic uint32_t g_node_alloc_index[TINY_NUM_CLASSES_SS];
// ============================================================================
// Legacy Free Slot List (for comparison, will be removed after P0-4)
// ============================================================================
// Free slot entry for per-class reuse lists (legacy, pre-P0-4 path).
typedef struct {
    SharedSSMeta* meta; // Which SuperSlab metadata
    uint8_t slot_idx;   // Which slot within that SuperSlab
} FreeSlotEntry;
// Per-class free slot list (max capacity for now: 256 entries per class).
// Fixed-capacity array + count; this is the mutex-protected legacy path kept
// for comparison against the lock-free P0-4 lists until the latter is removed.
#define MAX_FREE_SLOTS_PER_CLASS 256
typedef struct {
    FreeSlotEntry entries[MAX_FREE_SLOTS_PER_CLASS];
    uint32_t count; // Number of free slots available
} FreeSlotList;
// Global pool of SuperSlabs shared by all tiny size classes.
// Locking model: mutable metadata is guarded by alloc_lock, except for fields
// explicitly marked _Atomic (read/updated lock-free by the Stage 2 fast path)
// and class_hints (read lock-free, best-effort).
typedef struct SharedSuperSlabPool {
    SuperSlab** slabs;          // Dynamic array of SuperSlab*
    uint32_t capacity;          // Allocated entries in slabs[]
    uint32_t total_count;       // Total SuperSlabs ever allocated (<= capacity)
    uint32_t active_count;      // SuperSlabs that have >0 active slabs
    pthread_mutex_t alloc_lock; // Protects pool metadata and grow/scan operations
    // Per-class hints: last known SuperSlab with a free slab for that class.
    // Read lock-free (best-effort), updated under alloc_lock.
    SuperSlab* class_hints[TINY_NUM_CLASSES_SS];
    // Approximate per-class ACTIVE slot counts (Tiny classes 0..7).
    // Updated under alloc_lock; read by learning layer and stats snapshot.
    uint32_t class_active_slots[TINY_NUM_CLASSES_SS];
    // LRU cache integration hooks (Phase 9/12, optional for now)
    SuperSlab* lru_head;
    SuperSlab* lru_tail;
    uint32_t lru_count;
    // ========== Phase 12: SP-SLOT Management ==========
    // P0-4: Lock-free per-class free slot lists (atomic LIFO stacks)
    LockFreeFreeList free_slots_lockfree[TINY_NUM_CLASSES_SS];
    // Legacy: Per-class free slot lists (mutex-protected, for comparison)
    FreeSlotList free_slots[TINY_NUM_CLASSES_SS];
    // SharedSSMeta array for all SuperSlabs in pool
    // RACE FIX: Fixed-size array (no realloc!) to avoid race with lock-free Stage 2
    // LARSON FIX (2025-11-16): Increased from 2048 → 8192 for MT churn workloads
    // NOTE: the #define below is written inside the struct body; the preprocessor
    // ignores scope so it is file-visible, but keep that in mind when reading.
#define MAX_SS_METADATA_ENTRIES 8192
    SharedSSMeta ss_metadata[MAX_SS_METADATA_ENTRIES]; // Fixed-size array
    _Atomic uint32_t ss_meta_count; // Used entries (atomic for lock-free Stage 2)
} SharedSuperSlabPool;
// Global singleton (defined in the corresponding .c translation unit).
extern SharedSuperSlabPool g_shared_pool;

// Initialize shared pool (idempotent, thread-safe wrt multiple callers on startup paths).
void shared_pool_init(void);

// Get/allocate a SuperSlab registered in the pool.
// Returns non-NULL on success, NULL on failure.
SuperSlab* shared_pool_acquire_superslab(void);

// Acquire a slab for class_idx from shared pool.
// On success:
//   *ss_out       = SuperSlab containing slab
//   *slab_idx_out = slab index [0, SLABS_PER_SUPERSLAB_MAX)
// Returns 0 on success, non-zero on failure.
// NOTE(review): the range is stated in terms of SLABS_PER_SUPERSLAB_MAX while
// this header sizes slots with MAX_SLOTS_PER_SS (32) — confirm the two
// constants agree, or which one actually bounds slab_idx.
int shared_pool_acquire_slab(int class_idx, SuperSlab** ss_out, int* slab_idx_out);

// Release an empty slab back to pool (mark as unassigned).
// Caller must ensure TinySlabMeta.used == 0.
void shared_pool_release_slab(SuperSlab* ss, int slab_idx);
#ifdef __cplusplus
}
#endif