#pragma once

#include <stdint.h>
#include <pthread.h>

#include "superslab/superslab_types.h"

// Shared SuperSlab Pool (Phase 12-2 skeleton)
//
// Multiple tiny size classes share a global set of SuperSlab instances.
// This header exposes the minimal API used by the refill/free hot paths in Phase 12.

#ifdef __cplusplus
extern "C" {
#endif

typedef struct SharedSuperSlabPool {
    SuperSlab** slabs;            // Dynamic array of SuperSlab*
    uint32_t    capacity;         // Allocated entries in slabs[]
    uint32_t    total_count;      // Total SuperSlabs ever allocated (<= capacity)
    uint32_t    active_count;     // SuperSlabs that have >0 active slabs

    pthread_mutex_t alloc_lock;   // Protects pool metadata and grow/scan operations

    // Per-class hints: last known SuperSlab with a free slab for that class.
    // Read lock-free (best-effort), updated under alloc_lock.
    SuperSlab* class_hints[TINY_NUM_CLASSES_SS];

    // LRU cache integration hooks (Phase 9/12, optional for now)
    SuperSlab* lru_head;
    SuperSlab* lru_tail;
    uint32_t   lru_count;
} SharedSuperSlabPool;

// Global singleton
extern SharedSuperSlabPool g_shared_pool;

// Initialize the shared pool (idempotent; thread-safe with respect to
// multiple callers on startup paths).
void shared_pool_init(void);

// Get or allocate a SuperSlab registered in the pool.
// Returns non-NULL on success, NULL on failure.
SuperSlab* shared_pool_acquire_superslab(void);

// Acquire a slab for class_idx from the shared pool.
// On success:
//   *ss_out       = SuperSlab containing the slab
//   *slab_idx_out = slab index in [0, SLABS_PER_SUPERSLAB_MAX)
// Returns 0 on success, non-zero on failure.
int shared_pool_acquire_slab(int class_idx, SuperSlab** ss_out, int* slab_idx_out);

// Release an empty slab back to the pool (mark it as unassigned).
// Caller must ensure TinySlabMeta.used == 0.
void shared_pool_release_slab(SuperSlab* ss, int slab_idx);

#ifdef __cplusplus
}
#endif
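
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of this header's API): how a
// refill path for one tiny size class might drive the calls declared above.
// The surrounding control flow and the way objects are carved from the slab
// are assumptions; only shared_pool_init(), shared_pool_acquire_slab(), and
// shared_pool_release_slab() come from this header.
//
//   SuperSlab* ss = NULL;
//   int slab_idx = -1;
//
//   shared_pool_init();  // idempotent; safe to call on every startup path
//
//   if (shared_pool_acquire_slab(class_idx, &ss, &slab_idx) == 0) {
//       // ... hand the slab (ss, slab_idx) to the per-class refill logic ...
//
//       // Later, once the slab drains back to TinySlabMeta.used == 0,
//       // return it so other size classes can reuse it:
//       shared_pool_release_slab(ss, slab_idx);
//   } else {
//       // Pool could not grow or had no free slab; caller falls back as needed.
//   }
// ---------------------------------------------------------------------------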