diff --git a/core/hakmem_shared_pool.c b/core/hakmem_shared_pool.c index 68ec1aae..86151bb3 100644 --- a/core/hakmem_shared_pool.c +++ b/core/hakmem_shared_pool.c @@ -56,7 +56,7 @@ static void __attribute__((destructor)) lock_stats_report(void) { FreeSlotNode g_free_node_pool[TINY_NUM_CLASSES_SS][MAX_FREE_NODES_PER_CLASS]; _Atomic uint32_t g_node_alloc_index[TINY_NUM_CLASSES_SS] = {0}; -// Allocate a node from pool (lock-free, never fails until pool exhausted) +// Allocate a node from pool (lock-free; returns NULL on exhaustion so the caller can fall back to the legacy path) static inline FreeSlotNode* node_alloc(int class_idx) { if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) { return NULL; @@ -64,7 +64,8 @@ static inline FreeSlotNode* node_alloc(int class_idx) { uint32_t idx = atomic_fetch_add(&g_node_alloc_index[class_idx], 1); if (idx >= MAX_FREE_NODES_PER_CLASS) { - // Pool exhausted - should not happen in practice + // Pool exhausted - should be rare. Caller must fall back to legacy + // mutex-protected free list to preserve correctness. static _Atomic int warn_once = 0; if (atomic_exchange(&warn_once, 1) == 0) { fprintf(stderr, "[P0-4 WARN] Node pool exhausted for class %d\n", class_idx); @@ -379,7 +380,11 @@ static int sp_freelist_push_lockfree(int class_idx, SharedSSMeta* meta, int slot // Allocate node from pool FreeSlotNode* node = node_alloc(class_idx); if (!node) { - return -1; // Pool exhausted + // Fallback: push into legacy per-class free list + // ASSUME: Caller already holds alloc_lock (e.g., shared_pool_release_slab:772) + // Do NOT lock again to avoid deadlock on non-recursive mutex!
+ (void)sp_freelist_push(class_idx, meta, slot_idx); + return 0; } // Fill node data diff --git a/core/hakmem_shared_pool.h b/core/hakmem_shared_pool.h index 449d84ff..b387ff55 100644 --- a/core/hakmem_shared_pool.h +++ b/core/hakmem_shared_pool.h @@ -74,7 +74,7 @@ typedef struct { } LockFreeFreeList; // Node pool for lock-free allocation (avoid malloc/free) -#define MAX_FREE_NODES_PER_CLASS 512 // Pre-allocated nodes per class +#define MAX_FREE_NODES_PER_CLASS 4096 // Pre-allocated nodes per class (increased for 500K+ iterations) extern FreeSlotNode g_free_node_pool[TINY_NUM_CLASSES_SS][MAX_FREE_NODES_PER_CLASS]; extern _Atomic uint32_t g_node_alloc_index[TINY_NUM_CLASSES_SS];