diff --git a/core/hakmem_shared_pool.c b/core/hakmem_shared_pool.c
index daaeb6c2..71a5129e 100644
--- a/core/hakmem_shared_pool.c
+++ b/core/hakmem_shared_pool.c
@@ -563,6 +563,18 @@ shared_pool_acquire_slab(int class_idx, SuperSlab** ss_out, int* slab_idx_out)
     // RACE FIX: Load SuperSlab pointer atomically (consistency)
     SuperSlab* ss = atomic_load_explicit(&reuse_meta->ss, memory_order_relaxed);
 
+    // RACE FIX: Check if SuperSlab was freed (NULL pointer)
+    // This can happen if Thread A freed the SuperSlab after pushing slot to freelist,
+    // but Thread B popped the stale slot before the freelist was cleared.
+    if (!ss) {
+        // SuperSlab freed - skip and fall through to Stage 2/3
+        if (g_lock_stats_enabled == 1) {
+            atomic_fetch_add(&g_lock_release_count, 1);
+        }
+        pthread_mutex_unlock(&g_shared_pool.alloc_lock);
+        goto stage2_fallback;
+    }
+
     if (dbg_acquire == 1) {
         fprintf(stderr, "[SP_ACQUIRE_STAGE1_LOCKFREE] class=%d reusing EMPTY slot (ss=%p slab=%d)\n",
                 class_idx, (void*)ss, reuse_slot_idx);
@@ -598,6 +610,7 @@ shared_pool_acquire_slab(int class_idx, SuperSlab** ss_out, int* slab_idx_out)
         pthread_mutex_unlock(&g_shared_pool.alloc_lock);
     }
 
+stage2_fallback:
     // ========== Stage 2 (Lock-Free): Try to claim UNUSED slots ==========
     // P0-5: Lock-free atomic CAS claiming (no mutex needed for slot state transition!)
     // RACE FIX: Read ss_meta_count atomically (now properly declared as _Atomic)
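
For context, here is a minimal standalone sketch of the stale-slot race this hunk guards against. The `SlotMeta`, `release_slot`, and `acquire_from_slot` names are hypothetical simplifications, not the actual hakmem_shared_pool.c structures, and the sketch uses acquire/release ordering because it runs without the pool's `alloc_lock` (the patched path appears to get away with a relaxed load because it still holds the mutex at that point):

```c
// Hypothetical sketch of the stale freelist-slot race and the NULL-check
// mitigation. Simulates the interleaving sequentially: Thread A's free
// lands between Thread B's freelist pop and its pointer load.
#include <stdatomic.h>
#include <stdio.h>

typedef struct { int dummy; } SuperSlab;

typedef struct {
    _Atomic(SuperSlab*) ss;  // becomes NULL once the SuperSlab is freed
} SlotMeta;

// Freeing side (Thread A): publish NULL *before* tearing the SuperSlab
// down, so a consumer that already popped this slot sees a tombstone
// instead of a dangling pointer.
static void release_slot(SlotMeta* meta) {
    atomic_store_explicit(&meta->ss, NULL, memory_order_release);
    // ... actual SuperSlab teardown would happen after this point ...
}

// Acquiring side (Thread B): mirror of the patched Stage 1 path. A NULL
// load means the slot went stale between the freelist push and our pop,
// so we skip it; the caller falls through to the next allocation stage.
static SuperSlab* acquire_from_slot(SlotMeta* meta) {
    SuperSlab* ss = atomic_load_explicit(&meta->ss, memory_order_acquire);
    if (!ss) {
        return NULL;  // stale slot: fall back to Stage 2/3
    }
    return ss;
}

int main(void) {
    SuperSlab slab;
    SlotMeta meta;
    atomic_init(&meta.ss, &slab);

    printf("live slot:  %p\n", (void*)acquire_from_slot(&meta));
    release_slot(&meta);  // Thread A frees after the slot was pushed
    printf("stale slot: %p\n", (void*)acquire_from_slot(&meta));  // (nil)
    return 0;
}
```

The key design point the patch relies on is that the freeing thread publishes NULL before teardown, which turns what would be a use-after-free on the popped slot into a clean skip-and-fallback.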