diff --git a/core/hakmem_shared_pool.c b/core/hakmem_shared_pool.c
index deafd0e6..484ba5b1 100644
--- a/core/hakmem_shared_pool.c
+++ b/core/hakmem_shared_pool.c
@@ -412,6 +412,45 @@ static int sp_slot_mark_empty(SharedSSMeta* meta, int slot_idx) {
     return -1; // Not ACTIVE
 }
 
+// Sync SP-SLOT view from an existing SuperSlab.
+// This is needed when a legacy-allocated SuperSlab reaches the shared-pool
+// release path for the first time (slot states are still SLOT_UNUSED).
+static void sp_meta_sync_slots_from_ss(SharedSSMeta* meta, SuperSlab* ss) {
+    if (!meta || !ss) return;
+
+    int cap = ss_slabs_capacity(ss);
+    if (cap > MAX_SLOTS_PER_SS) {
+        cap = MAX_SLOTS_PER_SS;
+    }
+
+    meta->total_slots = (uint8_t)cap;
+    meta->active_slots = 0;
+
+    for (int i = 0; i < cap; i++) {
+        SlotState state = SLOT_UNUSED;
+        uint32_t bit = (1u << i);
+        if (ss->slab_bitmap & bit) {
+            state = SLOT_ACTIVE;
+            meta->active_slots++;
+        } else {
+            TinySlabMeta* smeta = &ss->slabs[i];
+            uint16_t used = atomic_load_explicit(&smeta->used, memory_order_relaxed);
+            if (smeta->capacity > 0 && used == 0) {
+                state = SLOT_EMPTY;
+            }
+        }
+
+        uint8_t cls = ss->class_map[i];
+        if (cls == 255) {
+            cls = ss->slabs[i].class_idx;
+        }
+
+        meta->slots[i].class_idx = cls;
+        meta->slots[i].slab_idx = (uint8_t)i;
+        atomic_store_explicit(&meta->slots[i].state, state, memory_order_release);
+    }
+}
+
 // ---------- Layer 2: Metadata Management (Mid-level) ----------
 
 // Ensure ss_metadata array has capacity for at least min_count entries
@@ -1297,7 +1336,19 @@ shared_pool_release_slab(SuperSlab* ss, int slab_idx)
     }
 
     // Mark slot as EMPTY (ACTIVE → EMPTY)
-    if (sp_slot_mark_empty(sp_meta, slab_idx) != 0) {
+    uint32_t slab_bit = (1u << slab_idx);
+    SlotState slot_state = atomic_load_explicit(
+        &sp_meta->slots[slab_idx].state,
+        memory_order_acquire);
+    if (slot_state != SLOT_ACTIVE && (ss->slab_bitmap & slab_bit)) {
+        // Legacy path import: rebuild slot states from SuperSlab bitmap/class_map
+        sp_meta_sync_slots_from_ss(sp_meta, ss);
+        slot_state = atomic_load_explicit(
+            &sp_meta->slots[slab_idx].state,
+            memory_order_acquire);
+    }
+
+    if (slot_state != SLOT_ACTIVE || sp_slot_mark_empty(sp_meta, slab_idx) != 0) {
         if (g_lock_stats_enabled == 1) {
             atomic_fetch_add(&g_lock_release_count, 1);
         }
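
For review context, the following is a minimal standalone sketch (not part of the patch) of the per-slot state rule that the new `sp_meta_sync_slots_from_ss()` applies: bitmap bit set → ACTIVE; bit clear with `capacity > 0` and `used == 0` → EMPTY; otherwise UNUSED. The `FakeSlab` type, field widths, and the example bitmap are simplified stand-ins for the real `SuperSlab`/`TinySlabMeta` structures, chosen only for illustration.

```c
/* Standalone illustration of the slot-state derivation rule; compile with any C99 compiler. */
#include <stdint.h>
#include <stdio.h>

typedef enum { SLOT_UNUSED, SLOT_EMPTY, SLOT_ACTIVE } SlotState;

/* Simplified stand-in for TinySlabMeta: only the two fields the rule reads. */
typedef struct {
    uint16_t capacity; /* 0 means the slab was never initialized        */
    uint16_t used;     /* number of blocks currently allocated in slab  */
} FakeSlab;

/* Same decision order as sp_meta_sync_slots_from_ss(): bitmap wins, then
 * "initialized but empty", otherwise the slot is treated as unused. */
static SlotState derive_state(uint32_t bitmap, int idx, const FakeSlab* s) {
    if (bitmap & (1u << idx))            return SLOT_ACTIVE;
    if (s->capacity > 0 && s->used == 0) return SLOT_EMPTY;
    return SLOT_UNUSED;
}

int main(void) {
    FakeSlab slabs[4] = {
        { 64, 10 },  /* bit set below   -> ACTIVE */
        { 64,  0 },  /* bit clear       -> EMPTY  */
        {  0,  0 },  /* never touched   -> UNUSED */
        { 64,  0 },  /* bit set below   -> ACTIVE */
    };
    uint32_t bitmap = 0x9u; /* slots 0 and 3 are live in the SuperSlab bitmap */

    for (int i = 0; i < 4; i++) {
        printf("slot %d -> state %d\n", i, derive_state(bitmap, i, &slabs[i]));
    }
    return 0;
}
```

This mirrors why the release-path change rebuilds the SP-SLOT view only when the slot is not ACTIVE but the SuperSlab bitmap says the slab is live: that combination can only occur for a legacy-allocated SuperSlab whose slot states were never populated.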