diff --git a/core/hakmem_shared_pool.c b/core/hakmem_shared_pool.c
index 9f8da6da..16f454d8 100644
--- a/core/hakmem_shared_pool.c
+++ b/core/hakmem_shared_pool.c
@@ -1489,6 +1489,10 @@ shared_pool_release_slab(SuperSlab* ss, int slab_idx)
 
     pthread_mutex_unlock(&g_shared_pool.alloc_lock);
 
+    // Remove from legacy backend list (if present) to prevent dangling pointers
+    extern void remove_superslab_from_legacy_head(SuperSlab* ss);
+    remove_superslab_from_legacy_head(ss);
+
     // Free SuperSlab:
     // 1. Try LRU cache (hak_ss_lru_push) - lazy deallocation
     // 2. Or munmap if LRU is full - eager deallocation
diff --git a/core/hakmem_tiny_superslab_internal.h b/core/hakmem_tiny_superslab_internal.h
index 5c4381a8..e9cccc7a 100644
--- a/core/hakmem_tiny_superslab_internal.h
+++ b/core/hakmem_tiny_superslab_internal.h
@@ -182,5 +182,6 @@ void* hak_tiny_alloc_superslab_backend_shared(int class_idx);
 SuperSlabHead* init_superslab_head(int class_idx);
 int expand_superslab_head(SuperSlabHead* head);
 SuperSlab* find_chunk_for_ptr(void* ptr, int class_idx);
+void remove_superslab_from_legacy_head(SuperSlab* ss);
 
 #endif // HAKMEM_TINY_SUPERSLAB_INTERNAL_H
diff --git a/core/superslab_head.c b/core/superslab_head.c
index e9841181..ca785391 100644
--- a/core/superslab_head.c
+++ b/core/superslab_head.c
@@ -174,3 +174,37 @@ SuperSlab* find_chunk_for_ptr(void* ptr, int class_idx) {
 
     return NULL; // Not found in any chunk
 }
+
+// Remove SuperSlab from Legacy Backend list (for safe deallocation)
+void remove_superslab_from_legacy_head(SuperSlab* ss) {
+    if (!ss) return;
+
+    for (int i = 0; i < TINY_NUM_CLASSES_SS; i++) {
+        SuperSlabHead* head = g_superslab_heads[i];
+        if (!head) continue;
+
+        pthread_mutex_lock(&head->expansion_lock);
+
+        if (head->first_chunk == ss) {
+            head->first_chunk = ss->next_chunk;
+            if (head->current_chunk == ss) head->current_chunk = head->first_chunk;
+            atomic_fetch_sub_explicit(&head->total_chunks, 1, memory_order_relaxed);
+            pthread_mutex_unlock(&head->expansion_lock);
+            return;
+        }
+
+        SuperSlab* prev = head->first_chunk;
+        while (prev && prev->next_chunk) {
+            if (prev->next_chunk == ss) {
+                prev->next_chunk = ss->next_chunk;
+                if (head->current_chunk == ss) head->current_chunk = prev;
+                atomic_fetch_sub_explicit(&head->total_chunks, 1, memory_order_relaxed);
+                pthread_mutex_unlock(&head->expansion_lock);
+                return;
+            }
+            prev = prev->next_chunk;
+        }
+
+        pthread_mutex_unlock(&head->expansion_lock);
+    }
+}
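
Note (not part of the patch): remove_superslab_from_legacy_head() scans every size class because only the SuperSlab pointer is available at the release site in shared_pool_release_slab(). A minimal sketch of a debug assertion that could exercise the new unlink path is below; it assumes g_superslab_heads, TINY_NUM_CLASSES_SS, and the expansion_lock/first_chunk/next_chunk fields are accessible from the calling translation unit as they are in core/superslab_head.c, and the helper name is hypothetical.

#include <assert.h>
#include <pthread.h>
#include "hakmem_tiny_superslab_internal.h"

// Hypothetical debug helper: after shared_pool_release_slab() has released a
// SuperSlab, it must no longer be reachable from any legacy backend list,
// otherwise find_chunk_for_ptr() could return a pointer into freed memory.
static void assert_not_in_legacy_lists(SuperSlab* ss) {
    for (int i = 0; i < TINY_NUM_CLASSES_SS; i++) {
        SuperSlabHead* head = g_superslab_heads[i];
        if (!head) continue;

        pthread_mutex_lock(&head->expansion_lock);
        for (SuperSlab* cur = head->first_chunk; cur; cur = cur->next_chunk) {
            assert(cur != ss && "SuperSlab still linked in legacy backend list");
        }
        pthread_mutex_unlock(&head->expansion_lock);
    }
}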