Feat(phase9): Safe removal from legacy list on shared pool free (Task 2)

Added remove_superslab_from_legacy_head to safely unlink SuperSlabs from
legacy g_superslab_heads when freed by shared_pool_release_slab.
This prevents dangling pointers in the legacy backend if fallback allocation was used.
Called after unlocking alloc_lock to avoid lock inversion.
This commit is contained in:
Moe Charm (CI)
2025-11-30 15:21:42 +09:00
parent e3b0fdce57
commit 128883e7a8
3 changed files with 39 additions and 0 deletions

View File

@@ -1489,6 +1489,10 @@ shared_pool_release_slab(SuperSlab* ss, int slab_idx)
pthread_mutex_unlock(&g_shared_pool.alloc_lock);
// Remove from legacy backend list (if present) to prevent dangling pointers
extern void remove_superslab_from_legacy_head(SuperSlab* ss);
remove_superslab_from_legacy_head(ss);
// Free SuperSlab:
// 1. Try LRU cache (hak_ss_lru_push) - lazy deallocation
// 2. Or munmap if LRU is full - eager deallocation

View File

@@ -182,5 +182,6 @@ void* hak_tiny_alloc_superslab_backend_shared(int class_idx);
SuperSlabHead* init_superslab_head(int class_idx);
int expand_superslab_head(SuperSlabHead* head);
SuperSlab* find_chunk_for_ptr(void* ptr, int class_idx);
void remove_superslab_from_legacy_head(SuperSlab* ss);
#endif // HAKMEM_TINY_SUPERSLAB_INTERNAL_H

View File

@@ -174,3 +174,37 @@ SuperSlab* find_chunk_for_ptr(void* ptr, int class_idx) {
    return NULL; // Not found in any chunk
}
// Remove SuperSlab from Legacy Backend list (for safe deallocation).
//
// Called just before a SuperSlab is freed (see shared_pool_release_slab) so
// the legacy g_superslab_heads lists never retain a dangling pointer when
// the fallback allocation path had linked this chunk in.
//
// ss may be NULL (no-op). The chunk's class index is not known here, so
// every class list is scanned; each list is examined and unlinked under its
// own expansion_lock. Assumes a chunk is linked into at most one class list,
// so the first successful unlink ends the search — TODO confirm against the
// fallback allocation path.
void remove_superslab_from_legacy_head(SuperSlab* ss) {
    if (!ss) return;
    for (int i = 0; i < TINY_NUM_CLASSES_SS; i++) {
        SuperSlabHead* head = g_superslab_heads[i];
        if (!head) continue;
        pthread_mutex_lock(&head->expansion_lock);
        // Case 1: ss is the first chunk of this class list.
        if (head->first_chunk == ss) {
            head->first_chunk = ss->next_chunk;
            // Keep the allocation cursor valid; it may become NULL when the
            // list is now empty.
            if (head->current_chunk == ss) head->current_chunk = head->first_chunk;
            atomic_fetch_sub_explicit(&head->total_chunks, 1, memory_order_relaxed);
            // Fully detach the node: ss is about to be freed, and a stale
            // next_chunk left behind would point from freed memory into the
            // live list (use-after-free hazard for any later reader).
            ss->next_chunk = NULL;
            pthread_mutex_unlock(&head->expansion_lock);
            return;
        }
        // Case 2: ss is somewhere after the first chunk — walk with a
        // trailing pointer so we can splice it out.
        SuperSlab* prev = head->first_chunk;
        while (prev && prev->next_chunk) {
            if (prev->next_chunk == ss) {
                prev->next_chunk = ss->next_chunk;
                if (head->current_chunk == ss) head->current_chunk = prev;
                atomic_fetch_sub_explicit(&head->total_chunks, 1, memory_order_relaxed);
                ss->next_chunk = NULL;  // see Case 1: detach before the free
                pthread_mutex_unlock(&head->expansion_lock);
                return;
            }
            prev = prev->next_chunk;
        }
        pthread_mutex_unlock(&head->expansion_lock);
    }
}