// Archived legacy SuperSlabHead implementation.
// Not compiled by default; kept for reference / A/B rollback.
// Source moved from core/superslab_head.c after shared-pool backend migration.

#include "../core/hakmem_tiny_superslab_internal.h"

// ============================================================================
// Phase 2a: Dynamic Expansion - Global per-class SuperSlabHeads
// ============================================================================

SuperSlabHead* g_superslab_heads[TINY_NUM_CLASSES_SS] = {NULL};
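
// Illustrative only: a minimal sketch of how a call site might lazily
// materialize a per-class head. The init lock shown here (hypothetical
// g_head_init_lock) is an assumption for the sketch; the real call sites
// and their synchronization lived elsewhere in the allocator.
//
//   static pthread_mutex_t g_head_init_lock = PTHREAD_MUTEX_INITIALIZER;
//
//   SuperSlabHead* head = g_superslab_heads[class_idx];
//   if (!head) {
//       pthread_mutex_lock(&g_head_init_lock);
//       head = g_superslab_heads[class_idx];        // re-check under lock
//       if (!head) {
//           head = init_superslab_head(class_idx);  // NULL on OOM
//           g_superslab_heads[class_idx] = head;    // publish
//       }
//       pthread_mutex_unlock(&g_head_init_lock);
//   }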

// ============================================================================
// SuperSlabHead Management Functions
// ============================================================================

// Initialize SuperSlabHead for a class
SuperSlabHead* init_superslab_head(int class_idx) {
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }

    // Allocate SuperSlabHead structure
    SuperSlabHead* head = (SuperSlabHead*)calloc(1, sizeof(SuperSlabHead));
    if (!head) {
        // Bump the re-entrancy guard around fprintf so any allocation it
        // performs internally is not routed back into hakmem.
        extern __thread int g_hakmem_lock_depth;
        g_hakmem_lock_depth++;
        fprintf(stderr, "[HAKMEM] CRITICAL: Failed to allocate SuperSlabHead for class %d\n", class_idx);
        g_hakmem_lock_depth--;
        return NULL;
    }

    head->class_idx = (uint8_t)class_idx;
    atomic_store_explicit(&head->total_chunks, 0, memory_order_relaxed);
    head->first_chunk = NULL;
    head->current_chunk = NULL;
    pthread_mutex_init(&head->expansion_lock, NULL);

    // Allocate initial chunk(s)
    int initial_chunks = 1;

    for (int i = 0; i < initial_chunks; i++) {
        if (expand_superslab_head(head) < 0) {
            extern __thread int g_hakmem_lock_depth;
            g_hakmem_lock_depth++;
            fprintf(stderr, "[HAKMEM] CRITICAL: Failed to allocate initial chunk %d for class %d\n",
                    i, class_idx);
            g_hakmem_lock_depth--;

            // Cleanup on failure: free every chunk linked so far, then the head
            SuperSlab* chunk = head->first_chunk;
            while (chunk) {
                SuperSlab* next = chunk->next_chunk;
                superslab_free(chunk);
                chunk = next;
            }
            pthread_mutex_destroy(&head->expansion_lock);
            free(head);
            return NULL;
        }
    }

    extern __thread int g_hakmem_lock_depth;
    g_hakmem_lock_depth++;
#if !HAKMEM_BUILD_RELEASE
    fprintf(stderr, "[HAKMEM] Initialized SuperSlabHead for class %d: %zu initial chunks\n",
            class_idx, atomic_load_explicit(&head->total_chunks, memory_order_relaxed));
#endif
    g_hakmem_lock_depth--;

    return head;
}
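
// The archived file ships no matching teardown entry point; the failure path
// inside init_superslab_head() above is the only full cleanup sequence. A
// standalone destructor (hypothetical name, not part of the original API)
// would be a direct reuse of that sequence:
//
//   static void destroy_superslab_head(SuperSlabHead* head) {
//       if (!head) return;
//       SuperSlab* chunk = head->first_chunk;
//       while (chunk) {
//           SuperSlab* next = chunk->next_chunk;
//           superslab_free(chunk);
//           chunk = next;
//       }
//       pthread_mutex_destroy(&head->expansion_lock);
//       free(head);
//   }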

// Expand SuperSlabHead by allocating and linking a new chunk
int expand_superslab_head(SuperSlabHead* head) {
    if (!head) {
        return -1;
    }

    // Allocate new chunk via existing superslab_allocate
    SuperSlab* new_chunk = superslab_allocate(head->class_idx);
    if (!new_chunk) {
#if !defined(NDEBUG) || defined(HAKMEM_SUPERSLAB_VERBOSE)
        extern __thread int g_hakmem_lock_depth;
        g_hakmem_lock_depth++;
        fprintf(stderr, "[HAKMEM] CRITICAL: Failed to allocate new chunk for class %d (system OOM)\n",
                head->class_idx);
        g_hakmem_lock_depth--;
#endif
        return -1; // True OOM (system out of memory)
    }

    // Initialize slab 0 so bitmap != 0x00000000
    size_t block_size = g_tiny_class_sizes[head->class_idx];
    uint32_t owner_tid = (uint32_t)(uintptr_t)pthread_self();

    superslab_init_slab(new_chunk, 0, block_size, owner_tid);

    // Initialize the next_chunk link to NULL before publishing
    new_chunk->next_chunk = NULL;

    // Thread-safe linking: append to the tail under the expansion lock.
    // The walk starts at current_chunk (normally already the tail), not at
    // first_chunk, so consecutive expansions stay short.
    pthread_mutex_lock(&head->expansion_lock);

    if (head->current_chunk) {
        SuperSlab* tail = head->current_chunk;
        while (tail->next_chunk) {
            tail = tail->next_chunk;
        }
        tail->next_chunk = new_chunk;
    } else {
        head->first_chunk = new_chunk;
    }

    head->current_chunk = new_chunk;

    size_t old_count = atomic_fetch_add_explicit(&head->total_chunks, 1, memory_order_relaxed);
    size_t new_count = old_count + 1;

    pthread_mutex_unlock(&head->expansion_lock);

#if !defined(NDEBUG) || defined(HAKMEM_SUPERSLAB_VERBOSE)
    extern __thread int g_hakmem_lock_depth;
    g_hakmem_lock_depth++;
    fprintf(stderr, "[HAKMEM] Expanded SuperSlabHead for class %d: %zu chunks now (bitmap=0x%08x)\n",
            head->class_idx, new_count, new_chunk->slab_bitmap);
    g_hakmem_lock_depth--;
#endif

    return 0;
}
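
// Cost note: the tail walk above starts at current_chunk, which this function
// leaves pointing at the newly linked tail, so back-to-back expansions walk
// O(1) links; only after remove_superslab_from_legacy_head() rewinds
// current_chunk can a walk approach O(total_chunks). Capacity arithmetic
// (illustrative, assuming lg_size = 21, i.e. 2 MiB chunks, and a 128-byte
// class): each successful expansion adds
//
//   (1 << 21) / 128 = 16384 blocks
//
// of raw capacity, before per-slab metadata is carved out.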

// Find which chunk a pointer belongs to
SuperSlab* find_chunk_for_ptr(void* ptr, int class_idx) {
    if (!ptr || class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }

    SuperSlabHead* head = g_superslab_heads[class_idx];
    if (!head) {
        return NULL;
    }

    uintptr_t ptr_addr = (uintptr_t)ptr;

    SuperSlab* chunk = head->first_chunk;
    while (chunk) {
        uintptr_t chunk_start = (uintptr_t)chunk;
        size_t chunk_size = (size_t)1 << chunk->lg_size;
        uintptr_t chunk_end = chunk_start + chunk_size;

        if (ptr_addr >= chunk_start && ptr_addr < chunk_end) {
            return chunk;
        }

        chunk = chunk->next_chunk;
    }

    return NULL;
}
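
// Worked example (illustrative values): with chunk_start = 0x7f0000000000
// and lg_size = 21, chunk_size = 1 << 21 = 0x200000, so the chunk owns
// [0x7f0000000000, 0x7f0000200000). ptr = 0x7f00001ffff8 matches; exactly
// 0x7f0000200000 does not (half-open range). This is a linear O(n) scan: if
// chunks were guaranteed to be size-aligned, the owning base could be
// computed in O(1) as (uintptr_t)ptr & ~((uintptr_t)chunk_size - 1), but
// this legacy path does not assume that alignment.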

// Remove SuperSlab from Legacy Backend list (for safe deallocation)
void remove_superslab_from_legacy_head(SuperSlab* ss) {
    if (!ss) return;

    for (int i = 0; i < TINY_NUM_CLASSES_SS; i++) {
        SuperSlabHead* head = g_superslab_heads[i];
        if (!head) continue;

        pthread_mutex_lock(&head->expansion_lock);

        // Case 1: ss is the list head
        if (head->first_chunk == ss) {
            head->first_chunk = ss->next_chunk;
            if (head->current_chunk == ss) head->current_chunk = head->first_chunk;
            atomic_fetch_sub_explicit(&head->total_chunks, 1, memory_order_relaxed);
            pthread_mutex_unlock(&head->expansion_lock);
            return;
        }

        // Case 2: ss is past the head; unlink it via its predecessor
        SuperSlab* prev = head->first_chunk;
        while (prev && prev->next_chunk) {
            if (prev->next_chunk == ss) {
                prev->next_chunk = ss->next_chunk;
                if (head->current_chunk == ss) head->current_chunk = prev;
                atomic_fetch_sub_explicit(&head->total_chunks, 1, memory_order_relaxed);
                pthread_mutex_unlock(&head->expansion_lock);
                return;
            }
            prev = prev->next_chunk;
        }

        pthread_mutex_unlock(&head->expansion_lock);
    }
}
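
// Illustrative call sequence (hypothetical reclaim path, not in this file):
// unlink first, then release, so list walkers do not encounter a chunk that
// has already been returned to the system:
//
//   remove_superslab_from_legacy_head(ss);  // detach from its class list
//   superslab_free(ss);                     // then release the chunk itself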