// hakmem_tiny_slab_mgmt.inc
// Phase 2D-4 (FINAL): Slab management functions extraction
//
// This file contains slab management functions extracted from hakmem_tiny.c
// to improve code organization. Reduces main file by ~142 lines (12%).
//
// Functions:
// - allocate_new_slab(): Allocate new slab for a class
// - release_slab(): Release a slab back to system
// - move_to_full_list(): Move slab to full list
// - move_to_free_list(): Move slab to free list
//
// Slab lifecycle management - not on hot path.
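//
// Typical lifecycle (illustrative summary of the functions below, not a strict
// protocol): allocate_new_slab() -> slab serves allocations -> move_to_full_list()
// once every block is handed out -> move_to_free_list() once blocks are freed
// back -> release_slab() when the slab is reclaimed entirely.
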
// Allocate a new slab for the given class
static TinySlab* allocate_new_slab(int class_idx) {
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES) return NULL;

    // Allocate slab header (Phase 6.X P0 Fix: use hkm_libc_malloc)
    TinySlab* slab = (TinySlab*)hkm_libc_malloc(sizeof(TinySlab));
    if (!slab) return NULL;

    // Initialize new fields that are not necessarily zeroed by malloc
    slab->remote_q_next = NULL;
    atomic_store_explicit(&slab->remote_queued, 0u, memory_order_relaxed);

    // Allocate bitmap (Phase 6.X P0 Fix: use hkm_libc_calloc)
    int bitmap_size = g_tiny_bitmap_words[class_idx];
    slab->bitmap = (uint64_t*)hkm_libc_calloc(bitmap_size, sizeof(uint64_t));
    if (!slab->bitmap) {
        hkm_libc_free(slab);
        return NULL;
    }

    // Allocate 64KB aligned memory via aligned_alloc (C11)
    // Requirement: size is a multiple of alignment (true for 64KB)
    void* aligned_mem = aligned_alloc(TINY_SLAB_SIZE, TINY_SLAB_SIZE);
    if (!aligned_mem) {
        hkm_libc_free(slab->bitmap);
        hkm_libc_free(slab);
        return NULL;
    }
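    // The header/bitmap live in ordinary libc-managed memory, while the 64KB
    // data page is a separate 64KB-aligned block (presumably so a block pointer
    // can be mapped back to its slab base by masking with TINY_SLAB_SIZE, which
    // is what the registry lookup below keys on).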
    slab->base = aligned_mem;

    // Step 1: Simple initialization (SlabTag removed for performance)
    slab->total_count = g_tiny_blocks_per_slab[class_idx];
    slab->free_count = slab->total_count;
    slab->class_idx = class_idx;
    slab->next = NULL;
    atomic_store(&slab->remote_head, (uintptr_t)NULL);
    atomic_store(&slab->remote_count, 0u);
    slab->owner_tid = tiny_self_pt();
    slab->hint_word = 0;

    // Allocate and initialize summary bitmap (level-2)
    int summary_words = (bitmap_size + 63) / 64;
    slab->summary_words = (uint8_t)summary_words;
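    // Sizing example (illustrative; actual figures depend on g_tiny_bitmap_words):
    // a 16B class in a 64KB slab has 4096 blocks -> 64 bitmap words -> 1 summary word.
    // Summary bit i set means bitmap word i still contains at least one free bit.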
    slab->summary = (uint64_t*)hkm_libc_calloc(summary_words, sizeof(uint64_t));
    if (!slab->summary) {
        hkm_libc_free(slab->bitmap);
        hkm_libc_free(slab->base);
        hkm_libc_free(slab);
        return NULL;
    }
    for (int i = 0; i < bitmap_size; i++) {
        slab->summary[i / 64] |= (1ULL << (i % 64)); // all words have free bits initially
    }

    // Phase 1: Initialize page mini-magazine (32-64 items based on class)
    // Quick Win #3: Increased capacity to reduce bitmap scan frequency by 50%
    // Smaller classes (8B-64B) = larger magazine (better hit rate)
    // Larger classes (128B-1KB) = smaller magazine (less memory overhead)
    uint16_t mag_capacity = (class_idx <= 3) ? 64 : 32; // was 32/16 before Quick Win #3
    mini_mag_init(&slab->mini_mag, mag_capacity);
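    // The mini-magazine caches a small set of free slots for this slab so the
    // allocation fast path can pop one without a bitmap scan (presumed intent per
    // the notes above); release_slab() spills any cached entries back to the
    // bitmap so the bitmap remains authoritative.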

    // Step 2: Register slab in hash table for O(1) lookup (only if enabled)
    if (g_use_registry) {
        uintptr_t slab_base = (uintptr_t)aligned_mem;
        int ok = registry_register(slab_base, slab);
        if (!ok) {
            // Registry full - clean up and fail (also free the summary to avoid a leak)
            hkm_libc_free(slab->summary);
            hkm_libc_free(slab->bitmap);
            hkm_libc_free(slab->base);
            hkm_libc_free(slab);
            return NULL;
        }
    }

    g_tiny_pool.slab_count[class_idx]++;
    return slab;
}
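
// NOTE: the new slab is returned unlinked (slab->next == NULL); the caller is
// presumably responsible for inserting it into the appropriate per-class list.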

// Release a slab
static void release_slab(TinySlab* slab) {
    if (!slab) return;

    // Phase 1: Spill mini-magazine back to bitmap before release
    // This ensures bitmap consistency (all free blocks accounted for)
    if (!mini_mag_is_empty(&slab->mini_mag)) {
        batch_spill_to_bitmap(slab, &slab->mini_mag);
    }

    // Step 2: Unregister from hash table (only if enabled)
    if (g_use_registry) {
        uintptr_t slab_base = (uintptr_t)slab->base;
        registry_unregister(slab_base);
    }

    // Cleanup
    hkm_libc_free(slab->base);
    hkm_libc_free(slab->bitmap);
    hkm_libc_free(slab->summary);
    g_tiny_pool.slab_count[slab->class_idx]--;
    hkm_libc_free(slab);
}
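
// The two move_* helpers below unlink the slab from one singly-linked per-class
// list with a linear scan (O(n) in list length) and push it onto the head of the
// other list. That cost is acceptable because, as noted above, slab management
// is off the hot path.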

// Move slab to full list
static void move_to_full_list(int class_idx, TinySlab* target_slab) {
    // Remove from free list
    TinySlab** head = &g_tiny_pool.free_slabs[class_idx];
    TinySlab* prev = NULL;
    for (TinySlab* slab = *head; slab; prev = slab, slab = slab->next) {
        if (slab == target_slab) {
            if (prev) {
                prev->next = slab->next;
            } else {
                *head = slab->next;
            }
            break;
        }
    }

    // Add to full list
    target_slab->next = g_tiny_pool.full_slabs[class_idx];
    g_tiny_pool.full_slabs[class_idx] = target_slab;
}

// Move slab to free list
static void move_to_free_list(int class_idx, TinySlab* target_slab) {
    // Remove from full list
    TinySlab** head = &g_tiny_pool.full_slabs[class_idx];
    TinySlab* prev = NULL;
    for (TinySlab* slab = *head; slab; prev = slab, slab = slab->next) {
        if (slab == target_slab) {
            if (prev) {
                prev->next = slab->next;
            } else {
                *head = slab->next;
            }
            break;
        }
    }

    // Add to free list
    target_slab->next = g_tiny_pool.free_slabs[class_idx];
    g_tiny_pool.free_slabs[class_idx] = target_slab;
}