# Phase 12: Shared SuperSlab Pool implementation (WIP - runtime crash)
## Summary
Implemented Phase 12 Shared SuperSlab Pool (mimalloc-style) to address
SuperSlab allocation churn (877 SuperSlabs → 100-200 target).
## Implementation (ChatGPT + Claude)
1. **Metadata changes** (superslab_types.h; see the sketch after this list):
- Added class_idx to TinySlabMeta (per-slab dynamic class)
- Removed size_class from SuperSlab (no longer per-SuperSlab)
- Changed owner_tid (16-bit) → owner_tid_low (8-bit)
2. **Shared Pool** (hakmem_shared_pool.{h,c}):
- Global pool shared by all size classes
- shared_pool_acquire_slab() - Get free slab for class_idx
- shared_pool_release_slab() - Return slab when empty
- Per-class hints for fast path optimization
3. **Integration** (23 files modified):
- Updated all ss->size_class → meta->class_idx
- Updated all meta->owner_tid → meta->owner_tid_low
- superslab_refill() now uses shared pool
- Free path releases empty slabs back to pool
4. **Build system** (Makefile):
- Added hakmem_shared_pool.o to OBJS_BASE and TINY_BENCH_OBJS_BASE
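For context, a sketch of the item-1 metadata shape. The field names come from this commit; the width of `used` and the overall layout are assumptions, not the actual superslab_types.h:

```c
#include <stdint.h>

// Sketch only; the real definition lives in core/superslab/superslab_types.h.
typedef struct TinySlabMeta {
    uint8_t  class_idx;     // NEW: per-slab dynamic size class (255 = UNASSIGNED)
    uint8_t  owner_tid_low; // NEW: low 8 bits of owning thread id (was 16-bit owner_tid)
    uint16_t used;          // assumed: live-block count consulted by the release path
    /* ... freelist / bump-carve fields elided ... */
} TinySlabMeta;

// SuperSlab itself no longer stores a size_class; each of its slabs can
// serve a different class via slabs[i].class_idx.
```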
## Status: ⚠️ Build OK, Runtime CRASH
**Build**: ✅ SUCCESS
- All 23 files compile without errors
- Only warning: superslab_allocate() type mismatch (legacy code)
**Runtime**: ❌ SEGFAULT
- Crash location: sll_refill_small_from_ss()
- Exit code: 139 (SIGSEGV)
- Test case: ./bench_random_mixed_hakmem 1000 256 42
## Known Issues
1. **SEGFAULT in refill path** - Likely shared_pool_acquire_slab() issue
2. **Legacy superslab_allocate()** still exists (type mismatch warning)
3. **Remaining TODOs** from design doc:
- SuperSlab physical layout integration
- slab_handle.h cleanup
- Remove old per-class head implementation
## Next Steps
1. Debug SEGFAULT (gdb backtrace shows sll_refill_small_from_ss)
2. Fix shared_pool_acquire_slab() or superslab_init_slab()
3. Basic functionality test (1K → 100K iterations)
4. Measure SuperSlab count reduction (877 → 100-200; see the stats helper sketch below)
5. Performance benchmark (+650-860% expected)
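For step 4, a hypothetical counter dump (not part of this commit) built only on the `g_shared_pool` fields visible in hakmem_shared_pool.c below:

```c
// Hypothetical helper: print pool occupancy so the 877 -> 100-200 SuperSlab
// reduction can be verified directly. All fields referenced here appear in
// SharedSuperSlabPool; pthread types are assumed to come via the header.
#include <stdio.h>
#include "hakmem_shared_pool.h"

void shared_pool_dump_stats(void)
{
    pthread_mutex_lock(&g_shared_pool.alloc_lock);
    fprintf(stderr, "[shared_pool] total=%u active=%u capacity=%u\n",
            g_shared_pool.total_count,
            g_shared_pool.active_count,
            g_shared_pool.capacity);
    pthread_mutex_unlock(&g_shared_pool.alloc_lock);
}
```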
## Files Changed (25 files)
core/box/free_local_box.c
core/box/free_remote_box.c
core/box/front_gate_classifier.c
core/hakmem_super_registry.c
core/hakmem_tiny.c
core/hakmem_tiny_bg_spill.c
core/hakmem_tiny_free.inc
core/hakmem_tiny_lifecycle.inc
core/hakmem_tiny_magazine.c
core/hakmem_tiny_query.c
core/hakmem_tiny_refill.inc.h
core/hakmem_tiny_superslab.c
core/hakmem_tiny_superslab.h
core/hakmem_tiny_tls_ops.h
core/slab_handle.h
core/superslab/superslab_inline.h
core/superslab/superslab_types.h
core/tiny_debug.h
core/tiny_free_fast.inc.h
core/tiny_free_magazine.inc.h
core/tiny_remote.c
core/tiny_superslab_alloc.inc.h
core/tiny_superslab_free.inc.h
Makefile
## New Files (3 files)
PHASE12_SHARED_SUPERSLAB_POOL_DESIGN.md
core/hakmem_shared_pool.c
core/hakmem_shared_pool.h
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
Co-Authored-By: ChatGPT <chatgpt@openai.com>
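The new header core/hakmem_shared_pool.h is not shown in this excerpt. For orientation, a plausible reconstruction: every field and prototype below is referenced by the .c file that follows, but the exact types, order, and include set are assumptions.

```c
// Assumed shape of core/hakmem_shared_pool.h (reconstructed, not verbatim).
#pragma once

#include <pthread.h>
#include <stdint.h>
#include "hakmem_tiny_superslab.h" // assumed: SuperSlab, TinySlabMeta, TINY_NUM_CLASSES_SS

typedef struct SharedSuperSlabPool {
    SuperSlab**     slabs;        // registry of all pool-owned SuperSlabs
    uint32_t        capacity;     // allocated length of slabs[]
    uint32_t        total_count;  // registered SuperSlabs
    uint32_t        active_count; // SuperSlabs with >= 1 assigned slab
    pthread_mutex_t alloc_lock;   // guards all pool mutation
    SuperSlab*      class_hints[TINY_NUM_CLASSES_SS]; // best-effort fast path
    SuperSlab*      lru_head;     // LRU hooks (no-op placeholders in this commit)
    SuperSlab*      lru_tail;
    uint32_t        lru_count;
} SharedSuperSlabPool;

extern SharedSuperSlabPool g_shared_pool;

void       shared_pool_init(void);
SuperSlab* shared_pool_acquire_superslab(void);
int        shared_pool_acquire_slab(int class_idx, SuperSlab** ss_out, int* slab_idx_out);
void       shared_pool_release_slab(SuperSlab* ss, int slab_idx);
```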
## New file: core/hakmem_shared_pool.c (260 lines)
```c
#include "hakmem_shared_pool.h"
#include "hakmem_tiny_superslab_constants.h"

#include <stdlib.h>
#include <string.h>

// Phase 12-2: SharedSuperSlabPool skeleton implementation
// Goal:
// - Centralize SuperSlab allocation/registration
// - Provide acquire_slab/release_slab APIs for later refill/free integration
// - Keep logic simple & conservative; correctness and observability first.
//
// Notes:
// - Concurrency: protected by g_shared_pool.alloc_lock for now.
// - class_hints is best-effort: read lock-free, written under lock.
// - LRU hooks left as no-op placeholders.

SharedSuperSlabPool g_shared_pool = {
    .slabs = NULL,
    .capacity = 0,
    .total_count = 0,
    .active_count = 0,
    .alloc_lock = PTHREAD_MUTEX_INITIALIZER,
    .class_hints = { NULL },
    .lru_head = NULL,
    .lru_tail = NULL,
    .lru_count = 0
};

static void
shared_pool_ensure_capacity_unlocked(uint32_t min_capacity)
{
    if (g_shared_pool.capacity >= min_capacity) {
        return;
    }

    uint32_t new_cap = g_shared_pool.capacity ? g_shared_pool.capacity : 16;
    while (new_cap < min_capacity) {
        new_cap *= 2;
    }

    SuperSlab** new_slabs = (SuperSlab**)realloc(g_shared_pool.slabs,
                                                 new_cap * sizeof(SuperSlab*));
    if (!new_slabs) {
        // Allocation failure: keep old state; caller must handle NULL later.
        return;
    }

    // Zero new entries to keep scanning logic simple.
    memset(new_slabs + g_shared_pool.capacity, 0,
           (new_cap - g_shared_pool.capacity) * sizeof(SuperSlab*));

    g_shared_pool.slabs = new_slabs;
    g_shared_pool.capacity = new_cap;
}

void
shared_pool_init(void)
{
    // Idempotent init; safe to call from multiple early paths.
    // pthread_mutex_t with static initializer is already valid.
    pthread_mutex_lock(&g_shared_pool.alloc_lock);
    if (g_shared_pool.capacity == 0 && g_shared_pool.slabs == NULL) {
        shared_pool_ensure_capacity_unlocked(16);
    }
    pthread_mutex_unlock(&g_shared_pool.alloc_lock);
}

// Internal: allocate and register a new SuperSlab.
// Caller must hold alloc_lock.
static SuperSlab*
shared_pool_allocate_superslab_unlocked(void)
{
    // Allocate SuperSlab and backing memory region.
    // NOTE: Existing code likely has a helper; we keep this minimal for now.
    SuperSlab* ss = (SuperSlab*)aligned_alloc(64, sizeof(SuperSlab));
    if (!ss) {
        return NULL;
    }

    memset(ss, 0, sizeof(SuperSlab));
    ss->magic = SUPERSLAB_MAGIC;
    ss->lg_size = SUPERSLAB_LG_DEFAULT;
    ss->active_slabs = 0;
    ss->slab_bitmap = 0;

    // Initialize all per-slab metadata to UNASSIGNED for Phase 12 semantics.
    for (int i = 0; i < SLABS_PER_SUPERSLAB_MAX; i++) {
        ss->slabs[i].class_idx = 255; // UNASSIGNED
        ss->slabs[i].owner_tid_low = 0;
    }

    // Register into pool array.
    if (g_shared_pool.total_count >= g_shared_pool.capacity) {
        shared_pool_ensure_capacity_unlocked(g_shared_pool.total_count + 1);
        if (g_shared_pool.total_count >= g_shared_pool.capacity) {
            free(ss);
            return NULL;
        }
    }

    g_shared_pool.slabs[g_shared_pool.total_count] = ss;
    g_shared_pool.total_count++;
    // Not counted as active until we assign at least one slab.
    return ss;
}

SuperSlab*
shared_pool_acquire_superslab(void)
{
    shared_pool_init();

    pthread_mutex_lock(&g_shared_pool.alloc_lock);

    // For now, always allocate a fresh SuperSlab and register it.
    // More advanced reuse/GC comes later.
    SuperSlab* ss = shared_pool_allocate_superslab_unlocked();

    pthread_mutex_unlock(&g_shared_pool.alloc_lock);
    return ss;
}

int
shared_pool_acquire_slab(int class_idx, SuperSlab** ss_out, int* slab_idx_out)
{
    if (!ss_out || !slab_idx_out) {
        return -1;
    }
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return -1;
    }

    shared_pool_init();

    // Fast-path hint: read without lock (best-effort).
    SuperSlab* hint = g_shared_pool.class_hints[class_idx];
    if (hint) {
        // Scan for a free, unassigned slab in this SuperSlab.
        uint32_t bitmap = hint->slab_bitmap;
        for (int i = 0; i < SLABS_PER_SUPERSLAB_MAX; i++) {
            uint32_t bit = (1u << i);
            if ((bitmap & bit) == 0 && hint->slabs[i].class_idx == 255) {
                // Tentative claim: upgrade under lock to avoid races.
                pthread_mutex_lock(&g_shared_pool.alloc_lock);
                // Re-check under lock.
                bitmap = hint->slab_bitmap;
                if ((bitmap & bit) == 0 && hint->slabs[i].class_idx == 255) {
                    hint->slab_bitmap |= bit;
                    hint->slabs[i].class_idx = (uint8_t)class_idx;
                    hint->active_slabs++;
                    if (hint->active_slabs == 1) {
                        g_shared_pool.active_count++;
                    }
                    *ss_out = hint;
                    *slab_idx_out = i;
                    pthread_mutex_unlock(&g_shared_pool.alloc_lock);
                    return 0;
                }
                pthread_mutex_unlock(&g_shared_pool.alloc_lock);
                break; // fall through to slow path
            }
        }
    }

    // Slow path: lock and scan all registered SuperSlabs.
    pthread_mutex_lock(&g_shared_pool.alloc_lock);

    for (uint32_t idx = 0; idx < g_shared_pool.total_count; idx++) {
        SuperSlab* ss = g_shared_pool.slabs[idx];
        if (!ss) {
            continue;
        }
        uint32_t bitmap = ss->slab_bitmap;
        for (int i = 0; i < SLABS_PER_SUPERSLAB_MAX; i++) {
            uint32_t bit = (1u << i);
            if ((bitmap & bit) == 0 && ss->slabs[i].class_idx == 255) {
                // Assign this slab to class_idx.
                ss->slab_bitmap |= bit;
                ss->slabs[i].class_idx = (uint8_t)class_idx;
                ss->active_slabs++;
                if (ss->active_slabs == 1) {
                    g_shared_pool.active_count++;
                }
                // Update hint.
                g_shared_pool.class_hints[class_idx] = ss;
                *ss_out = ss;
                *slab_idx_out = i;
                pthread_mutex_unlock(&g_shared_pool.alloc_lock);
                return 0;
            }
        }
    }

    // No existing space: allocate a new SuperSlab and take its first slab.
    SuperSlab* ss = shared_pool_allocate_superslab_unlocked();
    if (!ss) {
        pthread_mutex_unlock(&g_shared_pool.alloc_lock);
        return -1;
    }

    int slab_idx = 0;
    ss->slab_bitmap |= (1u << slab_idx);
    ss->slabs[slab_idx].class_idx = (uint8_t)class_idx;
    ss->active_slabs = 1;
    g_shared_pool.active_count++;

    g_shared_pool.class_hints[class_idx] = ss;

    *ss_out = ss;
    *slab_idx_out = slab_idx;

    pthread_mutex_unlock(&g_shared_pool.alloc_lock);
    return 0;
}

void
shared_pool_release_slab(SuperSlab* ss, int slab_idx)
{
    if (!ss) {
        return;
    }
    if (slab_idx < 0 || slab_idx >= SLABS_PER_SUPERSLAB_MAX) {
        return;
    }

    pthread_mutex_lock(&g_shared_pool.alloc_lock);

    TinySlabMeta* meta = &ss->slabs[slab_idx];
    if (meta->used != 0) {
        // Not actually empty; nothing to do.
        pthread_mutex_unlock(&g_shared_pool.alloc_lock);
        return;
    }

    uint32_t bit = (1u << slab_idx);
    if (ss->slab_bitmap & bit) {
        ss->slab_bitmap &= ~bit;
        uint8_t old_class = meta->class_idx;
        meta->class_idx = 255; // UNASSIGNED

        if (ss->active_slabs > 0) {
            ss->active_slabs--;
            if (ss->active_slabs == 0 && g_shared_pool.active_count > 0) {
                g_shared_pool.active_count--;
            }
        }

        // Invalidate class hint if it pointed here and this superslab has no free slab
        // for that class anymore; for now we do a simple best-effort clear.
        if (old_class < TINY_NUM_CLASSES_SS &&
            g_shared_pool.class_hints[old_class] == ss) {
            // We could rescan ss for another matching slab; to keep it cheap, just clear.
            g_shared_pool.class_hints[old_class] = NULL;
        }
    }

    // TODO Phase 12-4+: if ss->active_slabs == 0, consider GC / unmap.

    pthread_mutex_unlock(&g_shared_pool.alloc_lock);
}
```
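For orientation, a minimal caller-side sketch of the acquire/release contract above. The real call sites in this commit (superslab_refill() and the free path) are more involved; `tiny_carve_blocks` is a hypothetical stand-in:

```c
#include "hakmem_shared_pool.h"

// Hypothetical refill-path usage; mirrors the 0 / -1 return contract above.
static int refill_class_from_shared_pool(int class_idx)
{
    SuperSlab* ss = NULL;
    int slab_idx = -1;

    if (shared_pool_acquire_slab(class_idx, &ss, &slab_idx) != 0) {
        return -1; // invalid class_idx or SuperSlab allocation failed
    }

    // ... carve blocks from slab slab_idx for class_idx ...
    // tiny_carve_blocks(ss, slab_idx, class_idx);  // hypothetical helper

    // Later, once the slab's meta->used drops back to 0, the free path
    // hands the slab back so another class can reuse it:
    shared_pool_release_slab(ss, slab_idx);
    return 0;
}
```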