hakmem/core/box/free_local_box.c
Moe Charm (CI) 03df05ec75 Phase 12: Shared SuperSlab Pool implementation (WIP - runtime crash)
## Summary
Implemented the Phase 12 Shared SuperSlab Pool (mimalloc-style) to reduce
SuperSlab allocation churn (877 SuperSlabs → 100-200 target).

## Implementation (ChatGPT + Claude)
1. **Metadata changes** (superslab_types.h; see the struct sketch after this list):
   - Added class_idx to TinySlabMeta (per-slab dynamic class)
   - Removed size_class from SuperSlab (no longer per-SuperSlab)
   - Changed owner_tid (16-bit) → owner_tid_low (8-bit)

2. **Shared Pool** (hakmem_shared_pool.{h,c}; see the API sketch after this list):
   - Global pool shared by all size classes
   - shared_pool_acquire_slab() - Get free slab for class_idx
   - shared_pool_release_slab() - Return slab when empty
   - Per-class hints for fast path optimization

3. **Integration** (23 files modified):
   - Updated all ss->size_class → meta->class_idx
   - Updated all meta->owner_tid → meta->owner_tid_low
   - superslab_refill() now uses shared pool
   - Free path releases empty slabs back to pool

4. **Build system** (Makefile):
   - Added hakmem_shared_pool.o to OBJS_BASE and TINY_BENCH_OBJS_BASE
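
As a rough illustration of change 1, here is a minimal sketch of the affected metadata. The field names (freelist, used, capacity, class_idx, owner_tid_low, magic, freelist_mask) appear in this commit or in the code below; the field order, widths, and everything else are assumptions, not the real superslab_types.h layout:

```c
#include <stdatomic.h>
#include <stdint.h>

/* Illustrative layout only: field order and widths are assumptions. */
typedef struct TinySlabMeta {
    void*    freelist;      /* per-slab singly linked free list */
    uint16_t used;          /* live blocks in this slab */
    uint16_t capacity;      /* blocks this slab can hold */
    uint8_t  class_idx;     /* NEW: per-slab dynamic size class */
    uint8_t  owner_tid_low; /* NEW: low 8 bits of owning thread id (was 16-bit owner_tid) */
} TinySlabMeta;

typedef struct SuperSlab {
    uint32_t         magic;         /* SUPERSLAB_MAGIC validity check */
    _Atomic uint32_t freelist_mask; /* slabs with a non-empty freelist, one bit per slab */
    /* size_class REMOVED: the class now lives per-slab in TinySlabMeta */
    /* ... per-slab TinySlabMeta array, payload, etc. ... */
} SuperSlab;
```

Across the 23 integration files this turns lookups like `g_tiny_class_sizes[ss->size_class]` into `g_tiny_class_sizes[meta->class_idx]`, which is exactly the pattern visible in the free path below.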
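For change 2, a hedged sketch of the shared-pool interface: only the two function names and the per-class-hint idea come from this commit; the signatures and the hint shape are guesses.

```c
/* Hypothetical hakmem_shared_pool.h; depends on superslab_types.h.
 * Signatures are assumptions, only the names are from this commit. */
SuperSlab* shared_pool_acquire_slab(int class_idx, int* slab_idx_out); /* NULL on failure */
void       shared_pool_release_slab(SuperSlab* ss, int slab_idx);      /* return an empty slab */

/* Per-class fast-path hint (hypothetical): remember the SuperSlab that
 * last served class_idx so the next acquire tries it before scanning
 * the global pool. */
extern SuperSlab* g_pool_hint[TINY_NUM_CLASSES];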

## Status: ⚠️ Build OK, Runtime CRASH

**Build**: SUCCESS
- All 23 files compile without errors
- Only warning: superslab_allocate type mismatch (legacy code)

**Runtime**: SEGFAULT
- Crash location: sll_refill_small_from_ss()
- Exit code: 139 (SIGSEGV)
- Test case: ./bench_random_mixed_hakmem 1000 256 42

## Known Issues
1. **SEGFAULT in refill path** - likely an issue in shared_pool_acquire_slab()
2. **Legacy superslab_allocate()** still exists (type mismatch warning)
3. **Remaining TODOs** from design doc:
   - SuperSlab physical layout integration
   - slab_handle.h cleanup
   - Remove old per-class head implementation

## Next Steps
1. Debug the SEGFAULT (gdb backtrace shows sll_refill_small_from_ss; see the fail-fast sketch below)
2. Fix shared_pool_acquire_slab() or superslab_init_slab()
3. Basic functionality test (1K → 100K iterations)
4. Measure SuperSlab count reduction (877 → 100-200)
5. Performance benchmark (+650-860% expected)
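
For step 1, a minimal fail-fast sketch of the kind of call-site check that could turn the SIGSEGV into an immediate, diagnosable abort. class_idx, SUPERSLAB_MAGIC, and ss_slabs_capacity() are from this repo; the acquire signature and the helper itself are the assumptions from the sketch above:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical guard for sll_refill_small_from_ss(): validate whatever
 * the shared pool hands back before touching slab memory. */
static SuperSlab* refill_acquire_checked(int class_idx, int* slab_idx_out) {
    int slab_idx = -1;
    SuperSlab* ss = shared_pool_acquire_slab(class_idx, &slab_idx);
    if (!ss || ss->magic != SUPERSLAB_MAGIC ||
        slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) {
        fprintf(stderr, "[REFILL] bad acquire: ss=%p slab_idx=%d cls=%d\n",
                (void*)ss, slab_idx, class_idx);
        abort(); /* fail fast here instead of crashing deeper in the refill path */
    }
    *slab_idx_out = slab_idx;
    return ss;
}
```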

## Files Changed (25 files)
core/box/free_local_box.c
core/box/free_remote_box.c
core/box/front_gate_classifier.c
core/hakmem_super_registry.c
core/hakmem_tiny.c
core/hakmem_tiny_bg_spill.c
core/hakmem_tiny_free.inc
core/hakmem_tiny_lifecycle.inc
core/hakmem_tiny_magazine.c
core/hakmem_tiny_query.c
core/hakmem_tiny_refill.inc.h
core/hakmem_tiny_superslab.c
core/hakmem_tiny_superslab.h
core/hakmem_tiny_tls_ops.h
core/slab_handle.h
core/superslab/superslab_inline.h
core/superslab/superslab_types.h
core/tiny_debug.h
core/tiny_free_fast.inc.h
core/tiny_free_magazine.inc.h
core/tiny_remote.c
core/tiny_superslab_alloc.inc.h
core/tiny_superslab_free.inc.h
Makefile

## New Files (3 files)
PHASE12_SHARED_SUPERSLAB_POOL_DESIGN.md
core/hakmem_shared_pool.c
core/hakmem_shared_pool.h

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
Co-Authored-By: ChatGPT <chatgpt@openai.com>
2025-11-13 16:33:03 +09:00

114 lines · 5.3 KiB · C

#include "free_local_box.h"
#include "free_publish_box.h"
#include "hakmem_tiny.h"
#include "tiny_next_ptr_box.h" // Phase E1-CORRECT: Box API
void tiny_free_local_box(SuperSlab* ss, int slab_idx, TinySlabMeta* meta, void* ptr, uint32_t my_tid) {
    extern _Atomic uint64_t g_free_local_box_calls;
    atomic_fetch_add_explicit(&g_free_local_box_calls, 1, memory_order_relaxed);

    if (!(ss && ss->magic == SUPERSLAB_MAGIC)) return;
    if (slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) return;
    if (!meta) return; // BUGFIX: meta was dereferenced unconditionally below despite optional checks elsewhere
    (void)my_tid;

    // ✅ Phase E1-CORRECT: ALL classes have headers, calculate BASE pointer once
    void* base = (void*)((uint8_t*)ptr - 1);

    if (__builtin_expect(tiny_refill_failfast_level() >= 2, 0)) {
        int actual_idx = slab_index_for(ss, base);
        if (actual_idx != slab_idx) {
            tiny_failfast_abort_ptr("free_local_box_idx", ss, slab_idx, ptr, "slab_idx_mismatch");
        } else {
            uint8_t cls = (meta->class_idx < TINY_NUM_CLASSES) ? meta->class_idx : 0;
            size_t blk = g_tiny_class_sizes[cls];
            uint8_t* slab_base = tiny_slab_base_for(ss, slab_idx);
            uintptr_t delta = (uintptr_t)base - (uintptr_t)slab_base;
            if (blk == 0 || (delta % blk) != 0) {
                tiny_failfast_abort_ptr("free_local_box_align", ss, slab_idx, ptr, "misaligned");
            } else if (delta / blk >= meta->capacity) {
                tiny_failfast_abort_ptr("free_local_box_range", ss, slab_idx, ptr, "out_of_capacity");
            }
        }
    }

    void* prev = meta->freelist;

    // FREELIST CORRUPTION DEBUG: Validate pointer before writing
    if (__builtin_expect(tiny_refill_failfast_level() >= 2, 0)) {
        uint8_t cls = (meta->class_idx < TINY_NUM_CLASSES) ? meta->class_idx : 0;
        size_t blk = g_tiny_class_sizes[cls];
        uint8_t* base_ss = (uint8_t*)ss;
        uint8_t* slab_base = tiny_slab_base_for(ss, slab_idx);
        // Verify prev pointer is valid (if not NULL)
        if (prev != NULL) {
            uintptr_t prev_addr = (uintptr_t)prev;
            uintptr_t slab_addr = (uintptr_t)slab_base;
            // Check if prev is within this SuperSlab (2 MiB region)
            if (prev_addr < (uintptr_t)base_ss || prev_addr >= (uintptr_t)base_ss + (2*1024*1024)) {
                fprintf(stderr, "[FREE_CORRUPT] prev=%p outside SuperSlab ss=%p slab=%d\n",
                        prev, (void*)ss, slab_idx);
                tiny_failfast_abort_ptr("free_local_prev_range", ss, slab_idx, ptr, "prev_outside_ss");
            }
            // Check alignment of prev
            if ((prev_addr - slab_addr) % blk != 0) {
                fprintf(stderr, "[FREE_CORRUPT] prev=%p misaligned (cls=%u slab=%d blk=%zu offset=%zu)\n",
                        prev, cls, slab_idx, blk, (size_t)(prev_addr - slab_addr));
                fprintf(stderr, "[FREE_CORRUPT] Writing from ptr=%p, freelist was=%p\n", ptr, prev);
                tiny_failfast_abort_ptr("free_local_prev_misalign", ss, slab_idx, ptr, "prev_misaligned");
            }
        }
        fprintf(stderr, "[FREE_VERIFY] cls=%u slab=%d ptr=%p prev=%p (offset_ptr=%zu offset_prev=%zu)\n",
                cls, slab_idx, ptr, prev,
                (size_t)((uintptr_t)base - (uintptr_t)slab_base),
                prev ? (size_t)((uintptr_t)prev - (uintptr_t)slab_base) : 0);
    }

    // Use per-slab class for freelist linkage
    uint8_t cls = (meta->class_idx < TINY_NUM_CLASSES) ? meta->class_idx : 0;
    tiny_next_write(cls, ptr, prev); // Phase E1-CORRECT: Box API with shared pool
    meta->freelist = ptr;

    // FREELIST CORRUPTION DEBUG: Verify write succeeded
    if (__builtin_expect(tiny_refill_failfast_level() >= 2, 0)) {
        void* readback = tiny_next_read(cls, ptr); // Phase E1-CORRECT: Box API
        if (readback != prev) {
            fprintf(stderr, "[FREE_CORRUPT] Wrote prev=%p to ptr=%p but read back %p!\n",
                    prev, ptr, readback);
            fprintf(stderr, "[FREE_CORRUPT] Memory corruption detected during freelist push\n");
            tiny_failfast_abort_ptr("free_local_readback", ss, slab_idx, ptr, "write_corrupted");
        }
    }

    tiny_failfast_log("free_local_box", cls, ss, meta, ptr, prev);

    // BUGFIX: Memory barrier to ensure freelist visibility before the used decrement.
    // Without it, other threads can see the new freelist but the old used count (race).
    atomic_thread_fence(memory_order_release);

    // Optional freelist mask update on first push (gated by HAKMEM_TINY_FREELIST_MASK)
    do {
        static int g_mask_en = -1;
        if (__builtin_expect(g_mask_en == -1, 0)) {
            const char* e = getenv("HAKMEM_TINY_FREELIST_MASK");
            g_mask_en = (e && *e && *e != '0') ? 1 : 0;
        }
        if (__builtin_expect(g_mask_en, 0) && prev == NULL) {
            uint32_t bit = (1u << slab_idx);
            atomic_fetch_or_explicit(&ss->freelist_mask, bit, memory_order_release);
        }
    } while (0);

    // Track local free (debug helpers may be no-op)
    tiny_remote_track_on_local_free(ss, slab_idx, ptr, "local_free", my_tid);

    meta->used--;
    ss_active_dec_one(ss);

    if (prev == NULL) {
        // First-free → advertise slab to adopters using the per-slab class
        tiny_free_publish_first_free((int)cls, ss, slab_idx);
    }
}
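
For reference, a sketch of what the tiny_next_write()/tiny_next_read() Box API calls above are assumed to do: embed the freelist next pointer in the freed block's user area. The real implementation lives in tiny_next_ptr_box.h and may encode differently per class; the helpers below are illustrative stand-ins, not the repo's functions.

```c
#include <stdint.h>
#include <string.h>

/* Assumed behavior: the next pointer sits at the start of the block's
 * user area (ptr already points past the 1-byte header), so a freelist
 * push is "write prev into the block, then set the list head", the
 * exact pattern used by tiny_free_local_box() above. */
static inline void tiny_next_write_sketch(uint8_t cls, void* ptr, void* next) {
    (void)cls;                       /* the real Box API may encode per class */
    memcpy(ptr, &next, sizeof next); /* memcpy sidesteps strict-aliasing issues */
}

static inline void* tiny_next_read_sketch(uint8_t cls, void* ptr) {
    (void)cls;
    void* next;
    memcpy(&next, ptr, sizeof next);
    return next;
}
```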