Phase 12 debug: restore SUPERSLAB constants/APIs, implement the Box2 drain boundary, fix tiny_fast_pop to return BASE, honor the TLS SLL toggle in the alloc/free fast paths, add fail-fast stubs, and quiet the capacity sentinel. Update CURRENT_TASK with A/B results (SLL-off stable; SLL-on crashes).

Moe Charm (CI)
2025-11-14 01:02:00 +09:00
parent 03df05ec75
commit fcf098857a
53 changed files with 1608 additions and 2198 deletions
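
Among the changes listed in the commit message are honoring a TLS SLL (thread-local singly-linked free list) toggle in the alloc/free fast paths and fixing tiny_fast_pop to return BASE. A minimal sketch of what such a gated fast-path pop can look like; the names g_tls_sll_enabled and g_tls_sll_head are illustrative assumptions, not identifiers from this commit:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical TLS state; the real toggle and list head live elsewhere in hakmem. */
static __thread bool  g_tls_sll_enabled = true;  /* the A/B toggle under test */
static __thread void* g_tls_sll_head    = NULL;  /* singly-linked free list */

/* Fast-path pop: skip the TLS list entirely when the toggle is off, and
 * return the BASE (block start) pointer rather than a payload offset. */
static inline void* tiny_fast_pop(void)
{
    if (!g_tls_sll_enabled || g_tls_sll_head == NULL) {
        return NULL;                 /* caller falls back to the slow path */
    }
    void* base = g_tls_sll_head;
    g_tls_sll_head = *(void**)base;  /* next pointer is stored in the block */
    return base;                     /* BASE, not base + header offset */
}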


@@ -1,4 +1,5 @@
 #include "hakmem_shared_pool.h"
 #include "hakmem_tiny_superslab.h"
+#include "hakmem_tiny_superslab_constants.h"
 #include <stdlib.h>
@@ -66,48 +67,67 @@ shared_pool_init(void)
     pthread_mutex_unlock(&g_shared_pool.alloc_lock);
 }
 
-// Internal: allocate and register a new SuperSlab.
-// Caller must hold alloc_lock.
+/*
+ * Internal: allocate and register a new SuperSlab for the shared pool.
+ *
+ * Phase 12 NOTE:
+ * - We MUST use the real superslab_allocate() path so that:
+ *     - backing memory is a full SuperSlab region (12MB),
+ *     - header/layout are initialized correctly,
+ *     - registry integration stays consistent.
+ * - shared_pool is responsible only for:
+ *     - tracking pointers,
+ *     - marking per-slab class_idx as UNASSIGNED initially.
+ *   It does NOT bypass registry/LRU.
+ *
+ * Caller must hold alloc_lock.
+ */
 static SuperSlab*
 shared_pool_allocate_superslab_unlocked(void)
 {
-    // Allocate SuperSlab and backing memory region.
-    // NOTE: Existing code likely has a helper; we keep this minimal for now.
-    SuperSlab* ss = (SuperSlab*)aligned_alloc(64, sizeof(SuperSlab));
+    // Use size_class 0 as a neutral hint; Phase 12 per-slab class_idx is authoritative.
+    extern SuperSlab* superslab_allocate(uint8_t size_class);
+    SuperSlab* ss = superslab_allocate(0);
     if (!ss) {
         return NULL;
     }
-    memset(ss, 0, sizeof(SuperSlab));
-    ss->magic = SUPERSLAB_MAGIC;
-    ss->lg_size = SUPERSLAB_LG_DEFAULT;
-    ss->active_slabs = 0;
-    ss->slab_bitmap = 0;
-    // Initialize all per-slab metadata to UNASSIGNED for Phase 12 semantics.
-    for (int i = 0; i < SLABS_PER_SUPERSLAB_MAX; i++) {
-        ss->slabs[i].class_idx = 255; // UNASSIGNED
-        ss->slabs[i].owner_tid_low = 0;
+    // superslab_allocate() already:
+    //   - zeroes slab metadata / remote queues,
+    //   - sets magic/lg_size/etc,
+    //   - registers in global registry.
+    // For shared-pool semantics we normalize all slab class_idx to UNASSIGNED.
+    int max_slabs = ss_slabs_capacity(ss);
+    for (int i = 0; i < max_slabs; i++) {
+        ss->slabs[i].class_idx = 255; // UNASSIGNED
     }
 
     // Register into pool array.
     if (g_shared_pool.total_count >= g_shared_pool.capacity) {
         shared_pool_ensure_capacity_unlocked(g_shared_pool.total_count + 1);
         if (g_shared_pool.total_count >= g_shared_pool.capacity) {
-            free(ss);
+            // Pool table expansion failed; leave ss alive (registry-owned),
+            // but do not treat it as part of shared_pool.
             return NULL;
         }
     }
     g_shared_pool.slabs[g_shared_pool.total_count] = ss;
     g_shared_pool.total_count++;
-    // Not counted as active until we assign at least one slab.
+    // Not counted as active until at least one slab is assigned.
     return ss;
 }
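
Note the loop bound above: it changed from the compile-time SLABS_PER_SUPERSLAB_MAX to the per-instance ss_slabs_capacity(ss). A plausible shape for such a helper, assuming the slab count is derived from the SuperSlab's lg_size with 64 KiB slabs (illustrative only; the restored hakmem_tiny_superslab_constants.h may define it differently):

/* Illustrative sketch: slab count derived from the SuperSlab's own size.
 * Assumes slabs of 64 KiB (lg 16); the real helper may use other constants. */
static inline int ss_slabs_capacity(const SuperSlab* ss)
{
    return 1 << (ss->lg_size - 16);
}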
 
 SuperSlab*
 shared_pool_acquire_superslab(void)
 {
-    // Phase 12 debug safety:
-    // If shared backend is disabled at Box API level, this function SHOULD NOT be called.
-    // But since bench currently SEGVs here even with legacy forced, treat this as a hard guard:
-    // we early-return error instead of touching potentially-bad state.
-    //
-    // This isolates shared_pool from the current crash so we can validate legacy path first.
+    // FIXED: removed the unconditional `return -1;` guard that was preventing operation.
     shared_pool_init();
     pthread_mutex_lock(&g_shared_pool.alloc_lock);
@@ -123,6 +143,10 @@ shared_pool_acquire_superslab(void)
 int
 shared_pool_acquire_slab(int class_idx, SuperSlab** ss_out, int* slab_idx_out)
 {
+    // Phase 12: real shared backend is enabled; this function must be correct & safe.
+    // Invariants (callers rely on):
+    //   - On success, *ss_out != NULL, 0 <= *slab_idx_out < SLABS_PER_SUPERSLAB_MAX.
+    //   - The chosen slab has meta->class_idx == class_idx and capacity > 0.
     if (!ss_out || !slab_idx_out) {
         return -1;
     }
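
A caller-side sketch of the invariants documented above. The 0-on-success return convention is an assumption (only the -1 failure path is visible here), and the caller is hypothetical:

#include <assert.h>

static void example_acquire(int class_idx)
{
    SuperSlab* ss = NULL;
    int slab_idx = -1;
    if (shared_pool_acquire_slab(class_idx, &ss, &slab_idx) == 0) {
        /* Promised on success per the comment block above. */
        assert(ss != NULL);
        assert(0 <= slab_idx && slab_idx < SLABS_PER_SUPERSLAB_MAX);
        assert(ss->slabs[slab_idx].class_idx == class_idx);
    }
}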