Phase 9-2: Remove Legacy Backend & Unify to Shared Pool (50M ops/s)

- Removed the Legacy Backend fallback; the Shared Pool is now the sole backend.
- Removed the Soft Cap limit in the Shared Pool so it can manage memory growth on its own.
- Implemented EMPTY slab recycling with a batched meta->used decrement in the remote drain path (see the sketch after this list).
- Updated tiny_free_local_box to return an is_empty status so empty slabs can be recycled safely.
- Fixed a race condition in the release path by removing the slab from the legacy list early.
- Achieved 50.3M ops/s in the WS8192 benchmark (+200% vs. baseline).
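
For context, a minimal sketch of the remote-drain side of that recycling, assuming a simplified slab layout. Only meta->used, the EMPTY state, and the batched decrement come from this commit; SlabMeta's layout, shared_pool_recycle(), and remote_drain_batch() are hypothetical stand-ins for illustration.

#include <stdatomic.h>
#include <stdint.h>

typedef struct SlabMeta {
    _Atomic uint32_t used;   /* live blocks currently allocated from this slab */
} SlabMeta;

/* Hypothetical hook: hand a fully drained slab back to the Shared Pool. */
static void shared_pool_recycle(SlabMeta* meta) { (void)meta; }

/* Remote drain: apply one batched decrement for n_freed remotely freed
 * blocks instead of n_freed individual RMWs, then recycle the slab if
 * that batch is what emptied it. */
static void remote_drain_batch(SlabMeta* meta, uint32_t n_freed)
{
    uint32_t prev = atomic_fetch_sub_explicit(&meta->used, n_freed,
                                              memory_order_release);
    if (prev == n_freed) {            /* used hit zero → slab is EMPTY */
        shared_pool_recycle(meta);    /* exactly one drainer sees this */
    }
}

Because atomic_fetch_sub returns the prior value, exactly one drainer observes the transition to zero, which is what makes the recycle safe without extra locking.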
Moe Charm (CI)
2025-12-01 13:47:23 +09:00
parent 3a040a545a
commit 0bc33dc4f5
7 changed files with 92 additions and 102 deletions


@@ -28,6 +28,9 @@ void* hak_tiny_alloc_superslab_backend_legacy(int class_idx)
         g_superslab_heads[class_idx] = head;
     }
+    // LOCK expansion_lock to protect list traversal (vs remove_superslab_from_legacy_head)
+    pthread_mutex_lock(&head->expansion_lock);
     SuperSlab* chunk = head->current_chunk ? head->current_chunk : head->first_chunk;
     while (chunk) {
@@ -62,12 +65,19 @@ void* hak_tiny_alloc_superslab_backend_legacy(int class_idx)
                 meta->used++;
                 atomic_fetch_add_explicit(&chunk->total_active_blocks, 1, memory_order_relaxed);
+                // UNLOCK before return
+                pthread_mutex_unlock(&head->expansion_lock);
                 HAK_RET_ALLOC_BLOCK_TRACED(class_idx, base, ALLOC_PATH_BACKEND);
             }
         }
         chunk = chunk->next_chunk;
     }
+    // UNLOCK before expansion (which takes lock internally)
+    pthread_mutex_unlock(&head->expansion_lock);
     if (expand_superslab_head(head) < 0) {
         return NULL;
     }
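
The two hunks above bracket the legacy list traversal with expansion_lock: every exit path releases the lock before returning a block or calling into expansion. A standalone sketch of that discipline, under assumed struct layouts; expansion_lock and expand_superslab_head() are named in the diff, while try_alloc_from_chunk() and the stub bodies are hypothetical.

#include <pthread.h>
#include <stddef.h>

typedef struct SuperSlab SuperSlab;
struct SuperSlab { SuperSlab* next_chunk; };

typedef struct {
    pthread_mutex_t expansion_lock;   /* guards the chunk list */
    SuperSlab*      first_chunk;
} SuperSlabHead;

/* Hypothetical stand-ins for the real allocator internals. */
static void* try_alloc_from_chunk(SuperSlab* c) { (void)c; return NULL; }
static int   expand_superslab_head(SuperSlabHead* h) { (void)h; return -1; }

static void* alloc_with_lock_bracket(SuperSlabHead* head)
{
    for (;;) {
        pthread_mutex_lock(&head->expansion_lock);
        for (SuperSlab* c = head->first_chunk; c; c = c->next_chunk) {
            void* p = try_alloc_from_chunk(c);
            if (p) {
                /* UNLOCK before returning the block */
                pthread_mutex_unlock(&head->expansion_lock);
                return p;
            }
        }
        /* UNLOCK before expansion, which takes the lock internally */
        pthread_mutex_unlock(&head->expansion_lock);
        if (expand_superslab_head(head) < 0)
            return NULL;   /* out of memory: nothing left to retry */
    }
}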
@@ -212,74 +222,23 @@ void* hak_tiny_alloc_superslab_backend_shared(int class_idx)
  * Box API entry:
  * - Single front-door for tiny-side Superslab allocations.
  *
- * Phase 9-2 Root Fix: Shared Pool backend unified mode (default ON)
+ * Phase 9-2 Final: Shared Pool ONLY (Legacy Backend Removed)
  * Policy:
- * - HAKMEM_TINY_SS_SHARED=2 (default) → Shared Pool backend ONLY (no legacy fallback)
- * - HAKMEM_TINY_SS_SHARED=1 → Shared Pool backend with legacy fallback (testing mode)
- * - HAKMEM_TINY_SS_SHARED=0 → Legacy backend only (compatibility mode)
- *
- * Root Cause: Legacy backend (g_superslab_heads) has TLS_SLL_DUP issue
- * Solution: Disable legacy backend by default, keep as "reversible box" via env var
+ * - HAKMEM_TINY_SS_SHARED is now ignored (or used only for logging).
+ * - Always uses Shared Pool backend.
+ * - Legacy backend (g_superslab_heads) is no longer used for allocation.
  */
 void* hak_tiny_alloc_superslab_box(int class_idx)
 {
-    static int g_ss_shared_mode = -1;
-    static _Atomic uint32_t g_ss_backend_log = 0;
-    if (__builtin_expect(g_ss_shared_mode == -1, 0)) {
-        const char* e = getenv("HAKMEM_TINY_SS_SHARED");
-        if (!e || !*e) {
-            g_ss_shared_mode = 2; // Phase 9-2 Root Fix: Shared Pool ONLY (no legacy fallback)
-        } else {
-            int v = atoi(e);
-            g_ss_shared_mode = v; // 0=legacy only, 1=shared+fallback, 2=shared only
-        }
-#if !HAKMEM_BUILD_RELEASE
-        const char* mode_str = (g_ss_shared_mode == 2) ? "shared_only" :
-                               (g_ss_shared_mode == 1) ? "shared+fallback" : "legacy_only";
-        fprintf(stderr, "[SS_BACKEND] Mode: %s (HAKMEM_TINY_SS_SHARED=%d)\n", mode_str, g_ss_shared_mode);
-#endif
-    }
-    // Mode 2: Shared Pool ONLY (default, no legacy fallback)
-    if (g_ss_shared_mode == 2) {
-        void* p = hak_tiny_alloc_superslab_backend_shared(class_idx);
-        if (p != NULL) {
-            uint32_t n = atomic_fetch_add_explicit(&g_ss_backend_log, 1, memory_order_relaxed);
-            if (n < 4) {
-                fprintf(stderr, "[SS_BACKEND] shared_only cls=%d ptr=%p\n", class_idx, p);
-            }
-            return p;
-        }
-        // Phase 9-2: NO fallback to legacy - return NULL on failure
-        uint32_t n = atomic_fetch_add_explicit(&g_ss_backend_log, 1, memory_order_relaxed);
+    // Always use Shared Pool (Mode 2 equivalent)
+    void* p = hak_tiny_alloc_superslab_backend_shared(class_idx);
+    if (p == NULL) {
+        static _Atomic uint32_t g_ss_oom_log = 0;
+        uint32_t n = atomic_fetch_add_explicit(&g_ss_oom_log, 1, memory_order_relaxed);
         if (n < 4) {
-            fprintf(stderr, "[SS_BACKEND] shared_fail→NULL (no legacy) cls=%d\n", class_idx);
+            fprintf(stderr, "[SS_BACKEND] shared_fail→NULL (OOM) cls=%d\n", class_idx);
         }
         return NULL;
     }
-    // Mode 1: Shared Pool with legacy fallback (testing mode)
-    if (g_ss_shared_mode == 1) {
-        void* p = hak_tiny_alloc_superslab_backend_shared(class_idx);
-        if (p != NULL) {
-            uint32_t n = atomic_fetch_add_explicit(&g_ss_backend_log, 1, memory_order_relaxed);
-            if (n < 4) {
-                fprintf(stderr, "[SS_BACKEND] shared cls=%d ptr=%p\n", class_idx, p);
-            }
-            return p;
-        }
-        // Fallback to legacy
-        uint32_t n = atomic_fetch_add_explicit(&g_ss_backend_log, 1, memory_order_relaxed);
-        if (n < 4) {
-            fprintf(stderr, "[SS_BACKEND] shared_fail→legacy cls=%d\n", class_idx);
-        }
-        return hak_tiny_alloc_superslab_backend_legacy(class_idx);
-    }
-    // Mode 0: Legacy backend only (compatibility mode)
-    uint32_t n = atomic_fetch_add_explicit(&g_ss_backend_log, 1, memory_order_relaxed);
-    if (n < 4) {
-        fprintf(stderr, "[SS_BACKEND] legacy cls=%d\n", class_idx);
-    }
-    return hak_tiny_alloc_superslab_backend_legacy(class_idx);
+    return p;
 }
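
The new failure path logs only the first four OOM events through an atomic counter. That rate-limiting idiom, lifted into a hedged standalone sketch; the helper name and message format are illustrative, not from the commit.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t g_oom_log_count = 0;

/* Log only the first `limit` occurrences of an event; later callers pay
 * one relaxed atomic increment and skip the I/O entirely. */
static void log_first_n(const char* msg, uint32_t limit)
{
    uint32_t n = atomic_fetch_add_explicit(&g_oom_log_count, 1,
                                           memory_order_relaxed);
    if (n < limit) {
        fprintf(stderr, "%s (occurrence %u)\n", msg, (unsigned)n + 1u);
    }
}

Relaxed ordering suffices here because the counter only gates logging; no other data depends on it.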