// hakmem/core/superslab_backend.c - Backend allocation paths for SuperSlab allocator
// Purpose: Shared pool backend implementation (legacy path archived)
// License: MIT
// Date: 2025-11-28
#include "hakmem_tiny_superslab_internal.h"
#include "box/c7_meta_used_counter_box.h"
#include <stdatomic.h>
static _Atomic uint32_t g_c7_backend_calls = 0;
// Note: Legacy backend moved to archive/superslab_backend_legacy.c (not built).
/*
 * Shared pool backend for hak_tiny_alloc_superslab_box().
 *
 * Phase 12-2:
 * - Uses SharedSuperSlabPool (g_shared_pool) to obtain a SuperSlab/slab
 *   for the requested class_idx.
 * - This backend EXPRESSLY owns only:
 *     - choosing (ss, slab_idx) via shared_pool_acquire_slab()
 *     - initializing that slab's TinySlabMeta via superslab_init_slab()
 *   and nothing else; all callers must go through hak_tiny_alloc_superslab_box().
 *
 * - For now this is a minimal, conservative implementation:
 *     - One linear bump run is carved from the acquired slab using
 *       tiny_block_stride_for_class().
 *     - No complex per-slab freelist or refill policy yet (Phase 12-3+).
 *     - If shared_pool_acquire_slab() fails, allocation returns NULL
 *       (no legacy fallback).
 */
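
/*
 * Assumed pool-side contract (a sketch inferred from the call site below;
 * the authoritative prototype lives with the shared pool code):
 *
 *   // returns 0 on success; fills *out_ss / *out_slab_idx with a slab whose
 *   // meta is either UNASSIGNED (class_idx == 255) or already bound to class_idx
 *   int shared_pool_acquire_slab(int class_idx, SuperSlab** out_ss, int* out_slab_idx);
 */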
void* hak_tiny_alloc_superslab_backend_shared(int class_idx)
{
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }

    SuperSlab* ss = NULL;
    int slab_idx = -1;
    if (shared_pool_acquire_slab(class_idx, &ss, &slab_idx) != 0 || !ss) {
        // Shared pool could not provide a slab; caller may choose to fall back.
        return NULL;
    }

    TinySlabMeta* meta = &ss->slabs[slab_idx];
    // Defensive: shared_pool must either hand us an UNASSIGNED slab (255) or
    // one already bound to this class. Anything else is a hard bug.
    if (meta->class_idx != 255 && meta->class_idx != (uint8_t)class_idx) {
#if !HAKMEM_BUILD_RELEASE
        fprintf(stderr,
                "[HAKMEM][SS_SHARED] BUG: acquire_slab mismatch: cls=%d meta->class_idx=%u slab_idx=%d ss=%p\n",
                class_idx, (unsigned)meta->class_idx, slab_idx, (void*)ss);
#endif
        return NULL;
    }
    // Initialize slab geometry once for this class.
    if (meta->capacity == 0) {
        size_t block_size = g_tiny_class_sizes[class_idx];
        // LARSON FIX: Pass the actual thread ID for cross-thread free detection.
        uint32_t my_tid = (uint32_t)(uintptr_t)pthread_self();
        superslab_init_slab(ss, slab_idx, block_size, my_tid);
        meta = &ss->slabs[slab_idx];
        // CRITICAL FIX: Always set class_idx after init to avoid C0/C7 confusion.
        // New SuperSlabs start with meta->class_idx=0 (mmap zero-init), so we must
        // explicitly bind the requested class, not only when class_idx==255.
        meta->class_idx = (uint8_t)class_idx;
        // P1.1: Update class_map in the shared acquire path as well.
        ss->class_map[slab_idx] = (uint8_t)class_idx;
    }
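
    // Lifecycle of meta->class_idx as observed from this file (the recycling
    // side is assumed to live in the shared pool code): 0 on a fresh mmap'd
    // SuperSlab (zero-init), bound to the requested class here, and 255
    // (UNASSIGNED) presumably when the pool recycles a slab.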
    // Final contract check before computing addresses.
    if (meta->class_idx != (uint8_t)class_idx ||
        meta->capacity == 0 ||
        meta->used > meta->capacity) {
#if !HAKMEM_BUILD_RELEASE
        fprintf(stderr,
                "[HAKMEM][SS_SHARED] BUG: invalid slab meta before alloc: "
                "cls=%d slab_idx=%d meta_cls=%u used=%u cap=%u ss=%p\n",
                class_idx, slab_idx,
                (unsigned)meta->class_idx,
                (unsigned)meta->used,
                (unsigned)meta->capacity,
                (void*)ss);
#endif
        return NULL;
    }
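
    // Debug breadcrumb: intentionally unguarded so the first few C7 backend
    // calls are visible even in release builds (hence the REL_ log tag).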
    if (class_idx == 7) {
        uint32_t n = atomic_fetch_add_explicit(&g_c7_backend_calls, 1, memory_order_relaxed);
        if (n < 8) {
            fprintf(stderr,
                    "[REL_C7_BACKEND_CALL] cls=%d meta_cls=%u used=%u cap=%u ss=%p slab=%d\n",
                    class_idx,
                    (unsigned)meta->class_idx,
                    (unsigned)meta->used,
                    (unsigned)meta->capacity,
                    (void*)ss,
                    slab_idx);
        }
    }
    // Simple bump allocation within this slab.
    if (meta->used >= meta->capacity) {
        // Slab exhausted: the minimal Phase 12-2 backend does not loop;
        // the caller (or future refill logic) must acquire another slab.
        return NULL;
    }

    size_t stride = tiny_block_stride_for_class(class_idx);
    size_t offset = (size_t)meta->used * stride;
    // Phase 12-2 minimal geometry:
    // - slab 0 data starts at SUPERSLAB_SLAB0_DATA_OFFSET
    // - subsequent slabs sit at fixed SUPERSLAB_SLAB_USABLE_SIZE strides.
    size_t slab_base_off = SUPERSLAB_SLAB0_DATA_OFFSET
                         + (size_t)slab_idx * SUPERSLAB_SLAB_USABLE_SIZE;
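    // Worked example with made-up constants (the real values live in the
    // internal header): if SUPERSLAB_SLAB0_DATA_OFFSET were 4096 and
    // SUPERSLAB_SLAB_USABLE_SIZE were 65536, then slab_idx=2, used=3 and a
    // 64-byte stride would place the block at
    //   4096 + 2*65536 + 3*64 = 135360 bytes past the SuperSlab base.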
    uint8_t* base = (uint8_t*)ss + slab_base_off + offset;
    meta->used++;
    c7_meta_used_note(class_idx, C7_META_USED_SRC_BACKEND);
    atomic_fetch_add_explicit(&ss->total_active_blocks, 1, memory_order_relaxed);
    HAK_RET_ALLOC_BLOCK_TRACED(class_idx, base, ALLOC_PATH_BACKEND);
}

/*
 * Box API entry:
 * - Single front door for tiny-side SuperSlab allocations.
 *
 * Phase 9-2 Final: Shared Pool ONLY (legacy backend removed)
 * Policy:
 * - HAKMEM_TINY_SS_SHARED is now ignored (or used only for logging).
 * - Always uses the Shared Pool backend.
 * - The legacy backend (g_superslab_heads) is no longer used for allocation.
 */
void* hak_tiny_alloc_superslab_box(int class_idx)
{
    // Always use the Shared Pool (Mode 2 equivalent).
    void* p = hak_tiny_alloc_superslab_backend_shared(class_idx);
    if (p == NULL) {
        static _Atomic uint32_t g_ss_oom_log = 0;
        uint32_t n = atomic_fetch_add_explicit(&g_ss_oom_log, 1, memory_order_relaxed);
        if (n < 4) {
            fprintf(stderr, "[SS_BACKEND] shared_fail→NULL (OOM) cls=%d\n", class_idx);
        }
    }
    return p;
}
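
/*
 * Usage sketch (illustrative, not part of this file's API surface):
 * callers treat NULL as "shared pool exhausted" and own any fallback.
 * tiny_alloc_slow_path() is a hypothetical stand-in for whatever slower
 * path the caller has; this backend deliberately provides none.
 *
 *   void* p = hak_tiny_alloc_superslab_box(class_idx);
 *   if (!p) {
 *       p = tiny_alloc_slow_path(class_idx);  // hypothetical caller-owned fallback
 *   }
 */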