hakmem/core/box/ss_unified_backend_box.c
Moe Charm (CI) 6ac6f5ae1b Refactor: Split hakmem_tiny_superslab.c + unified backend exit point
Major refactoring to improve maintainability and debugging:

1. Split hakmem_tiny_superslab.c (1521 lines) into 7 focused files:
   - superslab_allocate.c: SuperSlab allocation/deallocation
   - superslab_backend.c: Backend allocation paths (legacy, shared)
   - superslab_ace.c: ACE (Adaptive Cache Engine) logic
   - superslab_slab.c: Slab initialization and bitmap management
   - superslab_cache.c: LRU cache and prewarm cache management
   - superslab_head.c: SuperSlabHead management and expansion
   - superslab_stats.c: Statistics tracking and debugging

2. Created hakmem_tiny_superslab_internal.h for shared declarations

3. Added superslab_return_block() as a single exit point for header writing
   (see the sketch after this list):
   - All backend allocations now go through this helper
   - Prevents bugs where headers are forgotten in some paths
   - Makes future debugging easier

4. Updated Makefile for new file structure

5. Added header writing to ss_legacy_backend_box.c and
   ss_unified_backend_box.c (though not currently linked)
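
A minimal sketch of the single-exit-point pattern from item 3 (hypothetical;
the real superslab_return_block() in superslab_backend.c may differ in
signature, this only illustrates the idea, mirroring the return path visible
in ss_unified_backend_box.c below):

    // Sketch only: every backend allocation funnels through one helper,
    // so no path can forget to write the class-index header.
    static inline void* superslab_return_block(void* base, int class_idx)
    {
    #if HAKMEM_TINY_HEADER_CLASSIDX
        return tiny_region_id_write_header(base, class_idx);
    #else
        (void)class_idx;
        return base;
    #endif
    }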

Note: The header corruption bug in the Larson benchmark still exists.
Class 1-6 allocations go through the TLS refill/carve paths, not the backend.
Further investigation is needed.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-29 05:13:04 +09:00

// Box: Unified Backend (Phase 12)
// Purpose: Unified entry point for SuperSlab allocation (shared pool + legacy fallback)
#include "ss_unified_backend_box.h"
#include "ss_legacy_backend_box.h"
#include "hakmem_tiny_superslab.h"
#include "hakmem_shared_pool.h"
#include "hakmem_tiny_config.h"
#include "ss_allocation_box.h"
#include "../tiny_region_id.h" // For tiny_region_id_write_header
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>    // uint8_t
#include <stdatomic.h> // atomic_fetch_add_explicit, memory_order_relaxed
// ============================================================================
// Shared Pool Backend
// ============================================================================
/*
 * Shared pool backend for hak_tiny_alloc_superslab_box().
 *
 * Phase 12-2:
 * - Uses SharedSuperSlabPool (g_shared_pool) to obtain a SuperSlab/slab
 *   for the requested class_idx.
 * - This backend EXPRESSLY owns only:
 *   - choosing (ss, slab_idx) via shared_pool_acquire_slab()
 *   - initializing that slab's TinySlabMeta via superslab_init_slab()
 *   and nothing else; all callers must go through hak_tiny_alloc_superslab_box().
 *
 * - For now this is a minimal, conservative implementation:
 *   - One linear bump run is carved from the acquired slab using
 *     tiny_block_stride_for_class().
 *   - No complex per-slab freelist or refill policy yet (Phase 12-3+).
 *   - If shared_pool_acquire_slab() fails, the caller falls back to the
 *     legacy backend.
 */
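/*
 * Worked example of the carve below (illustrative numbers only; the real
 * values come from hakmem_tiny_config.h and are assumptions here): with
 * SUPERSLAB_SLAB0_DATA_OFFSET = 4096, SUPERSLAB_SLAB_USABLE_SIZE = 64 KiB,
 * stride = 64, slab_idx = 2, and meta->used = 3, the returned block sits at
 *   base = (uint8_t*)ss + 4096 + 2 * 65536 + 3 * 64
 * i.e. slab 0's data offset, plus two fixed slab strides, plus three block
 * strides into the acquired slab.
 */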
void* hak_tiny_alloc_superslab_backend_shared(int class_idx)
{
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }

    SuperSlab* ss = NULL;
    int slab_idx = -1;
    if (shared_pool_acquire_slab(class_idx, &ss, &slab_idx) != 0 || !ss) {
        // Shared pool could not provide a slab; caller may choose to fall back.
        return NULL;
    }

    TinySlabMeta* meta = &ss->slabs[slab_idx];

    // Defensive: shared_pool must either hand us an UNASSIGNED slab or one
    // already bound to this class. Anything else is a hard bug.
    if (meta->class_idx != 255 && meta->class_idx != (uint8_t)class_idx) {
#if !HAKMEM_BUILD_RELEASE
        fprintf(stderr,
                "[HAKMEM][SS_SHARED] BUG: acquire_slab mismatch: cls=%d meta->class_idx=%u slab_idx=%d ss=%p\n",
                class_idx, (unsigned)meta->class_idx, slab_idx, (void*)ss);
#endif
        return NULL;
    }

    // Initialize slab geometry once for this class.
    if (meta->capacity == 0) {
        size_t block_size = g_tiny_class_sizes[class_idx];
        // owner_tid_low is advisory; we can use 0 in this backend.
        superslab_init_slab(ss, slab_idx, block_size, 0);
        meta = &ss->slabs[slab_idx];
        // CRITICAL FIX: Always set class_idx after init to avoid C0/C7 confusion.
        // New SuperSlabs start with meta->class_idx=0 (mmap zero-init).
        // Must explicitly set to requested class, not just when class_idx==255.
        meta->class_idx = (uint8_t)class_idx;
    }

    // Final contract check before computing addresses.
    if (meta->class_idx != (uint8_t)class_idx ||
        meta->capacity == 0 ||
        meta->used > meta->capacity) {
#if !HAKMEM_BUILD_RELEASE
        fprintf(stderr,
                "[HAKMEM][SS_SHARED] BUG: invalid slab meta before alloc: "
                "cls=%d slab_idx=%d meta_cls=%u used=%u cap=%u ss=%p\n",
                class_idx, slab_idx,
                (unsigned)meta->class_idx,
                (unsigned)meta->used,
                (unsigned)meta->capacity,
                (void*)ss);
#endif
        return NULL;
    }

    // Simple bump allocation within this slab.
    if (meta->used >= meta->capacity) {
        // Slab exhausted: in the minimal Phase 12-2 backend we do not loop;
        // caller or future logic must acquire another slab.
        return NULL;
    }

    size_t stride = tiny_block_stride_for_class(class_idx);
    size_t offset = (size_t)meta->used * stride;
    // Phase 12-2 minimal geometry:
    // - slab 0 data offset via SUPERSLAB_SLAB0_DATA_OFFSET
    // - subsequent slabs at fixed SUPERSLAB_SLAB_USABLE_SIZE strides.
    size_t slab_base_off = SUPERSLAB_SLAB0_DATA_OFFSET
                         + (size_t)slab_idx * SUPERSLAB_SLAB_USABLE_SIZE;
    uint8_t* base = (uint8_t*)ss + slab_base_off + offset;

    meta->used++;
    atomic_fetch_add_explicit(&ss->total_active_blocks, 1, memory_order_relaxed);
    hak_tiny_ss_hint_record(class_idx, ss, slab_idx);

#if HAKMEM_TINY_HEADER_CLASSIDX
    return tiny_region_id_write_header(base, class_idx);
#else
    return (void*)base;
#endif
}
// ============================================================================
// Unified Entry Point
// ============================================================================
/*
 * Box API entry:
 * - Single front door for tiny-side SuperSlab allocations.
 *
 * Phase 27 policy (Unified backend line):
 * - Default: the Shared Pool backend is the primary path; the legacy backend
 *   runs only as a fallback when the shared path fails.
 * - ENV switches exist for regression/debug work only: they can force the
 *   legacy backend or disable the fallback entirely.
 *
 * ENV:
 *   HAKMEM_TINY_SS_SHARED=0          -> force the legacy backend only (to verify past behavior)
 *   HAKMEM_TINY_SS_LEGACY_FALLBACK=0 -> disable the legacy fallback after a shared failure (default: 1, enabled)
 *   HAKMEM_TINY_SS_C23_UNIFIED=1     -> disable the legacy fallback for C2/C3 only (Shared Pool only)
 *   HAKMEM_TINY_SS_LEGACY_HINT=1     -> enable the lightweight hint Box between shared and legacy
 */
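/*
 * Example invocations (illustrative; "bench_larson" is a placeholder binary
 * name, not part of this repo's documented tooling):
 *   HAKMEM_TINY_SS_SHARED=0 ./bench_larson
 *       -> legacy-only regression mode
 *   HAKMEM_TINY_SS_LEGACY_FALLBACK=0 ./bench_larson
 *       -> fully unified mode: a shared-pool failure returns NULL
 *   HAKMEM_TINY_SS_C23_UNIFIED=1 ./bench_larson
 *       -> C2/C3 run on the Shared Pool alone; other classes keep the fallback
 */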
void* hak_tiny_alloc_superslab_box(int class_idx)
{
    static int g_ss_shared_mode     = -1;
    static int g_ss_legacy_fallback = -1;
    static int g_ss_c23_unified     = -1;

    if (__builtin_expect(g_ss_shared_mode == -1, 0)) {
        const char* e = getenv("HAKMEM_TINY_SS_SHARED");
        // Default: shared enabled. Only ENV=0 switches to legacy-only mode.
        g_ss_shared_mode = (e && *e == '0') ? 0 : 1;
    }
    if (__builtin_expect(g_ss_legacy_fallback == -1, 0)) {
        const char* e = getenv("HAKMEM_TINY_SS_LEGACY_FALLBACK");
        // Default: legacy fallback enabled.
        // Only ENV=0 disables the fallback (fully unified backend mode).
        g_ss_legacy_fallback = (e && *e == '0') ? 0 : 1;
    }
    if (__builtin_expect(g_ss_c23_unified == -1, 0)) {
        const char* e = getenv("HAKMEM_TINY_SS_C23_UNIFIED");
        // Only ENV=1 switches C2/C3 to the "almost fully unified" mode.
        g_ss_c23_unified = (e && *e && *e != '0') ? 1 : 0;
    }

    // With shared OFF, use legacy only (forced regression mode).
    if (!g_ss_shared_mode) {
        return hak_tiny_alloc_superslab_backend_legacy(class_idx);
    }

    int legacy_fallback = g_ss_legacy_fallback;
    if ((class_idx == 2 || class_idx == 3) && g_ss_c23_unified == 1) {
        // C2/C3 run a dedicated Shared Pool experiment mode:
        // - the legacy fallback is turned OFF only when ENV=1.
        legacy_fallback = 0;
    }

    // Unified backend line: the Shared Pool backend is the only canonical path.
    void* p = hak_tiny_alloc_superslab_backend_shared(class_idx);
    if (p != NULL || !legacy_fallback) {
        // Shared succeeded, or the fallback is disabled: return as-is (NULL is allowed).
        return p;
    }

    // Optional: on shared failure, try the lightweight hint Box once.
    void* hint = hak_tiny_alloc_superslab_backend_hint(class_idx);
    if (hint != NULL) {
        return hint;
    }

    // Fall back to the legacy backend only after a shared failure (regression/debug use).
    return hak_tiny_alloc_superslab_backend_legacy(class_idx);
}