Box API Phase 1-3: Implement Capacity Manager, Carve-Push, Prewarm
Implements the Priority 1-3 Box Modules and provides a safe pre-warming API.
Replaces the existing complex prewarm code with a single Box API call.
## New Box Modules
1. **Box Capacity Manager** (capacity_box.h/c)
- Centralized management of TLS SLL capacity
- Guarantees adaptive_sizing initialization
- Prevents double-free bugs
2. **Box Carve-And-Push** (carve_push_box.h/c)
- Atomic block carve + TLS SLL push
- All-or-nothing semantics
- Rollback guarantee (prevents partial failure)
3. **Box Prewarm** (prewarm_box.h/c)
- Safe TLS cache pre-warming
- Hides initialization dependencies
- Simple API (a single function call; see the sketch after this list)
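For orientation, here is a minimal sketch of the Prewarm surface implied by this commit. Only `box_prewarm_tls(class, count)` returning the number of blocks taken is confirmed by the usage and log below; the parameter names, header guard, and doc comments are assumptions.
```c
/* prewarm_box.h -- illustrative sketch, not the committed header */
#ifndef HAKMEM_BOX_PREWARM_BOX_H
#define HAKMEM_BOX_PREWARM_BOX_H

/*
 * Pre-warm the TLS SLL cache for one tiny size class.
 * class_idx: tiny class index (e.g. 5)
 * count:     requested number of blocks to reserve into the TLS SLL
 * Returns the number of blocks actually taken (may be fewer than requested,
 * as in the verification log below: requested=128 taken=32).
 * Initialization dependencies (adaptive_sizing) are handled inside the Box.
 */
int box_prewarm_tls(int class_idx, int count);

#endif /* HAKMEM_BOX_PREWARM_BOX_H */
```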
## Code Simplification
hakmem_tiny_init.inc: 20 lines → 1 line
```c
// BEFORE: complex P0 branching and error handling
adaptive_sizing_init();
if (prewarm > 0) {
#if HAKMEM_TINY_P0_BATCH_REFILL
    int taken = sll_refill_batch_from_ss(5, prewarm);
#else
    int taken = sll_refill_small_from_ss(5, prewarm);
#endif
}

// AFTER: a single Box API call
int taken = box_prewarm_tls(5, prewarm);
```
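To illustrate why the call site collapses to one line, the Box can absorb the initialization guarantee and the P0 A/B branch internally. This is a rough sketch based only on the BEFORE snippet above, not the committed prewarm_box.c:
```c
// Sketch only: assumes box_prewarm_tls delegates to the same refill helpers
// that the old inline code called directly.
int box_prewarm_tls(int class_idx, int count) {
    if (count <= 0) return 0;
    adaptive_sizing_init();   // initialization guarantee now lives inside the Box
#if HAKMEM_TINY_P0_BATCH_REFILL
    return sll_refill_batch_from_ss(class_idx, count);   // P0 batch refill path
#else
    return sll_refill_small_from_ss(class_idx, count);   // simple refill path
#endif
}
```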
## Symbol Export Fixes
hakmem_tiny.c: 5 symbols changed from static to non-static so the Box modules can reference them (extern sketch after this list)
- g_tls_slabs[] (TLS slab array)
- g_sll_multiplier (SLL capacity multiplier)
- g_sll_cap_override[] (capacity override)
- superslab_refill() (SuperSlab refill)
- ss_active_add() (active counter)
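The Box sources need matching extern declarations for these symbols. A minimal sketch using the two function prototypes visible in the diff further below; the element types of the three globals are not shown in this commit, so they are left out here:
```c
#include <stdint.h>

/* Sketch: extern view of the symbols exported from hakmem_tiny.c.
 * SuperSlab is the allocator's superslab type (defined in its own headers);
 * the prototypes follow the forward declarations in the diff below. */
typedef struct SuperSlab SuperSlab;  /* opaque here, real definition elsewhere */

extern SuperSlab* superslab_refill(int class_idx);     /* SuperSlab refill */
extern void ss_active_add(SuperSlab* ss, uint32_t n);  /* active counter */
```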
## Build System
Makefile: added 3 Box module objects to TINY_BENCH_OBJS_BASE
- core/box/capacity_box.o
- core/box/carve_push_box.o
- core/box/prewarm_box.o
## Verification
✅ Debug build succeeds
✅ Box Prewarm API verified:
[PREWARM] class=5 requested=128 taken=32
## Next Steps
- Box Refill Manager (Priority 4)
- Box SuperSlab Allocator (Priority 5)
- Fix Release build (tiny_debug_ring_record)
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
```diff
@@ -27,6 +27,7 @@
 #include "superslab/superslab_inline.h" // For slab_index_for/ss_slabs_capacity (Debug validation)
 #include "box/tls_sll_box.h" // Box TLS-SLL: Safe SLL operations API
 #include "hakmem_tiny_integrity.h" // PRIORITY 1-4: Corruption detection
+#include "box/tiny_next_ptr_box.h" // Box API: Next pointer read/write
 #include <stdint.h>
 #include <pthread.h>
 #include <stdlib.h>
@@ -86,10 +87,10 @@ static inline void* tiny_fast_pop(int class_idx);
 static inline int tiny_fast_push(int class_idx, void* ptr);
 static inline int tls_refill_from_tls_slab(int class_idx, TinyTLSList* tls, uint32_t want);
 static inline uint32_t sll_cap_for_class(int class_idx, uint32_t mag_cap);
-static SuperSlab* superslab_refill(int class_idx);
+SuperSlab* superslab_refill(int class_idx);
 static void* slab_data_start(SuperSlab* ss, int slab_idx);
 static inline uint8_t* tiny_slab_base_for(SuperSlab* ss, int slab_idx);
-static inline void ss_active_add(SuperSlab* ss, uint32_t n);
+void ss_active_add(SuperSlab* ss, uint32_t n);
 static inline void ss_active_inc(SuperSlab* ss);
 static TinySlab* allocate_new_slab(int class_idx);
 static void move_to_full_list(int class_idx, struct TinySlab* target_slab);
@@ -180,16 +181,11 @@ static inline void* tiny_fast_refill_and_take(int class_idx, TinyTLSList* tls) {
     }
 
     void* ret = batch_head;
-#if HAKMEM_TINY_HEADER_CLASSIDX
-    const size_t next_off_tls = (class_idx == 7) ? 0 : 1;
-#else
-    const size_t next_off_tls = 0;
-#endif
-    void* node = *(void**)((uint8_t*)ret + next_off_tls);
+    void* node = tiny_next_read(class_idx, ret);
     uint32_t remaining = (taken > 0u) ? (taken - 1u) : 0u;
 
     while (node && remaining > 0u) {
-        void* next = *(void**)((uint8_t*)node + next_off_tls);
+        void* next = tiny_next_read(class_idx, node);
         int pushed = 0;
         if (__builtin_expect(g_fastcache_enable && class_idx <= 3, 1)) {
             // Headerless array stack for hottest tiny classes
@@ -297,10 +293,7 @@ static inline int sll_refill_small_from_ss(int class_idx, int max_take) {
     HAK_CHECK_CLASS_IDX(class_idx, "sll_refill_small_from_ss");
     atomic_fetch_add(&g_integrity_check_class_bounds, 1);
 
-    // CRITICAL: C7 (1KB) is headerless - incompatible with TLS SLL refill
-    if (__builtin_expect(class_idx == 7, 0)) {
-        return 0; // C7 uses slow path exclusively
-    }
+    // Phase E1-CORRECT: C7 now has headers, can use small refill
 
     if (!g_use_superslab || max_take <= 0) return 0;
     // Runtime A/B: delegate to batch refill when P0 is enabled
@@ -353,14 +346,12 @@ static inline int sll_refill_small_from_ss(int class_idx, int max_take) {
             meta->carved++;
             meta->used++;
 
-            // ✅ FIX #11B: Restore header BEFORE tls_sll_push
+            // Phase E1-CORRECT: Restore header BEFORE tls_sll_push
             // ROOT CAUSE: Simple refill path carves blocks but doesn't write headers.
-            // tls_sll_push() expects headers at base for C0-C6 to write next at base+1.
-            // Without header, base+1 contains garbage → chain corruption → SEGV!
+            // tls_sll_push() expects headers at base to write next at base+1.
+            // ALL classes (including C7) need headers restored!
 #if HAKMEM_TINY_HEADER_CLASSIDX
-            if (class_idx != 7) {
-                *(uint8_t*)p = HEADER_MAGIC | (class_idx & HEADER_CLASS_MASK);
-            }
+            *(uint8_t*)p = HEADER_MAGIC | (class_idx & HEADER_CLASS_MASK);
 #endif
 
             // CRITICAL: Use Box TLS-SLL API (C7-safe, no race)
@@ -376,22 +367,24 @@ static inline int sll_refill_small_from_ss(int class_idx, int max_take) {
         // Freelist fallback
         if (__builtin_expect(meta->freelist != NULL, 0)) {
             void* p = meta->freelist;
-            meta->freelist = *(void**)p;
+            // BUG FIX: Use Box API to read next pointer at correct offset
+            void* next = tiny_next_read(class_idx, p);
+            meta->freelist = next;
             meta->used++;
 
-            // ✅ FIX #11B: Restore header BEFORE tls_sll_push (same as Fix #11 for freelist)
+            // Phase E1-CORRECT: Restore header BEFORE tls_sll_push
             // Freelist stores next at base (offset 0), overwriting header.
             // Must restore header so tls_sll_push can write next at base+1 correctly.
+            // ALL classes (including C7) need headers restored!
 #if HAKMEM_TINY_HEADER_CLASSIDX
-            if (class_idx != 7) {
-                *(uint8_t*)p = HEADER_MAGIC | (class_idx & HEADER_CLASS_MASK);
-            }
+            *(uint8_t*)p = HEADER_MAGIC | (class_idx & HEADER_CLASS_MASK);
 #endif
 
             // CRITICAL: Use Box TLS-SLL API (C7-safe, no race)
             if (!tls_sll_push(class_idx, p, sll_cap)) {
                 // SLL full (should not happen, room was checked)
-                *(void**)p = meta->freelist; // Rollback freelist
+                // BUG FIX: Use Box API to write rollback next pointer
+                tiny_next_write(class_idx, p, next); // Rollback freelist
                 meta->freelist = p;
                 meta->used--;
                 break;
@@ -421,7 +414,8 @@ static inline int sll_refill_small_from_ss(int class_idx, int max_take) {
     while (taken < take) {
         void* p = NULL;
         if (__builtin_expect(meta->freelist != NULL, 0)) {
-            p = meta->freelist; meta->freelist = *(void**)p; meta->used++;
+            // BUG FIX: Use Box API to read next pointer at correct offset
+            p = meta->freelist; meta->freelist = tiny_next_read(class_idx, p); meta->used++;
             // Track active blocks reserved into TLS SLL
             ss_active_inc(tls->ss);
         } else if (__builtin_expect(meta->carved < meta->capacity, 1)) {
```
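The recurring change in the hunks above is that raw offset arithmetic on the embedded next pointer (`*(void**)((uint8_t*)p + next_off_tls)`) is replaced by the helpers from box/tiny_next_ptr_box.h (`tiny_next_read` / `tiny_next_write`). A minimal sketch of what such helpers can look like, using the offset rule from the removed inline code; the Box's actual policy (in particular for C7 after Phase E1-CORRECT) may differ, hence the `_sketch` names:
```c
#include <stddef.h>
#include <stdint.h>

/* Sketch of the next-pointer Box helpers. The offset rule below mirrors the
 * removed inline logic: with the 1-byte class header enabled, next is stored
 * just past the header (base+1); the headerless layout keeps it at base+0.
 * The real box/tiny_next_ptr_box.h owns this policy. */
static inline size_t tiny_next_off_sketch(int class_idx) {
#if HAKMEM_TINY_HEADER_CLASSIDX
    return (class_idx == 7) ? 0u : 1u;   /* rule taken from the removed code */
#else
    (void)class_idx;
    return 0u;                           /* headerless build: next at base */
#endif
}

static inline void* tiny_next_read_sketch(int class_idx, void* block) {
    return *(void**)((uint8_t*)block + tiny_next_off_sketch(class_idx));
}

static inline void tiny_next_write_sketch(int class_idx, void* block, void* next) {
    *(void**)((uint8_t*)block + tiny_next_off_sketch(class_idx)) = next;
}
```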