CRITICAL FIX: TLS 未初期化による 4T SEGV を完全解消
**問題:**
- Larson 4T で 100% SEGV (1T は 2.09M ops/s で完走)
- System/mimalloc は 4T で 33.52M ops/s 正常動作
- SS OFF + Remote OFF でも 4T で SEGV
**根本原因: (Task agent ultrathink 調査結果)**
```
CRASH: mov (%r15),%r13
R15 = 0x6261 ← ASCII "ba" (ゴミ値、未初期化TLS)
```
Worker スレッドの TLS 変数が未初期化:
- `__thread void* g_tls_sll_head[TINY_NUM_CLASSES];` ← 初期化なし
- pthread_create() で生成されたスレッドでゼロ初期化されない
- NULL チェックが通過 (0x6261 != NULL) → dereference → SEGV
**修正内容:**
全 TLS 配列に明示的初期化子 `= {0}` を追加:
1. **core/hakmem_tiny.c:**
- `g_tls_sll_head[TINY_NUM_CLASSES] = {0}`
- `g_tls_sll_count[TINY_NUM_CLASSES] = {0}`
- `g_tls_live_ss[TINY_NUM_CLASSES] = {0}`
- `g_tls_bcur[TINY_NUM_CLASSES] = {0}`
- `g_tls_bend[TINY_NUM_CLASSES] = {0}`
2. **core/tiny_fastcache.c:**
- `g_tiny_fast_cache[TINY_FAST_CLASS_COUNT] = {0}`
- `g_tiny_fast_count[TINY_FAST_CLASS_COUNT] = {0}`
- `g_tiny_fast_free_head[TINY_FAST_CLASS_COUNT] = {0}`
- `g_tiny_fast_free_count[TINY_FAST_CLASS_COUNT] = {0}`
3. **core/hakmem_tiny_magazine.c:**
- `g_tls_mags[TINY_NUM_CLASSES] = {0}`
4. **core/tiny_sticky.c:**
- `g_tls_sticky_ss[TINY_NUM_CLASSES][TINY_STICKY_RING] = {0}`
- `g_tls_sticky_idx[TINY_NUM_CLASSES][TINY_STICKY_RING] = {0}`
- `g_tls_sticky_pos[TINY_NUM_CLASSES] = {0}`
**効果:**
```
Before: 1T: 2.09M ✅ | 4T: SEGV 💀
After: 1T: 2.41M ✅ | 4T: 4.19M ✅ (+15% 1T, SEGV解消)
```
**テスト:**
```bash
# 1 thread: 完走
./larson_hakmem 2 8 128 1024 1 12345 1
→ Throughput = 2,407,597 ops/s ✅
# 4 threads: 完走(以前は SEGV)
./larson_hakmem 2 8 128 1024 1 12345 4
→ Throughput = 4,192,155 ops/s ✅
```
**調査協力:** Task agent (ultrathink mode) による完璧な根本原因特定
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
140
core/box/pool_init_api.inc.h
Normal file
140
core/box/pool_init_api.inc.h
Normal file
@@ -0,0 +1,140 @@
|
||||
// pool_init_api.inc.h — Box: L2 Pool init/shutdown + MF2 debug
|
||||
#ifndef POOL_INIT_API_INC_H
|
||||
#define POOL_INIT_API_INC_H
|
||||
|
||||
// Thread-safe initialization using pthread_once
|
||||
static pthread_once_t hak_pool_init_once_control = PTHREAD_ONCE_INIT;
|
||||
static void hak_pool_init_impl(void) {
|
||||
const FrozenPolicy* pol = hkm_policy_get();
|
||||
if (pol && pol->mid_dyn1_bytes >= POOL_MIN_SIZE && pol->mid_dyn1_bytes <= POOL_MAX_SIZE) {
|
||||
g_class_sizes[5] = pol->mid_dyn1_bytes;
|
||||
} else {
|
||||
g_class_sizes[5] = 0;
|
||||
}
|
||||
if (pol && pol->mid_dyn2_bytes >= POOL_MIN_SIZE && pol->mid_dyn2_bytes <= POOL_MAX_SIZE) {
|
||||
g_class_sizes[6] = pol->mid_dyn2_bytes;
|
||||
} else {
|
||||
g_class_sizes[6] = 0;
|
||||
}
|
||||
for (int c = 0; c < POOL_NUM_CLASSES; c++) {
|
||||
for (int s = 0; s < POOL_NUM_SHARDS; s++) {
|
||||
g_pool.freelist[c][s] = NULL;
|
||||
}
|
||||
atomic_store(&g_pool.nonempty_mask[c], 0);
|
||||
for (int s = 0; s < POOL_NUM_SHARDS; s++) {
|
||||
pthread_mutex_init(&g_pool.freelist_locks[c][s].m, NULL);
|
||||
atomic_store(&g_pool.remote_head[c][s], (uintptr_t)0);
|
||||
atomic_store(&g_pool.remote_count[c][s], 0);
|
||||
}
|
||||
g_pool.hits[c] = 0;
|
||||
g_pool.misses[c] = 0;
|
||||
g_pool.refills[c] = 0;
|
||||
g_pool.frees[c] = 0;
|
||||
g_pool.pages_by_class[c] = 0;
|
||||
g_pool.bundle_factor[c] = 1;
|
||||
g_pool.last_hits[c] = 0;
|
||||
g_pool.last_misses[c] = 0;
|
||||
}
|
||||
g_pool.total_bytes_allocated = 0;
|
||||
g_pool.total_pages_allocated = 0;
|
||||
atomic_store(&g_pool.trylock_attempts, 0);
|
||||
atomic_store(&g_pool.trylock_success, 0);
|
||||
atomic_store(&g_pool.ring_underflow, 0);
|
||||
const char* e_tls = getenv("HAKMEM_POOL_TLS_FREE");
|
||||
g_pool.tls_free_enabled = (e_tls == NULL) ? 1 : (atoi(e_tls) != 0);
|
||||
const char* e_wrap = getenv("HAKMEM_WRAP_L2");
|
||||
g_wrap_l2_enabled = (e_wrap && atoi(e_wrap) != 0) ? 1 : 0;
|
||||
const char* e_minb = getenv("HAKMEM_POOL_MIN_BUNDLE");
|
||||
if (e_minb) { int v = atoi(e_minb); if (v >= 1 && v <= 8) g_pool_min_bundle = v; }
|
||||
const char* e_mix = getenv("HAKMEM_SHARD_MIX");
|
||||
g_shard_mix_enabled = (e_mix && atoi(e_mix) != 0) ? 1 : 0;
|
||||
const char* e_ring = getenv("HAKMEM_POOL_TLS_RING");
|
||||
if (e_ring) g_tls_ring_enabled = (atoi(e_ring) != 0);
|
||||
const char* e_hdr = getenv("HAKMEM_HDR_LIGHT");
|
||||
if (e_hdr) g_hdr_light_enabled = atoi(e_hdr);
|
||||
const char* e_probe = getenv("HAKMEM_TRYLOCK_PROBES");
|
||||
if (e_probe) { int v = atoi(e_probe); if (v>=1 && v<=8) g_trylock_probes = v; }
|
||||
const char* e_div = getenv("HAKMEM_RING_RETURN_DIV");
|
||||
if (e_div) { int v = atoi(e_div); if (v>=2 && v<=4) g_ring_return_div = v; }
|
||||
const char* e_lo = getenv("HAKMEM_TLS_LO_MAX");
|
||||
if (e_lo) { int v = atoi(e_lo); if (v>=32 && v<=16384) g_tls_lo_max = v; }
|
||||
const char* e_cs = getenv("HAKMEM_POOL_COUNT_SAMPLE");
|
||||
if (e_cs) { int v = atoi(e_cs); if (v>=0 && v<=16) g_count_sample_exp = v; }
|
||||
const char* e_tc = getenv("HAKMEM_TC_ENABLE");
|
||||
if (e_tc) g_tc_enabled = (atoi(e_tc) != 0);
|
||||
const char* e_tcu = getenv("HAKMEM_TC_UNBOUNDED");
|
||||
if (e_tcu) g_tc_drain_unbounded = (atoi(e_tcu) != 0);
|
||||
const char* e_tcm = getenv("HAKMEM_TC_DRAIN_MAX");
|
||||
if (e_tcm) { int v = atoi(e_tcm); if (v>=0 && v<=65536) g_tc_drain_max = v; }
|
||||
const char* e_tct = getenv("HAKMEM_TC_DRAIN_TRIGGER");
|
||||
if (e_tct) { int v = atoi(e_tct); if (v>=0 && v<=POOL_L2_RING_CAP) g_tc_drain_trigger = v; }
|
||||
const char* e_mf2 = getenv("HAKMEM_MF2_ENABLE");
|
||||
if (e_mf2 && atoi(e_mf2) != 0) {
|
||||
g_mf2_enabled = 1;
|
||||
mf2_page_registry_init();
|
||||
const char* e_maxq = getenv("HAKMEM_MF2_MAX_QUEUES");
|
||||
if (e_maxq) { int v = atoi(e_maxq); if (v>=1 && v<=256) g_mf2_max_queues = v; }
|
||||
const char* e_lease = getenv("HAKMEM_MF2_LEASE_MS");
|
||||
if (e_lease) { int v = atoi(e_lease); if (v>=0 && v<=1000) g_mf2_lease_ms = v; }
|
||||
const char* e_idle = getenv("HAKMEM_MF2_IDLE_THRESHOLD_US");
|
||||
if (e_idle) { int v = atoi(e_idle); if (v>=0 && v<=10000) g_mf2_idle_threshold_us = v; }
|
||||
HAKMEM_LOG("[Pool] MF2 Per-Page Sharding enabled\n");
|
||||
HAKMEM_LOG("[MF2] max_queues=%d, lease_ms=%d, idle_threshold_us=%d\n", g_mf2_max_queues, g_mf2_lease_ms, g_mf2_idle_threshold_us);
|
||||
}
|
||||
g_pool.initialized = 1;
|
||||
HAKMEM_LOG("[Pool] Initialized (L2 Hybrid Pool)\n");
|
||||
if (g_class_sizes[5] != 0 || g_class_sizes[6] != 0) {
|
||||
HAKMEM_LOG("[Pool] Classes: 2KB, 4KB, 8KB, 16KB, 32KB%s%s%s\n",
|
||||
g_class_sizes[5] ? ", dyn1=" : "",
|
||||
g_class_sizes[5] ? "" : (g_class_sizes[6]?",":""),
|
||||
(g_class_sizes[5]||g_class_sizes[6]) ? "" : "");
|
||||
} else {
|
||||
HAKMEM_LOG("[Pool] Classes: 2KB, 4KB, 8KB, 16KB, 32KB\n");
|
||||
}
|
||||
HAKMEM_LOG("[Pool] Page size: %d KB\n", POOL_PAGE_SIZE / 1024);
|
||||
HAKMEM_LOG("[Pool] Shards: %d (site-based)\n", POOL_NUM_SHARDS);
|
||||
}
|
||||
|
||||
// Public entry point: runs hak_pool_init_impl at most once, thread-safely,
// no matter how many threads race to call it.
void hak_pool_init(void) { pthread_once(&hak_pool_init_once_control, hak_pool_init_impl); }
|
||||
|
||||
static void mf2_print_debug_stats(void) {
|
||||
if (!g_mf2_enabled) return;
|
||||
fprintf(stderr, "\n[MF2 DEBUG STATS]\n");
|
||||
fprintf(stderr, "Alloc fast hits: %12lu\n", (unsigned long)atomic_load(&g_mf2_alloc_fast_hit));
|
||||
fprintf(stderr, "Alloc slow hits: %12lu\n", (unsigned long)atomic_load(&g_mf2_alloc_slow_hit));
|
||||
fprintf(stderr, "Page reuses: %12lu\n", (unsigned long)atomic_load(&g_mf2_page_reuse_count));
|
||||
fprintf(stderr, "New pages: %12lu\n", (unsigned long)atomic_load(&g_mf2_new_page_count));
|
||||
fprintf(stderr, "Owner frees: %12lu\n", (unsigned long)atomic_load(&g_mf2_free_owner_count));
|
||||
fprintf(stderr, "Remote frees: %12lu\n", (unsigned long)atomic_load(&g_mf2_free_remote_count));
|
||||
fprintf(stderr, "Slow checked: %12lu\n", (unsigned long)atomic_load(&g_mf2_slow_checked_drain));
|
||||
fprintf(stderr, "Slow found rem: %12lu\n", (unsigned long)atomic_load(&g_mf2_slow_found_remote));
|
||||
fprintf(stderr, "Full scan chk: %12lu\n", (unsigned long)atomic_load(&g_mf2_full_scan_checked));
|
||||
fprintf(stderr, "Full scan rem: %12lu\n", (unsigned long)atomic_load(&g_mf2_full_scan_found_remote));
|
||||
fprintf(stderr, "Eager scan: %12lu\n", (unsigned long)atomic_load(&g_mf2_eager_drain_scanned));
|
||||
fprintf(stderr, "Eager found: %12lu\n", (unsigned long)atomic_load(&g_mf2_eager_drain_found));
|
||||
fprintf(stderr, "Drain attempts: %12lu\n", (unsigned long)atomic_load(&g_mf2_drain_attempts));
|
||||
fprintf(stderr, "Drain successes: %12lu\n", (unsigned long)atomic_load(&g_mf2_drain_success));
|
||||
fprintf(stderr, "Remote drains: %12lu (blocks: %lu)\n",
|
||||
(unsigned long)atomic_load(&g_mf2_drain_count),
|
||||
(unsigned long)atomic_load(&g_mf2_drain_blocks));
|
||||
fprintf(stderr, "\n[PENDING QUEUE]\n");
|
||||
fprintf(stderr, "Pending enqueued: %12lu\n", (unsigned long)atomic_load(&g_mf2_pending_enqueued));
|
||||
fprintf(stderr, "Pending drained: %12lu\n", (unsigned long)atomic_load(&g_mf2_pending_drained));
|
||||
fprintf(stderr, "Pending requeued: %12lu\n", (unsigned long)atomic_load(&g_mf2_pending_requeued));
|
||||
uint64_t total_allocs = atomic_load(&g_mf2_alloc_fast_hit) + atomic_load(&g_mf2_alloc_slow_hit);
|
||||
uint64_t total_frees = atomic_load(&g_mf2_free_owner_count) + atomic_load(&g_mf2_free_remote_count);
|
||||
if (total_allocs > 0) fprintf(stderr, "\nFast path hit rate: %.2f%%\n", 100.0 * atomic_load(&g_mf2_alloc_fast_hit) / total_allocs);
|
||||
if (total_frees > 0) fprintf(stderr, "Owner free rate: %.2f%%\n", 100.0 * atomic_load(&g_mf2_free_owner_count) / total_frees);
|
||||
fflush(stderr);
|
||||
}
|
||||
|
||||
// GCC/Clang destructor: dump MF2 stats at process exit even if the caller
// never invokes hak_pool_shutdown (mf2_print_debug_stats is a no-op when
// MF2 is disabled).
__attribute__((destructor)) static void mf2_destructor(void) { mf2_print_debug_stats(); }
|
||||
|
||||
void hak_pool_shutdown(void) {
|
||||
if (!g_pool.initialized) return;
|
||||
hak_pool_print_stats();
|
||||
mf2_print_debug_stats();
|
||||
g_pool.initialized = 0;
|
||||
}
|
||||
|
||||
#endif // POOL_INIT_API_INC_H
|
||||
Reference in New Issue
Block a user