Files
hakmem/core/box/superslab_expansion_box.c
Moe Charm (CI) acc64f2438 Phase ML1: Pool v1 memset 89.73% overhead 軽量化 (+15.34% improvement)
## Summary
- ChatGPT により bench_profile.h の setenv segfault を修正(RTLD_NEXT 経由に切り替え)
- core/box/pool_zero_mode_box.h 新設:ENV キャッシュ経由で ZERO_MODE を統一管理
- core/hakmem_pool.c で zero mode に応じた memset 制御(FULL/header/off)
- A/B テスト結果:ZERO_MODE=header で +15.34% improvement(1M iterations, C6-heavy)

## Files Modified
- core/box/pool_api.inc.h: pool_zero_mode_box.h include
- core/bench_profile.h: glibc setenv → malloc+putenv(segfault 回避)
- core/hakmem_pool.c: zero mode 参照・制御ロジック
- core/box/pool_zero_mode_box.h (新設): enum/getter
- CURRENT_TASK.md: Phase ML1 結果記載

## Test Results
| Iterations | ZERO_MODE=full | ZERO_MODE=header | Improvement |
|-----------|----------------|-----------------|------------|
| 10K       | 3.06 M ops/s   | 3.17 M ops/s    | +3.65%     |
| 1M        | 23.71 M ops/s  | 27.34 M ops/s   | **+15.34%** |

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Haiku 4.5 <noreply@anthropic.com>
2025-12-10 09:08:18 +09:00

278 lines
9.7 KiB
C

// superslab_expansion_box.c - Box E: SuperSlab Expansion Implementation
// Purpose: Safe SuperSlab expansion with TLS state guarantee
// Box Theory: Complete encapsulation of expansion logic
//
// License: MIT
// Date: 2025-11-12
#include "superslab_expansion_box.h"
#include "../hakmem_tiny_superslab.h" // expand_superslab_head(), g_superslab_heads
#include "../hakmem_tiny_superslab_internal.h"
#include "../hakmem_tiny_superslab_constants.h" // SUPERSLAB_SLAB0_DATA_OFFSET

#include <pthread.h>   // pthread_self()
#include <stdatomic.h> // _Atomic, atomic_fetch_add_explicit, atomic_load_explicit
#include <stdio.h>
#include <string.h>
// External SuperSlabHead array (defined in hakmem_tiny_superslab.c)
extern SuperSlabHead* g_superslab_heads[TINY_NUM_CLASSES_SS];
// External lock depth for safe fprintf in malloc context
extern __thread int g_hakmem_lock_depth;
// ============================================================================
// Box E: Core API Implementation
// ============================================================================
// Note: We don't implement expansion_capture_tls_state here because it requires
// access to g_tls_slabs, which is static in hakmem_tiny.c. The caller should
// capture state directly from their local g_tls_slabs reference.
// Box E core: expand the SuperSlab chain for `class_idx` and hand back a
// fully-initialized TLS binding for slab 0 of the new chunk.
//
// Parameters:
//   head      - SuperSlabHead for the class being expanded (must be non-NULL)
//   class_idx - tiny size class index (< TINY_NUM_CLASSES_SS)
//
// Returns an ExpansionResult:
//   success=true,  error_code= 0  on success (new_state is fully populated)
//   success=false, error_code=-2  on invalid parameters
//   success=false, error_code=-1  when expand_superslab_head() fails (OOM)
//
// On success, result.new_state points at slab 0 of head->current_chunk and
// is safe to install via expansion_apply_tls_state().
ExpansionResult expansion_expand_with_tls_guarantee(
    SuperSlabHead* head,
    uint8_t class_idx)
{
    ExpansionResult result;
    memset(&result, 0, sizeof(result));
    result.success = false;
    result.error_code = -2; // Invalid params

    // Validate parameters
    if (!head || class_idx >= TINY_NUM_CLASSES_SS) {
        return result;
    }

    // CRITICAL: Call existing expand_superslab_head() with mutex protection.
    // That function already handles:
    //   1. Mutex lock/unlock (head->expansion_lock)
    //   2. Double-check pattern (re-verify after lock)
    //   3. Chunk allocation and linking
    //   4. current_chunk update
    int expand_result = expand_superslab_head(head);
    if (expand_result < 0) {
        // Expansion failed (OOM)
        result.success = false;
        result.error_code = -1; // OOM
        return result;
    }

    // Expansion succeeded.
    // CRITICAL FIX: Bind slab 0 immediately to prevent NULL meta SEGV.
    // The new chunk always has slab 0 available after expansion.
    SuperSlab* new_ss = head->current_chunk;

    // Initialize slab 0 metadata (set capacity, mark as active in bitmap).
    extern void superslab_init_slab(SuperSlab* ss, int slab_idx, size_t block_size, uint32_t owner_tid);
    extern const size_t g_tiny_class_sizes[];
    uint32_t my_tid = (uint32_t)(uintptr_t)pthread_self();
    size_t block_size = g_tiny_class_sizes[class_idx];
    superslab_init_slab(new_ss, 0, block_size, my_tid);

    // CRITICAL FIX: Explicitly set class_idx to avoid C0/C7 confusion.
    // New SuperSlabs start with meta->class_idx=0 (mmap zero-init).
    new_ss->slabs[0].class_idx = (uint8_t)class_idx;
    // P1.1: Update class_map after expansion.
    new_ss->class_map[0] = (uint8_t)class_idx;

    // Bind slab 0 to the TLS state handed back to the caller.
    result.new_state.ss = new_ss;
    result.new_state.class_idx = class_idx;
    result.new_state.slab_idx = 0;              // Always bind slab 0 after expansion
    result.new_state.meta = &new_ss->slabs[0];  // Point to slab 0 metadata

    // Slab 0's data starts SUPERSLAB_SLAB0_DATA_OFFSET bytes into the chunk
    // (slab 0 carries the chunk header in front of its data).
    // Formula: base = ss + (slab_idx * SLAB_SIZE) + (slab_idx == 0 ? SLAB0_OFFSET : 0)
    result.new_state.slab_base = (uint8_t*)new_ss + SUPERSLAB_SLAB0_DATA_OFFSET;

    // Debug: log backend used for expansion (first few events only).
    static _Atomic uint32_t g_ss_backend_log = 0;
    uint32_t n = atomic_fetch_add_explicit(&g_ss_backend_log, 1, memory_order_relaxed);
    if (n < 4) {
        // FIX: bump g_hakmem_lock_depth around fprintf, matching every other
        // logging site in this file — fprintf may allocate, and the guard is
        // what makes fprintf safe in malloc context (see extern declaration).
        g_hakmem_lock_depth++;
        fprintf(stderr, "[SS_BACKEND] expand legacy cls=%d ss=%p slab_idx=0 base=%p\n",
                class_idx, (void*)new_ss, result.new_state.slab_base);
        g_hakmem_lock_depth--;
    }

    result.success = true;
    result.error_code = 0;
    return result;
}
// Install the TLS binding produced by expansion_expand_with_tls_guarantee()
// into the caller's per-class TLS slab array.
//
// Parameters:
//   class_idx - tiny size class index (< TINY_NUM_CLASSES_SS)
//   new_state - validated state snapshot from a successful expansion
//   tls_array - caller's TLS slab array (g_tls_slabs is static in
//               hakmem_tiny.c, so the caller passes its own reference)
//
// After this call the slot's meta and slab_base are never NULL, because the
// expansion path always binds slab 0 of the new chunk.
void expansion_apply_tls_state(
    uint8_t class_idx,
    const ExpansionTLSState* new_state,
    TinyTLSSlab* tls_array)
{
    // Guard clauses: NULL inputs or an out-of-range class index are no-ops.
    if (new_state == NULL || tls_array == NULL) {
        return;
    }
    if (class_idx >= TINY_NUM_CLASSES_SS) {
        return;
    }

    // Copy every field of the expansion result into the per-class slot.
    TinyTLSSlab* slot = &tls_array[class_idx];
    slot->slab_idx  = new_state->slab_idx;  // 0 — slab 0 is bound post-expansion
    slot->slab_base = new_state->slab_base; // slab 0 data base (non-NULL)
    slot->meta      = new_state->meta;      // slab 0 metadata (non-NULL)
    slot->ss        = new_state->ss;        // the freshly expanded chunk
}
// ============================================================================
// Box E: Debug & Validation Implementation
// ============================================================================
#if !defined(HAKMEM_BUILD_RELEASE) || defined(HAKMEM_EXPANSION_BOX_DEBUG)
// Validate an ExpansionTLSState snapshot for internal consistency.
//
// Checks, in order: non-NULL state, SuperSlab magic, class consistency,
// slab index bounds, meta pointer identity, and slab_base range. Returns
// false (after logging to stderr) on the first failed check; returns true
// if all checks pass. A NULL ss is accepted as the valid pre-allocation
// initial state.
//
//   state   - snapshot to validate (NULL returns false)
//   context - caller-supplied tag included in every log line
//
// All fprintf calls are bracketed by g_hakmem_lock_depth++/-- so logging
// is safe when called from malloc context.
bool expansion_validate_tls_state(
const ExpansionTLSState* state,
const char* context)
{
if (!state) {
return false;
}
// Allow NULL ss (initial state before any allocation)
if (!state->ss) {
return true;
}
// Validate SuperSlab magic (detects stale/corrupted chunk pointers)
if (state->ss->magic != SUPERSLAB_MAGIC) {
g_hakmem_lock_depth++;
fprintf(stderr, "[EXPANSION_VAL] %s: Invalid SuperSlab magic: 0x%016llx (expected 0x%016llx)\n",
context, (unsigned long long)state->ss->magic, (unsigned long long)SUPERSLAB_MAGIC);
g_hakmem_lock_depth--;
return false;
}
// Validate class consistency: the chunk's size class must match the state's
if (state->ss->size_class != state->class_idx) {
g_hakmem_lock_depth++;
fprintf(stderr, "[EXPANSION_VAL] %s: Class mismatch: ss->size_class=%u, state->class_idx=%u\n",
context, state->ss->size_class, state->class_idx);
g_hakmem_lock_depth--;
return false;
}
// Validate slab index bounds; capacity is derived from the chunk size
// (lg_size 21 => 2MB chunk with 32 slabs, otherwise 1MB with 16 slabs)
int capacity = (state->ss->lg_size == 21) ? 32 : 16; // 2MB=32 slabs, 1MB=16 slabs
if (state->slab_idx >= capacity) {
g_hakmem_lock_depth++;
fprintf(stderr, "[EXPANSION_VAL] %s: slab_idx out of bounds: %u >= %d\n",
context, state->slab_idx, capacity);
g_hakmem_lock_depth--;
return false;
}
// Validate meta pointer identity: when set, it must be exactly the entry
// of ss->slabs[] selected by slab_idx (not merely somewhere in the array)
if (state->meta) {
TinySlabMeta* expected_meta = &state->ss->slabs[state->slab_idx];
if (state->meta != expected_meta) {
g_hakmem_lock_depth++;
fprintf(stderr, "[EXPANSION_VAL] %s: meta pointer mismatch: %p (expected %p)\n",
context, (void*)state->meta, (void*)expected_meta);
g_hakmem_lock_depth--;
return false;
}
}
// Validate slab_base range: when set, it must fall within the chunk's
// [ss, ss + 2^lg_size) memory span
if (state->slab_base) {
uintptr_t ss_start = (uintptr_t)state->ss;
size_t ss_size = (size_t)1 << state->ss->lg_size;
uintptr_t ss_end = ss_start + ss_size;
uintptr_t base_addr = (uintptr_t)state->slab_base;
if (base_addr < ss_start || base_addr >= ss_end) {
g_hakmem_lock_depth++;
fprintf(stderr, "[EXPANSION_VAL] %s: slab_base out of range: %p (ss: %p - %p)\n",
context, (void*)state->slab_base, (void*)ss_start, (void*)ss_end);
g_hakmem_lock_depth--;
return false;
}
}
return true;
}
// Post-expansion sanity check: confirm the head and the new TLS state agree
// and that the fresh chunk is actually usable.
//
// Verifies, in order: non-NULL arguments, new_state->ss set, current_chunk
// advanced to the same chunk, at least one free slab in the chunk's bitmap,
// and total_chunks >= 1. Returns false (after logging) on the first failure.
// old_state is accepted for symmetry/future use; only its non-NULLness is
// checked here.
bool expansion_verify_expansion(
    SuperSlabHead* head,
    const ExpansionTLSState* old_state,
    const ExpansionTLSState* new_state)
{
    if (head == NULL || old_state == NULL || new_state == NULL) {
        return false;
    }

    SuperSlab* chunk = new_state->ss;

    // The new TLS state must reference a chunk.
    if (chunk == NULL) {
        g_hakmem_lock_depth++;
        fprintf(stderr, "[EXPANSION_VERIFY] New state has NULL SuperSlab\n");
        g_hakmem_lock_depth--;
        return false;
    }

    // head->current_chunk must have been advanced to that same chunk.
    if (head->current_chunk != chunk) {
        g_hakmem_lock_depth++;
        fprintf(stderr, "[EXPANSION_VERIFY] current_chunk mismatch: head=%p, new_state=%p\n",
                (void*)head->current_chunk, (void*)chunk);
        g_hakmem_lock_depth--;
        return false;
    }

    // The chunk must still have at least one free slab. Slab count follows
    // the chunk size (lg_size 21 => 2MB => 32 slabs, else 1MB => 16); the
    // 32-slab mask is written literally to avoid the UB of 1U << 32.
    int slab_count = (chunk->lg_size == 21) ? 32 : 16;
    uint32_t all_used = (slab_count >= 32) ? 0xFFFFFFFF : ((1U << slab_count) - 1);
    if (chunk->slab_bitmap == all_used) {
        g_hakmem_lock_depth++;
        fprintf(stderr, "[EXPANSION_VERIFY] New chunk has no free slabs: bitmap=0x%08x\n",
                chunk->slab_bitmap);
        g_hakmem_lock_depth--;
        return false;
    }

    // total_chunks should be at least 1 after a successful expansion. We
    // cannot verify the increment itself without the pre-expansion count,
    // so only the weaker non-zero property is checked.
    if (atomic_load_explicit(&head->total_chunks, memory_order_relaxed) == 0) {
        g_hakmem_lock_depth++;
        fprintf(stderr, "[EXPANSION_VERIFY] total_chunks is 0 after expansion\n");
        g_hakmem_lock_depth--;
        return false;
    }

    return true;
}
// Emit a one-line expansion trace to stderr.
//
//   event     - short tag describing what happened (must be non-NULL)
//   class_idx - tiny size class the event applies to
//   state     - TLS snapshot to dump; a NULL ss is reported as the
//               initial (pre-allocation) state
//
// The fprintf is bracketed by g_hakmem_lock_depth++/-- so logging cannot
// re-enter the allocator when called from malloc context.
void expansion_log_event(
    const char* event,
    uint8_t class_idx,
    const ExpansionTLSState* state)
{
    if (event == NULL || state == NULL) {
        return;
    }

    g_hakmem_lock_depth++;
    if (state->ss == NULL) {
        fprintf(stderr, "[EXPANSION] class=%u %s: ss=NULL (initial state)\n",
                class_idx, event);
    } else {
        fprintf(stderr, "[EXPANSION] class=%u %s: ss=%p, bitmap=0x%08x, active=%u, slab_idx=%u\n",
                class_idx, event,
                (void*)state->ss,
                state->ss->slab_bitmap,
                state->ss->active_slabs,
                state->slab_idx);
    }
    g_hakmem_lock_depth--;
}
#endif // !HAKMEM_BUILD_RELEASE || HAKMEM_EXPANSION_BOX_DEBUG