## Summary

- Fixed the setenv segfault in bench_profile.h with help from ChatGPT (switched to going through RTLD_NEXT)
- Added core/box/pool_zero_mode_box.h: unified ZERO_MODE management via an ENV cache
- core/hakmem_pool.c now controls memset according to the zero mode (FULL / header / off)
- A/B test result: +15.34% improvement with ZERO_MODE=header (1M iterations, C6-heavy)

## Files Modified

- core/box/pool_api.inc.h: include pool_zero_mode_box.h
- core/bench_profile.h: glibc setenv → malloc+putenv (avoids the segfault)
- core/hakmem_pool.c: zero-mode lookup and control logic
- core/box/pool_zero_mode_box.h (new): enum / getter
- CURRENT_TASK.md: recorded the Phase ML1 results

## Test Results

| Iterations | ZERO_MODE=full | ZERO_MODE=header | Improvement |
|------------|----------------|------------------|-------------|
| 10K        | 3.06 M ops/s   | 3.17 M ops/s     | +3.65%      |
| 1M         | 23.71 M ops/s  | 27.34 M ops/s    | **+15.34%** |

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Haiku 4.5 <noreply@anthropic.com>
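The summary describes pool_zero_mode_box.h only as an enum plus a getter backed by an ENV cache. A minimal sketch of that shape is shown below; the identifiers (`pool_zero_mode_t`, `pool_zero_mode_get()`) and the exact environment variable name `ZERO_MODE` are illustrative assumptions, not the actual API.

```c
/* Illustrative sketch only - the real core/box/pool_zero_mode_box.h may use
 * different names; the variable name "ZERO_MODE" and the identifiers below
 * are assumptions based on the summary (enum + getter, ENV cached). */
#include <stdlib.h>
#include <string.h>

typedef enum {
    POOL_ZERO_FULL = 0,  /* memset the whole block (safest, slowest)  */
    POOL_ZERO_HEADER,    /* memset only the block header              */
    POOL_ZERO_OFF        /* skip zeroing entirely                     */
} pool_zero_mode_t;

/* Resolve the mode from the environment once and cache it, so the
 * allocation hot path never calls getenv() again. */
static inline pool_zero_mode_t pool_zero_mode_get(void)
{
    static int cached = -1;               /* -1 = not resolved yet */
    if (cached < 0) {
        const char* s = getenv("ZERO_MODE");
        if (s && strcmp(s, "header") == 0)    cached = POOL_ZERO_HEADER;
        else if (s && strcmp(s, "off") == 0)  cached = POOL_ZERO_OFF;
        else                                  cached = POOL_ZERO_FULL;
    }
    return (pool_zero_mode_t)cached;
}
```

core/hakmem_pool.c would then branch on the cached mode to choose between a full memset, a header-only memset, or no memset, which is the FULL / header / off control mentioned above.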
// ss_release_guard_box.h - Box: SuperSlab Release Guard
// Purpose:
//   Centralize the "can we release/recycle this slab / SuperSlab?" logic
//   behind a single Box contract, to avoid scattered lifetime checks.
//
// Box Theory:
//   - Single Responsibility:
//       Decide whether a given (ss, slab_idx) is safe to recycle or free.
//   - Clear Boundary:
//       All callers go through this Box before calling shared_pool_release_slab()
//       or superslab_free() directly.
//   - Fail-Fast Friendly:
//       Guard returns a boolean; callers decide whether to abort/log/drop.
//       This Box itself is free of logging by default (optionally gated in debug).
//   - Reversible / A/B:
//       Existing ad-hoc checks stay in place; this Box is additive and can
//       gradually replace them under build flags / environment switches.
//
// Invariants (intended, not all enforced here yet):
//   - Slab recycle (marking the slot EMPTY) requires:
//       meta->used == 0
//       meta->capacity > 0
//   - SuperSlab munmap / cache release requires:
//       ss->total_active_blocks == 0
//       superslab_ref_get(ss) == 0   (no TLS pins / remote pins)
//
// NOTE:
//   For now this box mirrors existing logic in hakmem_shared_pool_release.c,
//   without changing behaviour. It provides a single place to extend guards.

#ifndef HAKMEM_SS_RELEASE_GUARD_BOX_H
#define HAKMEM_SS_RELEASE_GUARD_BOX_H

#include <stdbool.h>    // bool
#include <stdint.h>     // uint32_t
#include <stdatomic.h>  // atomic_load_explicit, memory_order_acquire
                        // (may already be pulled in via the internal header)

#include "../hakmem_tiny_superslab_internal.h"

// Per-slab guard: "is it safe to mark this slab EMPTY and recycle?"
// - Checks TinySlabMeta invariants only (used/capacity) for now.
// - Does NOT inspect refcounts or remote queues (that is owned by higher boxes).
static inline bool ss_release_guard_slab_can_recycle(SuperSlab* ss,
                                                     int slab_idx,
                                                     TinySlabMeta* meta)
{
    (void)ss; (void)slab_idx;
    if (!meta) return false;

    // Mirror slab_is_empty() from slab_recycling_box.h
    if (meta->used != 0) return false;
    if (meta->capacity == 0) return false;
    return true;
}

// Per-SuperSlab guard: "is it safe to actually free (munmap/cache-release) this SuperSlab?"
// - Mirrors existing final check in shared_pool_release_slab():
//     active_blocks == 0 && refcount == 0
static inline bool ss_release_guard_superslab_can_free(SuperSlab* ss)
{
    if (!ss) return false;

    uint32_t active_blocks = atomic_load_explicit(&ss->total_active_blocks,
                                                  memory_order_acquire);
    uint32_t refs = superslab_ref_get(ss);

    if (active_blocks != 0) return false;
    if (refs != 0) return false;
    return true;
}

#endif // HAKMEM_SS_RELEASE_GUARD_BOX_H
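Usage note: the sketch below is a hypothetical caller, not code from hakmem_shared_pool_release.c; the actual release helpers (shared_pool_release_slab(), superslab_free()) are left as comments because their signatures are not shown in this header, and the include path is assumed.

```c
#include "box/ss_release_guard_box.h"  /* include path assumed for illustration */

/* Hypothetical release-path sketch: the guards only answer yes/no, and the
 * caller decides what to do next - recycle, free, log, or drop. */
static void try_release_sketch(SuperSlab* ss, int slab_idx, TinySlabMeta* meta)
{
    if (ss_release_guard_slab_can_recycle(ss, slab_idx, meta)) {
        /* Safe to mark the slot EMPTY, e.g. via shared_pool_release_slab()
         * (call elided: its exact signature is not part of this header). */
    }

    if (ss_release_guard_superslab_can_free(ss)) {
        /* active_blocks == 0 and refcount == 0: safe to munmap or
         * cache-release, e.g. via superslab_free() (call elided as above). */
    }
}
```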