Files
hakmem/core/box/ss_os_acquire_box.h
Moe Charm (CI) acc64f2438 Phase ML1: Pool v1 memset 89.73% overhead 軽量化 (+15.34% improvement)
## Summary
- ChatGPT により bench_profile.h の setenv segfault を修正(RTLD_NEXT 経由に切り替え)
- core/box/pool_zero_mode_box.h 新設:ENV キャッシュ経由で ZERO_MODE を統一管理
- core/hakmem_pool.c で zero mode に応じた memset 制御(FULL/header/off)
- A/B テスト結果:ZERO_MODE=header で +15.34% improvement(1M iterations, C6-heavy)

## Files Modified
- core/box/pool_api.inc.h: pool_zero_mode_box.h include
- core/bench_profile.h: glibc setenv → malloc+putenv(segfault 回避)
- core/hakmem_pool.c: zero mode 参照・制御ロジック
- core/box/pool_zero_mode_box.h (新設): enum/getter
- CURRENT_TASK.md: Phase ML1 結果記載

## Test Results
| Iterations | ZERO_MODE=full | ZERO_MODE=header | Improvement |
|-----------|----------------|-----------------|------------|
| 10K       | 3.06 M ops/s   | 3.17 M ops/s    | +3.65%     |
| 1M        | 23.71 M ops/s  | 27.34 M ops/s   | **+15.34%** |

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Haiku 4.5 <noreply@anthropic.com>
2025-12-10 09:08:18 +09:00

146 lines
4.7 KiB
C

// ss_os_acquire_box.h - SuperSlab OS Memory Acquisition Box
// Purpose: Low-level OS memory allocation (mmap/munmap) for SuperSlabs
// Box Theory: Encapsulates platform-specific aligned memory allocation
//
// Responsibilities:
// - Aligned mmap allocation (2MB boundary)
// - OOM diagnostics and error reporting
// - Global mmap counters
//
// Dependencies: None (pure OS interface)
//
// License: MIT
// Date: 2025-11-19
#ifndef HAKMEM_SS_OS_ACQUIRE_BOX_H
#define HAKMEM_SS_OS_ACQUIRE_BOX_H
#include <stdint.h>
#include <stddef.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>
#include <sys/mman.h>
#include "madvise_guard_box.h"
// ============================================================================
// Global Counters (for debugging/diagnostics)
// ============================================================================
extern _Atomic uint64_t g_ss_mmap_count;             // SuperSlab mmap acquisitions (updated by ss_os_acquire)
extern _Atomic uint64_t g_final_fallback_mmap_count; // NOTE(review): presumably last-resort mmap fallbacks — confirm at definition site
extern _Atomic uint64_t g_ss_os_alloc_calls;         // bumped by ss_os_stats_record_alloc()
extern _Atomic uint64_t g_ss_os_free_calls;          // bumped by ss_os_stats_record_free()
extern _Atomic uint64_t g_ss_os_madvise_calls;       // bumped by ss_os_stats_record_madvise()
extern _Atomic uint64_t g_ss_os_madvise_fail_enomem; // not incremented here; presumably madvise ENOMEM failures — confirm
extern _Atomic uint64_t g_ss_os_madvise_fail_other;  // not incremented here; presumably other madvise failures — confirm
extern _Atomic uint64_t g_ss_os_huge_alloc_calls;    // bumped by ss_os_stats_record_huge_alloc()
extern _Atomic uint64_t g_ss_os_huge_fail_calls;     // bumped by ss_os_stats_record_huge_fail()
extern _Atomic bool g_ss_madvise_disabled;           // NOTE(review): looks like a runtime madvise kill-switch — confirm at definition site
// Lazily read HAKMEM_SS_OS_STATS once per process and cache the result.
// Returns 1 when the variable is set, non-empty, and does not start with
// '0'; returns 0 otherwise. Not thread-safe on the very first call
// (benign race: every racer computes the same value).
static inline int ss_os_stats_enabled(void) {
    static int cached = -1; // -1 == "not yet checked"
    if (__builtin_expect(cached < 0, 0)) {
        const char* v = getenv("HAKMEM_SS_OS_STATS");
        cached = (v != NULL && v[0] != '\0' && v[0] != '0');
    }
    return cached;
}
// Count one OS-level allocation call (relaxed atomic; diagnostics only).
// No-op unless HAKMEM_SS_OS_STATS is enabled.
static inline void ss_os_stats_record_alloc(void) {
    if (ss_os_stats_enabled()) {
        atomic_fetch_add_explicit(&g_ss_os_alloc_calls, 1, memory_order_relaxed);
    }
}
// Count one OS-level free call (relaxed atomic; diagnostics only).
// No-op unless HAKMEM_SS_OS_STATS is enabled.
static inline void ss_os_stats_record_free(void) {
    if (ss_os_stats_enabled()) {
        atomic_fetch_add_explicit(&g_ss_os_free_calls, 1, memory_order_relaxed);
    }
}
// Count one madvise call (relaxed atomic; diagnostics only).
// No-op unless HAKMEM_SS_OS_STATS is enabled.
static inline void ss_os_stats_record_madvise(void) {
    if (ss_os_stats_enabled()) {
        atomic_fetch_add_explicit(&g_ss_os_madvise_calls, 1, memory_order_relaxed);
    }
}
// ============================================================================
// HugePage Experiment (research-only)
// ============================================================================
// Lazily read HAKMEM_SS_HUGEPAGE_EXPERIMENT once and cache it.
// Returns 1 when set, non-empty, and not starting with '0'; else 0.
// Research-only toggle for the hugepage experiment path.
static inline int ss_os_huge_enabled(void) {
    static int cached = -1; // -1 == "not yet checked"
    if (__builtin_expect(cached < 0, 0)) {
        const char* v = getenv("HAKMEM_SS_HUGEPAGE_EXPERIMENT");
        cached = (v != NULL && v[0] != '\0' && v[0] != '0');
    }
    return cached;
}
// Parse HAKMEM_SS_HUGEPAGE_SIZE (only "2M" supported explicitly; otherwise
// falls back to default 2MB). This is intentionally soft/experimental.
static inline size_t ss_os_huge_size_bytes(void) {
static size_t g_huge_size = 0;
if (__builtin_expect(g_huge_size == 0, 0)) {
const char* e = getenv("HAKMEM_SS_HUGEPAGE_SIZE");
if (e && *e) {
char* end = NULL;
unsigned long long v = strtoull(e, &end, 0);
if (end && (*end == 'M' || *end == 'm')) {
v *= 1024ULL * 1024ULL;
}
if (v > 0) {
g_huge_size = (size_t)v;
}
}
if (g_huge_size == 0) {
g_huge_size = (size_t)(2ULL << 20); // default 2MB
}
}
return g_huge_size;
}
// Count one hugepage allocation attempt that succeeded (relaxed atomic).
// No-op unless HAKMEM_SS_OS_STATS is enabled.
static inline void ss_os_stats_record_huge_alloc(void) {
    if (ss_os_stats_enabled()) {
        atomic_fetch_add_explicit(&g_ss_os_huge_alloc_calls, 1, memory_order_relaxed);
    }
}
// Count one failed hugepage allocation attempt (relaxed atomic).
// No-op unless HAKMEM_SS_OS_STATS is enabled.
static inline void ss_os_stats_record_huge_fail(void) {
    if (ss_os_stats_enabled()) {
        atomic_fetch_add_explicit(&g_ss_os_huge_fail_calls, 1, memory_order_relaxed);
    }
}
// ============================================================================
// OS Acquisition API
// ============================================================================
// Acquire aligned SuperSlab memory from the OS via mmap.
//
// Parameters:
//   size_class: Size class index (0-7, used for statistics)
//   ss_size:    SuperSlab size in bytes (e.g., 2^21 = 2MB)
//   ss_mask:    Alignment mask (ss_size - 1); presumably ss_size is a
//               power of two — TODO confirm callers guarantee this
//   populate:   If nonzero, use MAP_POPULATE to prefault pages
//
// Returns: Aligned pointer or NULL on OOM.
//
// Guarantees (stated contract; implementation lives in the matching .c):
// - Returns NULL on OOM (never crashes)
// - Returned pointer is aligned to ss_size boundary
// - Logs OOM once per process (not spammy)
// - Updates g_ss_mmap_count counter
//
// Thread-safe: Yes (no shared state mutations except atomic counters)
void* ss_os_acquire(uint8_t size_class, size_t ss_size, uintptr_t ss_mask, int populate);
#endif // HAKMEM_SS_OS_ACQUIRE_BOX_H