// ss_os_acquire_box.h - SuperSlab OS Memory Acquisition Box
// Purpose: Low-level OS memory allocation (mmap/munmap) for SuperSlabs
// Box Theory: Encapsulates platform-specific aligned memory allocation
//
// Responsibilities:
// - Aligned mmap allocation (2MB boundary)
// - OOM diagnostics and error reporting
// - Global mmap counters
//
// Dependencies: None (pure OS interface)
//
// License: MIT
// Date: 2025-11-19

#ifndef HAKMEM_SS_OS_ACQUIRE_BOX_H
#define HAKMEM_SS_OS_ACQUIRE_BOX_H

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/mman.h>

// ============================================================================
// Global Counters (for debugging/diagnostics)
// ============================================================================

extern _Atomic uint64_t g_ss_mmap_count;
extern _Atomic uint64_t g_final_fallback_mmap_count;
extern _Atomic uint64_t g_ss_os_alloc_calls;
extern _Atomic uint64_t g_ss_os_free_calls;
extern _Atomic uint64_t g_ss_os_madvise_calls;
extern _Atomic uint64_t g_ss_os_madvise_fail_enomem;
extern _Atomic uint64_t g_ss_os_madvise_fail_other;
extern _Atomic uint64_t g_ss_os_huge_alloc_calls;
extern _Atomic uint64_t g_ss_os_huge_fail_calls;
extern _Atomic bool g_ss_madvise_disabled;

// Lazily reads HAKMEM_SS_OS_STATS once; any non-empty value other than "0"
// enables the per-call OS statistics counters below.
static inline int ss_os_stats_enabled(void) {
    static int g_ss_os_stats_enabled = -1;
    if (__builtin_expect(g_ss_os_stats_enabled == -1, 0)) {
        const char* e = getenv("HAKMEM_SS_OS_STATS");
        g_ss_os_stats_enabled = (e && *e && *e != '0') ? 1 : 0;
    }
    return g_ss_os_stats_enabled;
}

static inline void ss_os_stats_record_alloc(void) {
    if (!ss_os_stats_enabled()) { return; }
    atomic_fetch_add_explicit(&g_ss_os_alloc_calls, 1, memory_order_relaxed);
}

static inline void ss_os_stats_record_free(void) {
    if (!ss_os_stats_enabled()) { return; }
    atomic_fetch_add_explicit(&g_ss_os_free_calls, 1, memory_order_relaxed);
}

static inline void ss_os_stats_record_madvise(void) {
    if (!ss_os_stats_enabled()) { return; }
    atomic_fetch_add_explicit(&g_ss_os_madvise_calls, 1, memory_order_relaxed);
}

// ============================================================================
// madvise guard (shared by Superslab hot/cold paths)
// ============================================================================

static inline int ss_os_madvise_guarded(void* ptr, size_t len, int advice,
                                        const char* where) {
    (void)where;
    if (!ptr || len == 0) { return 0; }
    if (atomic_load_explicit(&g_ss_madvise_disabled, memory_order_relaxed)) {
        return 0;
    }
    int ret = madvise(ptr, len, advice);
    ss_os_stats_record_madvise();
    if (ret == 0) { return 0; }

    int e = errno;
    if (e == ENOMEM) {
        atomic_fetch_add_explicit(&g_ss_os_madvise_fail_enomem, 1, memory_order_relaxed);
        atomic_store_explicit(&g_ss_madvise_disabled, true, memory_order_relaxed);
#if !HAKMEM_BUILD_RELEASE
        // Log the first ENOMEM only, so a saturated vm.max_map_count does not
        // flood stderr.
        static _Atomic bool g_ss_madvise_enomem_logged = false;
        bool already = atomic_exchange_explicit(&g_ss_madvise_enomem_logged, true,
                                                memory_order_relaxed);
        if (!already) {
            fprintf(stderr,
                    "[SS_OS_MADVISE] madvise(advice=%d, ptr=%p, len=%zu) failed with ENOMEM "
                    "(vm.max_map_count reached?). Disabling further madvise calls.\n",
                    advice, ptr, len);
        }
#endif
        return 0; // soft fail, do not propagate ENOMEM
    }
    atomic_fetch_add_explicit(&g_ss_os_madvise_fail_other, 1, memory_order_relaxed);
    if (e == EINVAL) {
        errno = e;
        return -1; // let caller decide (strict mode)
    }
    errno = e;
    return 0;
}
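// ----------------------------------------------------------------------------
// Usage sketch for ss_os_madvise_guarded() (illustrative only; the helper
// name ss_os_decommit_sketch and the MADV_DONTNEED policy are assumptions,
// not part of this box's API). It demonstrates the error contract above:
// ENOMEM is swallowed after soft-disabling madvise, while EINVAL surfaces
// as -1 so strict callers can react.
// ----------------------------------------------------------------------------
static inline int ss_os_decommit_sketch(void* pages, size_t len) {
    int rc = ss_os_madvise_guarded(pages, len, MADV_DONTNEED, "decommit_sketch");
    // rc == -1 only for EINVAL (misaligned pointer or bad length);
    // ENOMEM and the disabled-guard fast path both return 0.
    return rc;
}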
// ============================================================================
// HugePage Experiment (research-only)
// ============================================================================

// Lazily reads HAKMEM_SS_HUGEPAGE_EXPERIMENT once; any non-empty value other
// than "0" enables the experiment.
static inline int ss_os_huge_enabled(void) {
    static int g_ss_os_huge_enabled = -1;
    if (__builtin_expect(g_ss_os_huge_enabled == -1, 0)) {
        const char* e = getenv("HAKMEM_SS_HUGEPAGE_EXPERIMENT");
        g_ss_os_huge_enabled = (e && *e && *e != '0') ? 1 : 0;
    }
    return g_ss_os_huge_enabled;
}

// Parse HAKMEM_SS_HUGEPAGE_SIZE: a number with an optional 'M'/'m' suffix
// (megabytes, e.g. "2M"). Unset or unparsable values fall back to the default
// 2MB. This is intentionally soft/experimental.
static inline size_t ss_os_huge_size_bytes(void) {
    static size_t g_huge_size = 0;
    if (__builtin_expect(g_huge_size == 0, 0)) {
        const char* e = getenv("HAKMEM_SS_HUGEPAGE_SIZE");
        if (e && *e) {
            char* end = NULL;
            unsigned long long v = strtoull(e, &end, 0);
            if (end && (*end == 'M' || *end == 'm')) {
                v *= 1024ULL * 1024ULL;
            }
            if (v > 0) {
                g_huge_size = (size_t)v;
            }
        }
        if (g_huge_size == 0) {
            g_huge_size = (size_t)(2ULL << 20); // default 2MB
        }
    }
    return g_huge_size;
}

static inline void ss_os_stats_record_huge_alloc(void) {
    if (!ss_os_stats_enabled()) { return; }
    atomic_fetch_add_explicit(&g_ss_os_huge_alloc_calls, 1, memory_order_relaxed);
}

static inline void ss_os_stats_record_huge_fail(void) {
    if (!ss_os_stats_enabled()) { return; }
    atomic_fetch_add_explicit(&g_ss_os_huge_fail_calls, 1, memory_order_relaxed);
}

// ============================================================================
// OS Acquisition API
// ============================================================================

// Acquire aligned SuperSlab memory from the OS via mmap.
//
// Parameters:
//   size_class: Size class index (0-7, for statistics)
//   ss_size:    SuperSlab size in bytes (e.g., 2^21 = 2MB)
//   ss_mask:    Alignment mask (ss_size - 1)
//   populate:   If nonzero, use MAP_POPULATE to prefault pages
//
// Returns: Aligned pointer, or NULL on OOM
//
// Guarantees:
// - Returns NULL on OOM (never crashes)
// - Returned pointer is aligned to an ss_size boundary
// - Logs OOM once per process (not spammy)
// - Updates the g_ss_mmap_count counter
//
// Thread-safe: Yes (no shared state mutations except atomic counters)
void* ss_os_acquire(uint8_t size_class, size_t ss_size, uintptr_t ss_mask,
                    int populate);

#endif // HAKMEM_SS_OS_ACQUIRE_BOX_H
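// ----------------------------------------------------------------------------
// Usage sketch for ss_os_acquire() (illustrative; the constants below are
// assumptions matching the documented 2MB default, and release goes through
// plain munmap since this box declares no free function):
//
//   size_t    ss_size = (size_t)(2ULL << 20);      // 2MB SuperSlab
//   uintptr_t ss_mask = (uintptr_t)(ss_size - 1);  // alignment mask
//   void* ss = ss_os_acquire(/*size_class=*/0, ss_size, ss_mask,
//                            /*populate=*/0);
//   if (ss == NULL) {
//       // OOM: fall back or fail the allocation; ss_os_acquire never crashes.
//   } else {
//       // ... carve slabs out of [ss, ss + ss_size) ...
//       munmap(ss, ss_size);                        // return memory to the OS
//   }
// ----------------------------------------------------------------------------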