Guard madvise ENOMEM and stabilize pool/tiny front v3

Moe Charm (CI)
2025-12-09 21:50:15 +09:00
parent e274d5f6a9
commit a905e0ffdd
45 changed files with 3154 additions and 242 deletions

View File

@ -4,6 +4,7 @@
#include "pagefault_telemetry_box.h" // Box PageFaultTelemetry (PF_BUCKET_MID)
#include "box/pool_hotbox_v2_box.h"
#include "box/tiny_heap_env_box.h" // TinyHeap profile (C7_SAFE では flatten を無効化)
// Pool v2 is experimental. Default OFF (use legacy v1 path).
static inline int hak_pool_v2_enabled(void) {
@ -40,6 +41,12 @@ static inline int hak_pool_v2_tls_fast_enabled(void) {
static inline int hak_pool_v1_flatten_enabled(void) {
static int g = -1;
if (__builtin_expect(g == -1, 0)) {
// Force OFF (safe side) under the C7_SAFE/C7_ULTRA_BENCH profiles
int mode = tiny_heap_profile_mode();
if (mode == TINY_HEAP_PROFILE_C7_SAFE || mode == TINY_HEAP_PROFILE_C7_ULTRA_BENCH) {
g = 0;
return g;
}
const char* e = getenv("HAKMEM_POOL_V1_FLATTEN_ENABLED");
g = (e && *e && *e != '0') ? 1 : 0;
}

View File

@ -0,0 +1,86 @@
// pool_hotbox_v2_box.h — Experimental PoolHotBox v2 (hot path scaffold)
#ifndef POOL_HOTBOX_V2_BOX_H
#define POOL_HOTBOX_V2_BOX_H
#include <stdint.h>
#include <stdlib.h>
#include <stdatomic.h>
#include "hakmem_pool.h" // for POOL_NUM_CLASSES and size helpers
// ENV gates (bench/experiment only):
// HAKMEM_POOL_V2_ENABLED : overall ON/OFF (default OFF)
// HAKMEM_POOL_V2_CLASSES : bitmask; bit i=1 → put class i on HotBox v2
// HAKMEM_POOL_V2_STATS : stats dump ON/OFF
typedef struct PoolHotBoxV2Stats {
_Atomic uint64_t alloc_calls;
_Atomic uint64_t alloc_fast;
_Atomic uint64_t alloc_refill;
_Atomic uint64_t alloc_refill_fail;
_Atomic uint64_t alloc_fallback_v1;
_Atomic uint64_t free_calls;
_Atomic uint64_t free_fast;
_Atomic uint64_t free_fallback_v1;
_Atomic uint64_t page_of_fail_header_missing;
_Atomic uint64_t page_of_fail_out_of_range;
_Atomic uint64_t page_of_fail_misaligned;
_Atomic uint64_t page_of_fail_unknown;
} PoolHotBoxV2Stats;
// Simple page/class structs for future HotBox v2 implementation.
typedef struct pool_page_v2 {
void* freelist;
uint32_t used;
uint32_t capacity;
uint32_t block_size;
uint32_t class_idx;
void* base;
void* slab_ref;
struct pool_page_v2* next;
} pool_page_v2;
typedef struct pool_class_v2 {
pool_page_v2* current;
pool_page_v2* partial;
uint16_t max_partial_pages;
uint16_t partial_count;
uint32_t block_size;
} pool_class_v2;
typedef struct pool_ctx_v2 {
pool_class_v2 cls[POOL_NUM_CLASSES];
} pool_ctx_v2;
typedef struct PoolColdIface {
void* (*refill_page)(void* cold_ctx,
uint32_t class_idx,
uint32_t* out_block_size,
uint32_t* out_capacity,
void** out_slab_ref);
void (*retire_page)(void* cold_ctx,
uint32_t class_idx,
void* slab_ref,
void* base);
} PoolColdIface;
// ENV helpers
int pool_hotbox_v2_class_enabled(int class_idx);
int pool_hotbox_v2_stats_enabled(void);
// TLS/context helpers
pool_ctx_v2* pool_v2_tls_get(void);
// Hot path (currently stubbed to always fall back to v1; structure only)
void* pool_hotbox_v2_alloc(uint32_t class_idx, size_t size, uintptr_t site_id);
int pool_hotbox_v2_free(uint32_t class_idx, void* raw_block);
// Stats helpers
void pool_hotbox_v2_record_free_call(uint32_t class_idx);
void pool_hotbox_v2_record_alloc_fallback(uint32_t class_idx);
void pool_hotbox_v2_record_free_fallback(uint32_t class_idx);
// Stats export (destructor in hakmem_pool.c)
extern PoolHotBoxV2Stats g_pool_hotbox_v2_stats[POOL_NUM_CLASSES];
#endif // POOL_HOTBOX_V2_BOX_H
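
The bitmask semantics of HAKMEM_POOL_V2_CLASSES (bit i routes class i) suggest the same cached-getenv shape used by the other gates in this commit. A minimal sketch, assuming the real definition lives next to the pool implementation:

// Sketch only — the actual definition is elsewhere in the tree.
int pool_hotbox_v2_class_enabled(int class_idx) {
    static int g_parsed = 0;
    static unsigned g_mask = 0;
    if (__builtin_expect(!g_parsed, 0)) {
        const char* e = getenv("HAKMEM_POOL_V2_CLASSES");
        g_mask = (e && *e) ? (unsigned)strtoul(e, NULL, 0) : 0u; // default: no classes on v2
        g_parsed = 1;
    }
    if (!hak_pool_v2_enabled()) return 0; // overall gate (default OFF) wins
    if (class_idx < 0 || class_idx >= POOL_NUM_CLASSES) return 0;
    return (g_mask & (1u << class_idx)) != 0;
}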

View File

@ -0,0 +1,33 @@
// pool_hotbox_v2_header_box.h
// Small helpers for embedding/reading the v2 pool page pointer in the page header.
#pragma once
#include <stdint.h>
// Mask a pointer down to the page base (POOL_PAGE_SIZE is a power of two).
static inline void* pool_hotbox_v2_page_base(void* ptr, size_t page_size) {
return (void*)((uintptr_t)ptr & ~((uintptr_t)page_size - 1));
}
// Store the PoolHotBox v2 page pointer into the page header.
// Caller must ensure base is page_size aligned and non-NULL.
static inline void pool_hotbox_v2_header_store(void* page_base, void* page_ptr) {
if (!page_base) return;
void** hdr = (void**)page_base;
*hdr = page_ptr;
}
// Clear the page header pointer (used on retire to avoid stale lookups).
static inline void pool_hotbox_v2_header_clear(void* page_base) {
if (!page_base) return;
void** hdr = (void**)page_base;
*hdr = NULL;
}
// Load the page pointer from the page header (may return NULL).
static inline void* pool_hotbox_v2_header_load(void* page_base) {
if (!page_base) return NULL;
void** hdr = (void**)page_base;
return *hdr;
}
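
Because every block shares its page's base, these three helpers give free() an O(1) path back to the owning pool_page_v2. A hedged usage sketch (the carve/free/retire call sites are assumptions; POOL_PAGE_SIZE is the pool's power-of-two page granule):

// Round-trip sketch of the header helpers above.
static void example_header_roundtrip(pool_page_v2* page, void* raw_block) {
    void* base = pool_hotbox_v2_page_base(raw_block, POOL_PAGE_SIZE);
    pool_hotbox_v2_header_store(base, page);   // at carve: publish the owning page
    pool_page_v2* owner = (pool_page_v2*)pool_hotbox_v2_header_load(base); // at free: O(1) lookup
    (void)owner;                               // may be NULL if the page was already retired
    pool_hotbox_v2_header_clear(base);         // at retire: prevent stale lookups
}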

View File

@ -41,6 +41,22 @@ static void mid_desc_register(void* page, int class_idx, uint64_t owner_tid) {
void* canonical_page = (void*)((uintptr_t)page & ~((uintptr_t)POOL_PAGE_SIZE - 1));
uint32_t h = mid_desc_hash(canonical_page);
pthread_mutex_lock(&g_mid_desc_mu[h]);
// Check if descriptor already exists
MidPageDesc* existing = g_mid_desc_head[h];
while (existing) {
if (existing->page == canonical_page) {
// Descriptor already exists, update owner_tid if needed
if (existing->owner_tid == 0 && owner_tid != 0) {
existing->owner_tid = owner_tid;
}
pthread_mutex_unlock(&g_mid_desc_mu[h]);
return;
}
existing = existing->next;
}
// Descriptor doesn't exist, create new one
MidPageDesc* d = (MidPageDesc*)hkm_libc_malloc(sizeof(MidPageDesc)); // P0 Fix: Use libc malloc
if (d) {
d->page = canonical_page;
@ -76,7 +92,16 @@ static void mid_desc_adopt(void* addr, int class_idx, uint64_t owner_tid) {
if (d->owner_tid == 0) d->owner_tid = owner_tid;
} else {
MidPageDesc* nd = (MidPageDesc*)hkm_libc_malloc(sizeof(MidPageDesc)); // P0 Fix: Use libc malloc
- if (nd) { nd->page = page; nd->class_idx = (uint8_t)class_idx; nd->owner_tid = owner_tid; nd->next = g_mid_desc_head[h]; g_mid_desc_head[h] = nd; }
+ if (nd) {
+ nd->page = page;
+ nd->class_idx = (uint8_t)class_idx;
+ nd->owner_tid = owner_tid;
+ nd->next = g_mid_desc_head[h];
+ atomic_store(&nd->in_use, 0);
+ nd->blocks_per_page = 0;
+ atomic_store(&nd->pending_dn, 0);
+ g_mid_desc_head[h] = nd;
+ }
}
pthread_mutex_unlock(&g_mid_desc_mu[h]);
}

View File

@ -0,0 +1,80 @@
// smallobject_cold_iface_v1.h - Cold interface wrapper for SmallObject HotBox v3
// Role:
// - Gathers the boundary between the SmallObject Hot Box (v3) and the existing v1 Tiny Cold layer into one place.
// - Phase A: wraps only the C7 refill/retire onto the v1 TinyHeap.
#pragma once
#include <stdint.h>
#include <stdlib.h>
#include "tiny_heap_box.h"
#include "smallobject_hotbox_v3_box.h"
#include "../hakmem_tiny.h" // TINY_SLAB_SIZE for slab base mask
struct so_page_v3;
typedef struct SmallObjectColdIface {
struct so_page_v3* (*refill_page)(void* cold_ctx, uint32_t class_idx);
void (*retire_page)(void* cold_ctx, uint32_t class_idx, struct so_page_v3* page);
} SmallObjectColdIface;
static inline struct so_page_v3* smallobject_cold_refill_page_v1(void* cold_ctx, uint32_t class_idx) {
if (class_idx != 7 && class_idx != 6) {
return NULL; // Phase A-2: only C7/C6 are handled
}
tiny_heap_ctx_t* ctx = cold_ctx ? (tiny_heap_ctx_t*)cold_ctx : tiny_heap_ctx_for_thread();
if (!ctx) return NULL;
tiny_heap_page_t* lease = tiny_heap_prepare_page(ctx, (int)class_idx);
if (!lease) return NULL;
so_page_v3* page = (so_page_v3*)calloc(1, sizeof(so_page_v3));
if (!page) return NULL;
page->lease_page = lease;
page->meta = lease->meta;
page->ss = lease->ss;
page->slab_idx = lease->slab_idx;
page->base = lease->base;
page->capacity = lease->capacity;
page->block_size = (uint32_t)tiny_stride_for_class((int)class_idx);
page->class_idx = class_idx;
page->slab_ref = lease;
return page;
}
static inline void smallobject_cold_retire_page_v1(void* cold_ctx, uint32_t class_idx, struct so_page_v3* page) {
if (!page || (class_idx != 7 && class_idx != 6)) {
if (page) {
free(page);
}
return;
}
tiny_heap_ctx_t* ctx = cold_ctx ? (tiny_heap_ctx_t*)cold_ctx : tiny_heap_ctx_for_thread();
if (!ctx) {
free(page);
return;
}
tiny_heap_page_t* lease = page->lease_page;
if (!lease) {
free(page);
return;
}
lease->base = (uint8_t*)page->base;
lease->capacity = (uint16_t)page->capacity;
lease->used = (uint16_t)page->used;
lease->meta = page->meta;
lease->ss = page->ss;
lease->slab_idx = page->slab_idx;
lease->free_list = page->freelist;
tiny_heap_page_becomes_empty(ctx, (int)class_idx, lease);
free(page);
}
static inline SmallObjectColdIface smallobject_cold_iface_v1(void) {
SmallObjectColdIface iface = {
.refill_page = smallobject_cold_refill_page_v1,
.retire_page = smallobject_cold_retire_page_v1,
};
return iface;
}
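
A sketch of how a v3 hot box is expected to drive this boundary; per the code above, passing NULL for cold_ctx falls back to the calling thread's TinyHeap context:

// Sketch: refill a C7 page through the v1 cold wrapper, then retire it.
static void example_cold_roundtrip(void) {
    SmallObjectColdIface cold = smallobject_cold_iface_v1();
    so_page_v3* page = cold.refill_page(NULL, 7); // NULL → tiny_heap_ctx_for_thread()
    if (page) {
        // ... carve blocks from page->base, maintaining page->used / page->freelist ...
        cold.retire_page(NULL, 7, page); // copies state back onto the lease, frees the shim
    }
}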

View File

@ -0,0 +1,74 @@
// smallobject_hotbox_v3_box.h - SmallObject HotHeap v3 (C7-first skeleton)
//
// Phase A/B: set up the types, TLS, and stats, and give the front a frame it can call into.
// The body is still a v1 fallback (so_alloc returns NULL).
#pragma once
#include <stdint.h>
#include <stddef.h>
#include <stdatomic.h>
#include "tiny_geometry_box.h"
#include "smallobject_hotbox_v3_env_box.h"
#include "tiny_region_id.h"
#ifndef SMALLOBJECT_NUM_CLASSES
#define SMALLOBJECT_NUM_CLASSES TINY_NUM_CLASSES
#endif
struct tiny_heap_page_t;
struct TinySlabMeta;
struct SuperSlab;
typedef struct so_page_v3 {
void* freelist;
uint32_t used;
uint32_t capacity;
uint32_t block_size;
uint32_t class_idx;
uint32_t flags;
void* base; // start of the user region after carving
void* slab_base; // 64KiB slab base (the page_of header is written here)
struct TinySlabMeta* meta;
struct SuperSlab* ss;
uint16_t slab_idx;
struct tiny_heap_page_t* lease_page;
void* slab_ref; // kept as a generic token; currently same as lease_page for v1
struct so_page_v3* next;
} so_page_v3;
typedef struct so_class_v3 {
so_page_v3* current;
so_page_v3* partial;
uint16_t max_partial_pages;
uint16_t partial_count;
uint32_t block_size;
} so_class_v3;
typedef struct so_ctx_v3 {
so_class_v3 cls[SMALLOBJECT_NUM_CLASSES];
} so_ctx_v3;
typedef struct so_stats_class_v3 {
_Atomic uint64_t route_hits;
_Atomic uint64_t alloc_calls;
_Atomic uint64_t alloc_refill;
_Atomic uint64_t alloc_fallback_v1;
_Atomic uint64_t free_calls;
_Atomic uint64_t free_fallback_v1;
} so_stats_class_v3;
// Stats helpers (defined in core/smallobject_hotbox_v3.c)
int so_v3_stats_enabled(void);
void so_v3_record_route_hit(uint8_t ci);
void so_v3_record_alloc_call(uint8_t ci);
void so_v3_record_alloc_refill(uint8_t ci);
void so_v3_record_alloc_fallback(uint8_t ci);
void so_v3_record_free_call(uint8_t ci);
void so_v3_record_free_fallback(uint8_t ci);
// TLS accessor (core/smallobject_hotbox_v3.c)
so_ctx_v3* so_tls_get(void);
// Hot path API (Phase B: stub → always fallback to v1)
void* so_alloc(uint32_t class_idx);
void so_free(uint32_t class_idx, void* ptr);
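
Since so_alloc is still a stub that returns NULL, any front integration has to keep the v1 path live. A sketch of the expected pattern (tiny_alloc_v1 is a hypothetical stand-in for the legacy entry point):

// Sketch of the intended front-side call pattern.
static void* example_front_alloc(uint8_t ci) {
    void* p = NULL;
    if (small_heap_v3_class_enabled(ci)) {
        so_v3_record_route_hit(ci);
        p = so_alloc(ci);                // Phase B stub: currently returns NULL
    }
    if (!p) {
        so_v3_record_alloc_fallback(ci);
        p = tiny_alloc_v1(ci);           // hypothetical name for the legacy v1 entry
    }
    return p;
}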

View File

@ -0,0 +1,47 @@
// smallobject_hotbox_v3_env_box.h - ENV gate for SmallObject HotHeap v3
// Role:
// - Reads HAKMEM_SMALL_HEAP_V3_ENABLED / HAKMEM_SMALL_HEAP_V3_CLASSES in one place.
// - Default is C7-only ON (class mask 0x80); v3 is disabled only when the ENV explicitly sets 0.
#pragma once
#include <stdint.h>
#include <stdlib.h>
#include "../hakmem_tiny_config.h"
static inline int small_heap_v3_enabled(void) {
static int g_enable = -1;
if (__builtin_expect(g_enable == -1, 0)) {
const char* e = getenv("HAKMEM_SMALL_HEAP_V3_ENABLED");
if (e && *e) {
g_enable = (*e != '0') ? 1 : 0;
} else {
// Default is ON (enabled when the ENV is unset)
g_enable = 1;
}
}
return g_enable;
}
static inline int small_heap_v3_class_enabled(uint8_t class_idx) {
static int g_parsed = 0;
static unsigned g_mask = 0;
if (__builtin_expect(!g_parsed, 0)) {
const char* e = getenv("HAKMEM_SMALL_HEAP_V3_CLASSES");
if (e && *e) {
unsigned v = (unsigned)strtoul(e, NULL, 0);
g_mask = v & 0xFFu;
} else {
// Default: v3 ON for C7 only
g_mask = 0x80u;
}
g_parsed = 1;
}
if (!small_heap_v3_enabled()) return 0;
if (class_idx >= TINY_NUM_CLASSES) return 0;
return (g_mask & (1u << class_idx)) != 0;
}
static inline int small_heap_v3_c7_enabled(void) {
return small_heap_v3_class_enabled(7);
}
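
Note that this gate inverts the convention of the other experimental gates in this commit: unset means ON. A small sketch of the effective defaults:

// Effective results with the code above (sketch):
//   ENV unset                         → small_heap_v3_class_enabled(7) == 1, all others 0
//   HAKMEM_SMALL_HEAP_V3_ENABLED=0    → every class returns 0
//   HAKMEM_SMALL_HEAP_V3_CLASSES=0xC0 → classes 6 and 7 return 1
static void example_gate_defaults(void) {
    int c7_on = small_heap_v3_c7_enabled();     // 1 under the defaults
    int c0_on = small_heap_v3_class_enabled(0); // 0 under the defaults (mask bit 0 clear)
    (void)c7_on; (void)c0_on;
}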

View File

@ -360,7 +360,7 @@ void superslab_free(SuperSlab* ss) {
}
if (lazy_zero_enabled) {
#ifdef MADV_DONTNEED
- (void)madvise((void*)ss, ss_size, MADV_DONTNEED);
+ (void)ss_os_madvise_guarded((void*)ss, ss_size, MADV_DONTNEED, "ss_lru_madvise");
ss_os_stats_record_madvise();
#endif
}

View File

@ -1,6 +1,7 @@
// ss_os_acquire_box.c - SuperSlab OS Memory Acquisition Box Implementation
#include "ss_os_acquire_box.h"
#include "../hakmem_build_flags.h"
#include "../hakmem_env_cache.h"
#include <sys/mman.h>
#include <sys/resource.h>
#include <errno.h>
@ -15,8 +16,11 @@ extern _Atomic uint64_t g_final_fallback_mmap_count;
extern _Atomic uint64_t g_ss_os_alloc_calls;
extern _Atomic uint64_t g_ss_os_free_calls;
extern _Atomic uint64_t g_ss_os_madvise_calls;
extern _Atomic uint64_t g_ss_os_madvise_fail_enomem;
extern _Atomic uint64_t g_ss_os_madvise_fail_other;
extern _Atomic uint64_t g_ss_os_huge_alloc_calls;
extern _Atomic uint64_t g_ss_os_huge_fail_calls;
extern _Atomic bool g_ss_madvise_disabled;
// ============================================================================
// OOM Diagnostics
@ -240,9 +244,12 @@ void* ss_os_acquire(uint8_t size_class, size_t ss_size, uintptr_t ss_mask, int p
// See: EXPLICIT_PREFAULT_IMPLEMENTATION_REPORT_20251205.md
#ifdef MADV_POPULATE_WRITE
if (populate) {
- int ret = madvise(ptr, ss_size, MADV_POPULATE_WRITE);
- ss_os_stats_record_madvise();
+ int ret = ss_os_madvise_guarded(ptr, ss_size, MADV_POPULATE_WRITE, "ss_os_acquire_populate");
if (ret != 0) {
+ if (HAK_ENV_SS_MADVISE_STRICT() && errno == EINVAL) {
+ fprintf(stderr, "[SS_OS] madvise(MADV_POPULATE_WRITE) EINVAL (strict mode). Aborting.\n");
+ abort();
+ }
// Fallback for kernels where MADV_POPULATE_WRITE is available but the call still fails:
// Use explicit page-by-page touching with writes
volatile char* p = (volatile char*)ptr;
@ -273,10 +280,14 @@ static void ss_os_stats_destructor(void) {
return;
}
fprintf(stderr,
"[SS_OS_STATS] alloc=%llu free=%llu madvise=%llu mmap_total=%llu fallback_mmap=%llu huge_alloc=%llu huge_fail=%llu\n",
"[SS_OS_STATS] alloc=%llu free=%llu madvise=%llu madvise_enomem=%llu madvise_other=%llu madvise_disabled=%d "
"mmap_total=%llu fallback_mmap=%llu huge_alloc=%llu huge_fail=%llu\n",
(unsigned long long)atomic_load_explicit(&g_ss_os_alloc_calls, memory_order_relaxed),
(unsigned long long)atomic_load_explicit(&g_ss_os_free_calls, memory_order_relaxed),
(unsigned long long)atomic_load_explicit(&g_ss_os_madvise_calls, memory_order_relaxed),
(unsigned long long)atomic_load_explicit(&g_ss_os_madvise_fail_enomem, memory_order_relaxed),
(unsigned long long)atomic_load_explicit(&g_ss_os_madvise_fail_other, memory_order_relaxed),
atomic_load_explicit(&g_ss_madvise_disabled, memory_order_relaxed) ? 1 : 0,
(unsigned long long)atomic_load_explicit(&g_ss_mmap_count, memory_order_relaxed),
(unsigned long long)atomic_load_explicit(&g_final_fallback_mmap_count, memory_order_relaxed),
(unsigned long long)atomic_load_explicit(&g_ss_os_huge_alloc_calls, memory_order_relaxed),

View File

@ -18,7 +18,11 @@
#include <stdint.h>
#include <stddef.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <errno.h>
#include <stdio.h>
// ============================================================================
// Global Counters (for debugging/diagnostics)
@ -29,8 +33,11 @@ extern _Atomic uint64_t g_final_fallback_mmap_count;
extern _Atomic uint64_t g_ss_os_alloc_calls;
extern _Atomic uint64_t g_ss_os_free_calls;
extern _Atomic uint64_t g_ss_os_madvise_calls;
extern _Atomic uint64_t g_ss_os_madvise_fail_enomem;
extern _Atomic uint64_t g_ss_os_madvise_fail_other;
extern _Atomic uint64_t g_ss_os_huge_alloc_calls;
extern _Atomic uint64_t g_ss_os_huge_fail_calls;
extern _Atomic bool g_ss_madvise_disabled;
static inline int ss_os_stats_enabled(void) {
static int g_ss_os_stats_enabled = -1;
@ -62,6 +69,52 @@ static inline void ss_os_stats_record_madvise(void) {
atomic_fetch_add_explicit(&g_ss_os_madvise_calls, 1, memory_order_relaxed);
}
// ============================================================================
// madvise guard (shared by Superslab hot/cold paths)
// ============================================================================
//
static inline int ss_os_madvise_guarded(void* ptr, size_t len, int advice, const char* where) {
(void)where;
if (!ptr || len == 0) {
return 0;
}
if (atomic_load_explicit(&g_ss_madvise_disabled, memory_order_relaxed)) {
return 0;
}
int ret = madvise(ptr, len, advice);
ss_os_stats_record_madvise();
if (ret == 0) {
return 0;
}
int e = errno;
if (e == ENOMEM) {
atomic_fetch_add_explicit(&g_ss_os_madvise_fail_enomem, 1, memory_order_relaxed);
atomic_store_explicit(&g_ss_madvise_disabled, true, memory_order_relaxed);
#if !HAKMEM_BUILD_RELEASE
static _Atomic bool g_ss_madvise_enomem_logged = false;
bool already = atomic_exchange_explicit(&g_ss_madvise_enomem_logged, true, memory_order_relaxed);
if (!already) {
fprintf(stderr,
"[SS_OS_MADVISE] madvise(advice=%d, ptr=%p, len=%zu) failed with ENOMEM "
"(vm.max_map_count reached?). Disabling further madvise calls.\n",
advice, ptr, len);
}
#endif
return 0; // soft fail, do not propagate ENOMEM
}
atomic_fetch_add_explicit(&g_ss_os_madvise_fail_other, 1, memory_order_relaxed);
if (e == EINVAL) {
errno = e;
return -1; // let caller decide (strict mode)
}
errno = e;
return 0;
}
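
The guard's contract, restated from the code above: ENOMEM is absorbed (and latches g_ss_madvise_disabled, so later calls become no-ops), EINVAL is returned to the caller for strict-mode handling, and any other errno is counted but reported as success. Caller-side, that reduces to a sketch like:

// Sketch: the only non-zero return a caller can observe is the EINVAL case.
static void example_guarded_madvise(void* ptr, size_t len) {
    int ret = ss_os_madvise_guarded(ptr, len, MADV_DONTNEED, "example_site");
    if (ret != 0) {
        // errno == EINVAL here; strict builds may abort, as ss_os_acquire does above.
    }
}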
// ============================================================================
// HugePage Experiment (research-only)
// ============================================================================

View File

@ -0,0 +1,37 @@
// tiny_cold_iface_v1.h
// Common boundary (v1 wrapper) through which other Hot Boxes such as TinyHotHeap v2 talk to the Superslab/Tier/Stats layers.
// Precondition: tiny_heap_page_t / tiny_heap_ctx_t are already defined in tiny_heap_box.h.
#pragma once
#include "tiny_heap_box.h"
typedef struct TinyColdIface {
tiny_heap_page_t* (*refill_page)(void* cold_ctx, uint32_t class_idx);
void (*retire_page)(void* cold_ctx, uint32_t class_idx, tiny_heap_page_t* page);
} TinyColdIface;
// Forward declarations for the v1 cold helpers (defined in tiny_heap_box.h)
tiny_heap_page_t* tiny_heap_prepare_page(tiny_heap_ctx_t* ctx, int class_idx);
void tiny_heap_page_becomes_empty(tiny_heap_ctx_t* ctx, int class_idx, tiny_heap_page_t* page);
static inline tiny_heap_page_t* tiny_cold_refill_page_v1(void* cold_ctx, uint32_t class_idx) {
if (!cold_ctx) {
return NULL;
}
return tiny_heap_prepare_page((tiny_heap_ctx_t*)cold_ctx, (int)class_idx);
}
static inline void tiny_cold_retire_page_v1(void* cold_ctx, uint32_t class_idx, tiny_heap_page_t* page) {
if (!cold_ctx || !page) {
return;
}
tiny_heap_page_becomes_empty((tiny_heap_ctx_t*)cold_ctx, (int)class_idx, page);
}
static inline TinyColdIface tiny_cold_iface_v1(void) {
TinyColdIface iface = {
.refill_page = tiny_cold_refill_page_v1,
.retire_page = tiny_cold_retire_page_v1,
};
return iface;
}
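
Usage is symmetric with the SmallObject variant above; a sketch of a hot box wiring itself to the v1 cold layer (tiny_heap_ctx_for_thread is the accessor used earlier in this commit):

// Sketch: lease a page from the v1 cold layer and hand it back when drained.
static void example_tiny_cold_roundtrip(void) {
    TinyColdIface cold = tiny_cold_iface_v1();
    tiny_heap_ctx_t* ctx = tiny_heap_ctx_for_thread();
    tiny_heap_page_t* page = cold.refill_page(ctx, 7);
    if (page) {
        // ... serve allocations from the page ...
        cold.retire_page(ctx, 7, page);
    }
}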

View File

@ -0,0 +1,101 @@
// tiny_front_v3_env_box.h - Tiny Front v3 ENV gate & snapshot (guard/UC/header)
#pragma once
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
typedef struct TinyFrontV3Snapshot {
bool unified_cache_on;
bool tiny_guard_on;
uint8_t header_mode; // cached value of tiny_header_mode()
bool header_v3_enabled; // ENV: HAKMEM_TINY_HEADER_V3_ENABLED
bool header_v3_skip_c7; // ENV: HAKMEM_TINY_HEADER_V3_SKIP_C7
} TinyFrontV3Snapshot;
// Size→class/route entry for the Tiny front v3 LUT (route_kind is expected to hold a tiny_route_kind_t)
typedef struct TinyFrontV3SizeClassEntry {
uint8_t class_idx;
uint8_t route_kind;
} TinyFrontV3SizeClassEntry;
#define TINY_FRONT_V3_INVALID_CLASS ((uint8_t)0xFF)
extern TinyFrontV3Snapshot g_tiny_front_v3_snapshot;
extern int g_tiny_front_v3_snapshot_ready;
// ENV gate: default OFF
static inline bool tiny_front_v3_enabled(void) {
static int g_enable = -1;
if (__builtin_expect(g_enable == -1, 0)) {
const char* e = getenv("HAKMEM_TINY_FRONT_V3_ENABLED");
g_enable = (e && *e && *e != '0') ? 1 : 0;
}
return g_enable != 0;
}
// Optional: size→class LUT gate (default OFF, for A/B)
static inline bool tiny_front_v3_lut_enabled(void) {
static int g = -1;
if (__builtin_expect(g == -1, 0)) {
const char* e = getenv("HAKMEM_TINY_FRONT_V3_LUT_ENABLED");
g = (e && *e && *e != '0') ? 1 : 0;
}
return g != 0;
}
// Optional: route fast path (Tiny LUT→1 switch). Default OFF for easy rollback.
static inline bool tiny_front_v3_route_fast_enabled(void) {
static int g = -1;
if (__builtin_expect(g == -1, 0)) {
const char* e = getenv("HAKMEM_TINY_FRONT_V3_ROUTE_FAST_ENABLED");
g = (e && *e && *e != '0') ? 1 : 0;
}
return g != 0;
}
// Optional stats gate
static inline bool tiny_front_v3_stats_enabled(void) {
static int g = -1;
if (__builtin_expect(g == -1, 0)) {
const char* e = getenv("HAKMEM_TINY_FRONT_V3_STATS");
g = (e && *e && *e != '0') ? 1 : 0;
}
return g != 0;
}
// Header v3 experimental gate (default OFF)
static inline bool tiny_header_v3_enabled(void) {
static int g = -1;
if (__builtin_expect(g == -1, 0)) {
const char* e = getenv("HAKMEM_TINY_HEADER_V3_ENABLED");
g = (e && *e && *e != '0') ? 1 : 0;
}
return g != 0;
}
// Skip header write for C7 v3 allocs (bench/experiment, default OFF)
static inline bool tiny_header_v3_skip_c7(void) {
static int g = -1;
if (__builtin_expect(g == -1, 0)) {
const char* e = getenv("HAKMEM_TINY_HEADER_V3_SKIP_C7");
g = (e && *e && *e != '0') ? 1 : 0;
}
return g != 0;
}
// Snapshot initializer (implemented in hakmem_tiny.c)
void tiny_front_v3_snapshot_init(void);
// LUT initializer / lookup (implemented in hakmem_tiny.c)
void tiny_front_v3_size_class_lut_init(void);
const TinyFrontV3SizeClassEntry* tiny_front_v3_lut_lookup(size_t size);
// Get cached snapshot (lazy init)
static inline const TinyFrontV3Snapshot* tiny_front_v3_snapshot_get(void) {
if (__builtin_expect(!g_tiny_front_v3_snapshot_ready, 0)) {
tiny_front_v3_snapshot_init();
}
return &g_tiny_front_v3_snapshot;
}
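
tiny_front_v3_size_class_lut_init and tiny_front_v3_lut_lookup are only declared here. A sketch of one plausible shape, assuming a byte-per-size table and a size→class helper (TINY_MAX_SIZE and tiny_class_for_size are hypothetical names):

// Hypothetical LUT init sketch — the real definition lives in hakmem_tiny.c.
static TinyFrontV3SizeClassEntry g_tiny_front_v3_lut[TINY_MAX_SIZE + 1];
void tiny_front_v3_size_class_lut_init(void) {
    for (size_t s = 0; s <= TINY_MAX_SIZE; s++) {
        int ci = tiny_class_for_size(s); // assumed size→class helper; < 0 if not tiny
        g_tiny_front_v3_lut[s].class_idx =
            (ci < 0) ? TINY_FRONT_V3_INVALID_CLASS : (uint8_t)ci;
        g_tiny_front_v3_lut[s].route_kind =
            (ci < 0) ? 0 : (uint8_t)tiny_route_for_class((uint8_t)ci);
    }
}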

View File

@ -36,7 +36,8 @@ typedef struct tiny_hotheap_class_v2 {
tiny_hotheap_page_v2* partial_pages;
tiny_hotheap_page_v2* full_pages;
uint16_t stride;
- uint16_t _pad;
+ uint16_t max_partial_pages; // upper bound on retained empty pages (expect 1-2 for the C7-only case)
+ uint16_t partial_count; // number of partial pages currently held
tiny_hotheap_page_v2 storage_page; // holds a single dedicated C7 page at first (Phase36: reuse when empty)
} tiny_hotheap_class_v2;
@ -51,8 +52,8 @@ extern __thread tiny_hotheap_ctx_v2* g_tiny_hotheap_ctx_v2;
tiny_hotheap_ctx_v2* tiny_hotheap_v2_tls_get(void);
void* tiny_hotheap_v2_alloc(uint8_t class_idx);
void tiny_hotheap_v2_free(uint8_t class_idx, void* p, void* meta);
- void tiny_hotheap_v2_record_route_fallback(void);
- void tiny_hotheap_v2_record_free_fallback(void);
+ void tiny_hotheap_v2_record_route_fallback(uint8_t class_idx);
+ void tiny_hotheap_v2_record_free_fallback(uint8_t class_idx);
typedef struct tiny_hotheap_v2_stats_snapshot {
uint64_t route_hits;
@ -65,11 +66,19 @@ typedef struct tiny_hotheap_v2_stats_snapshot {
uint64_t free_calls;
uint64_t free_fast;
uint64_t free_fallback_v1;
uint64_t cold_refill_fail;
uint64_t cold_retire_calls;
uint64_t retire_calls_v2;
uint64_t prepare_calls;
uint64_t prepare_with_current_null;
uint64_t prepare_from_partial;
uint64_t free_made_current;
uint64_t page_retired;
uint64_t partial_pushes;
uint64_t partial_pops;
uint64_t partial_peak;
uint64_t refill_with_current;
uint64_t refill_with_partial;
} tiny_hotheap_v2_stats_snapshot_t;
void tiny_hotheap_v2_debug_snapshot(tiny_hotheap_v2_stats_snapshot_t* out);
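
The new counters make the partial-list behavior observable; a sketch of dumping them through the snapshot API declared above:

// Sketch: read the v2 stats snapshot (e.g., from an atexit hook).
static void example_dump_hotheap_v2_stats(void) {
    tiny_hotheap_v2_stats_snapshot_t s;
    tiny_hotheap_v2_debug_snapshot(&s);
    fprintf(stderr, "[HOTHEAP_V2] partial_pushes=%llu partial_pops=%llu partial_peak=%llu refill_with_partial=%llu\n",
            (unsigned long long)s.partial_pushes,
            (unsigned long long)s.partial_pops,
            (unsigned long long)s.partial_peak,
            (unsigned long long)s.refill_with_partial);
}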

View File

@ -9,10 +9,13 @@
#include "../hakmem_tiny_config.h"
#include "tiny_heap_env_box.h"
#include "smallobject_hotbox_v3_env_box.h"
typedef enum {
TINY_ROUTE_LEGACY = 0,
- TINY_ROUTE_HEAP = 1, // TinyHeap v1
- TINY_ROUTE_HOTHEAP_V2 = 2, // TinyHotHeap v2
+ TINY_ROUTE_HEAP = 1, // TinyHeap v1
+ TINY_ROUTE_HOTHEAP_V2 = 2, // TinyHotHeap v2
+ TINY_ROUTE_SMALL_HEAP_V3 = 3, // SmallObject HotHeap v3 (C7-first, research box)
} tiny_route_kind_t;
extern tiny_route_kind_t g_tiny_route_class[TINY_NUM_CLASSES];
@ -20,7 +23,9 @@ extern int g_tiny_route_snapshot_done;
static inline void tiny_route_snapshot_init(void) {
for (int i = 0; i < TINY_NUM_CLASSES; i++) {
- if (tiny_hotheap_v2_class_enabled((uint8_t)i)) {
+ if (small_heap_v3_class_enabled((uint8_t)i)) {
+ g_tiny_route_class[i] = TINY_ROUTE_SMALL_HEAP_V3;
+ } else if (tiny_hotheap_v2_class_enabled((uint8_t)i)) {
g_tiny_route_class[i] = TINY_ROUTE_HOTHEAP_V2;
} else if (tiny_heap_box_enabled() && tiny_heap_class_route_enabled(i)) {
g_tiny_route_class[i] = TINY_ROUTE_HEAP;
@ -42,7 +47,7 @@ static inline tiny_route_kind_t tiny_route_for_class(uint8_t ci) {
}
static inline int tiny_route_is_heap_kind(tiny_route_kind_t route) {
- return route == TINY_ROUTE_HEAP || route == TINY_ROUTE_HOTHEAP_V2;
+ return route == TINY_ROUTE_HEAP || route == TINY_ROUTE_HOTHEAP_V2 || route == TINY_ROUTE_SMALL_HEAP_V3;
}
// Does the C7 front use TinyHeap? (decided via the route snapshot)
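
With the new enum value, per-class dispatch becomes a four-way switch on the snapshot. A sketch (tiny_heap_alloc_v1 and tiny_legacy_alloc are hypothetical stand-ins for the v1 and legacy entry points):

// Sketch: front-side dispatch on the route snapshot.
static void* example_route_dispatch(uint8_t ci) {
    switch (tiny_route_for_class(ci)) {
    case TINY_ROUTE_SMALL_HEAP_V3: return so_alloc(ci);              // may be NULL → caller falls back
    case TINY_ROUTE_HOTHEAP_V2:    return tiny_hotheap_v2_alloc(ci);
    case TINY_ROUTE_HEAP:          return tiny_heap_alloc_v1(ci);    // hypothetical v1 entry
    default:                       return tiny_legacy_alloc(ci);     // hypothetical legacy entry
    }
}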