Fix debug build: gate the Tiny observation snapshot in hakmem_tiny_stats.c behind HAKMEM_TINY_OBS_ENABLE so it no longer references the incomplete TinyObsStats type and missing globals. The debug build now passes, enabling C7 triage with fail-fast guards.

Author: Moe Charm (CI)
Date: 2025-11-10 03:00:00 +09:00
parent d55ee48459
commit 1b6624dec4
28 changed files with 622 additions and 131 deletions
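
For context, here is a minimal sketch of the gating pattern the commit message describes. The field names, the g_tiny_obs_stats global, and the tiny_obs_snapshot() helper are hypothetical; the real definitions live in hakmem_tiny_stats.c and may differ:

    #include <stdint.h>

    #if HAKMEM_TINY_OBS_ENABLE
    typedef struct {
        uint64_t alloc_count;   /* assumed field */
        uint64_t carve_count;   /* assumed field */
    } TinyObsStats;

    extern TinyObsStats g_tiny_obs_stats;       /* assumed global */
    void tiny_obs_snapshot(TinyObsStats* out);  /* assumed helper */
    #define TINY_OBS_SNAPSHOT(out) tiny_obs_snapshot(out)
    #else
    /* With observation disabled, the snapshot compiles to a no-op, so debug
     * builds no longer reference the incomplete type or missing globals. */
    #define TINY_OBS_SNAPSHOT(out) ((void)0)
    #endif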

tiny_superslab_alloc.inc.h

@@ -1,4 +1,4 @@
-// tiny_superslab_alloc.inc.h - SuperSlab Allocation Layer
+// tiny_superslab_alloc.inc.h - SuperSlab Allocation Layer (Box 4)
 // Purpose: Slab allocation, refill, and adoption logic
 // Extracted from: hakmem_tiny_free.inc lines 626-1170
 // Box Theory: Box 4 (Refill/Adoption) integration
@@ -14,6 +14,7 @@
 // Phase 6.24: Allocate from SuperSlab slab (lazy freelist + linear allocation)
 #include "hakmem_tiny_superslab_constants.h"
+#include "tiny_box_geometry.h"  // Box 3: Geometry & Capacity Calculator
 static inline void* superslab_alloc_from_slab(SuperSlab* ss, int slab_idx) {
     TinySlabMeta* meta = &ss->slabs[slab_idx];
@@ -70,19 +71,15 @@ static inline void* superslab_alloc_from_slab(SuperSlab* ss, int slab_idx) {
     // Phase 6.24: Linear allocation mode (freelist == NULL)
     // This avoids the 4000-8000 cycle cost of building freelist on init
     if (__builtin_expect(meta->freelist == NULL && meta->used < meta->capacity, 1)) {
-        // Linear allocation: use canonical tiny_slab_base_for() only
-        size_t unit_sz = g_tiny_class_sizes[ss->size_class]
-#if HAKMEM_TINY_HEADER_CLASSIDX
-            + ((ss->size_class != 7) ? 1 : 0)
-#endif
-            ;
-        uint8_t* base = tiny_slab_base_for(ss, slab_idx);
-        void* block_base = (void*)(base + ((size_t)meta->used * unit_sz));
+        // Box 3: Get stride and slab base
+        size_t unit_sz = tiny_stride_for_class(ss->size_class);
+        uint8_t* base = tiny_slab_base_for_geometry(ss, slab_idx);
+        void* block_base = tiny_block_at_index(base, meta->used, unit_sz);
 #if !HAKMEM_BUILD_RELEASE
-        // Debug safety: Ensure we never carve past slab usable region (capacity mismatch guard)
-        size_t dbg_usable = (slab_idx == 0) ? SUPERSLAB_SLAB0_USABLE_SIZE : SUPERSLAB_SLAB_USABLE_SIZE;
-        uintptr_t dbg_off = (uintptr_t)((uint8_t*)block_base - base);
-        if (__builtin_expect(dbg_off + unit_sz > dbg_usable, 0)) {
+        // Box 3: Debug safety guard
+        if (__builtin_expect(!tiny_carve_guard(slab_idx, meta->used, unit_sz, 1), 0)) {
+            size_t dbg_usable = tiny_usable_bytes_for_slab(slab_idx);
+            uintptr_t dbg_off = (uintptr_t)((uint8_t*)block_base - base);
             fprintf(stderr, "[TINY_ALLOC_BOUNDS] cls=%u slab=%d used=%u cap=%u unit=%zu off=%lu usable=%zu\n",
                     ss->size_class, slab_idx, meta->used, meta->capacity, unit_sz,
                     (unsigned long)dbg_off, dbg_usable);
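
For reference, the Box 3 helpers introduced above can be inferred from the inline logic they replace. This is a hypothetical reconstruction; the actual bodies live in tiny_box_geometry.h and may differ:

    /* Hypothetical reconstruction, inferred from the replaced inline code. */
    static inline size_t tiny_stride_for_class(int cls) {
        size_t stride = g_tiny_class_sizes[cls];
    #if HAKMEM_TINY_HEADER_CLASSIDX
        if (cls != 7) stride += 1;  /* 1-byte class-index header; class 7 opts out */
    #endif
        return stride;
    }

    static inline void* tiny_block_at_index(uint8_t* base, uint32_t idx, size_t stride) {
        return (void*)(base + (size_t)idx * stride);
    }

    static inline size_t tiny_usable_bytes_for_slab(int slab_idx) {
        return (slab_idx == 0) ? SUPERSLAB_SLAB0_USABLE_SIZE : SUPERSLAB_SLAB_USABLE_SIZE;
    }

    /* Nonzero when carving `count` blocks starting at index `idx` stays inside
     * the slab's usable region (negation of the old dbg_off + unit_sz > dbg_usable check). */
    static inline int tiny_carve_guard(int slab_idx, uint32_t idx, size_t stride, uint32_t count) {
        return ((size_t)idx + count) * stride <= tiny_usable_bytes_for_slab(slab_idx);
    }

For example, if g_tiny_class_sizes[3] == 32 and the header byte is enabled, the stride is 33 and block i sits at base + 33*i; class 7 keeps its raw size, matching the old (ss->size_class != 7) special case.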
@@ -597,9 +594,9 @@ static inline void* hak_tiny_alloc_superslab(int class_idx) {
     // Fast path: Direct metadata access (no repeated TLS reads!)
     if (meta && meta->freelist == NULL && meta->used < meta->capacity && tls->slab_base) {
         // Linear allocation (lazy init)
-        size_t block_size = g_tiny_class_sizes[tls->ss->size_class];
-        uint8_t* base = tls->slab_base; // tls_slab_base comes from tiny_slab_base_for(ss, slab_idx) (the single source of truth)
+        // Box 3: Get stride
+        size_t block_size = tiny_stride_for_class(tls->ss->size_class);
+        uint8_t* base = tls->slab_base; // tls_slab_base comes from tiny_slab_base_for_geometry(ss, slab_idx) (the single source of truth)
         // ULTRATHINK DEBUG: Capture the 53-byte mystery
         if (tiny_refill_failfast_level() >= 3 && tls->ss->size_class == 7 && slab_idx == 0) {
@@ -676,8 +673,9 @@ static inline void* hak_tiny_alloc_superslab(int class_idx) {
         void* block = meta->freelist;
         // Safety: bounds/alignment check (debug)
         if (__builtin_expect(g_tiny_safe_free, 0)) {
-            size_t blk = g_tiny_class_sizes[tls->ss->size_class];
-            uint8_t* base = tiny_slab_base_for(tls->ss, tls->slab_idx);
+            // Box 3: Get stride and slab base for validation
+            size_t blk = tiny_stride_for_class(tls->ss->size_class);
+            uint8_t* base = tiny_slab_base_for_geometry(tls->ss, tls->slab_idx);
             uintptr_t delta = (uintptr_t)block - (uintptr_t)base;
             int align_ok = ((delta % blk) == 0);
             int range_ok = (delta / blk) < meta->capacity;
@@ -732,8 +730,9 @@ static inline void* hak_tiny_alloc_superslab(int class_idx) {
         // }
         if (meta && meta->freelist == NULL && meta->used < meta->capacity && tls->slab_base) {
-            size_t block_size = g_tiny_class_sizes[ss->size_class];
-            void* block = (void*)(tls->slab_base + ((size_t)meta->used * block_size));
+            // Box 3: Get stride and calculate block address
+            size_t block_size = tiny_stride_for_class(ss->size_class);
+            void* block = tiny_block_at_index(tls->slab_base, meta->used, block_size);
             // Disabled for benchmarks
             // static int log_success = 0;
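
Putting the pieces together, a hypothetical carve helper shows how the guard and geometry helpers cooperate on the linear-allocation path. This is a sketch only, relying on the SuperSlab/TinySlabMeta types and helpers shown in the diff; the real fast path above also handles the freelist and TLS caching:

    static inline void* carve_next_block(SuperSlab* ss, int slab_idx) {
        TinySlabMeta* meta = &ss->slabs[slab_idx];
        if (meta->used >= meta->capacity) return NULL;           /* slab exhausted */
        size_t stride = tiny_stride_for_class(ss->size_class);
        if (!tiny_carve_guard(slab_idx, meta->used, stride, 1))  /* fail fast on geometry mismatch */
            return NULL;
        uint8_t* base = tiny_slab_base_for_geometry(ss, slab_idx);
        void* block = tiny_block_at_index(base, meta->used, stride);
        meta->used++;
        return block;
    }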