Phase 36-37: TinyHotHeap v2 HotBox redesign and C7 current_page policy fixes

- Redefine TinyHotHeap v2 as a per-thread Hot Box with clear boundaries
- Add comprehensive OS statistics tracking for SS (SuperSlab) allocations
- Implement route-based free handling for TinyHeap v2 (see the sketch after this list)
- Add C6/C7 debugging and statistics improvements
- Update documentation with implementation guidelines and analysis
- Add new box headers for stats, routing, and front-end management
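
The "route-based free handling" above can be pictured with a minimal, self-contained sketch (all names below are illustrative stand-ins, not the allocator's real API): a free first checks whether the pointer falls inside the calling thread's Hot Box, i.e. the TLS-cached slab range, and only falls back to the global path on a miss.

    /* Sketch only: illustrative names, not HAKMEM's real API. */
    #include <stddef.h>
    #include <stdint.h>

    #define NUM_CLASSES 8

    typedef struct {
        uint8_t* slab_base;  /* start of the cached slab for this class */
        size_t   stride;     /* block stride for this class */
        size_t   capacity;   /* number of blocks in the slab */
    } HotBox;

    static _Thread_local HotBox g_hotbox[NUM_CLASSES];

    /* Route 1: does p fall inside this thread's cached slab range? */
    static int hotbox_owns(int cls, const uint8_t* p) {
        const HotBox* hb = &g_hotbox[cls];
        if (!hb->slab_base || hb->stride == 0 || hb->capacity == 0) return 0;
        return p >= hb->slab_base && p < hb->slab_base + hb->stride * hb->capacity;
    }

    static void route_free(int cls, void* p) {
        if (hotbox_owns(cls, (const uint8_t*)p)) {
            /* fast route: push onto the thread-local freelist */
        } else {
            /* slow route: global SuperSlab lookup, then cold-drain fallback */
        }
    }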
Moe Charm (CI)
2025-12-08 21:30:21 +09:00
parent 34a8fd69b6
commit 8f18963ad5
37 changed files with 3205 additions and 167 deletions


@@ -264,6 +264,37 @@ static inline tiny_heap_class_t* tiny_heap_class(tiny_heap_ctx_t* ctx, int class
     return &ctx->cls[class_idx];
 }
+static inline int tiny_heap_tls_try_resolve(int class_idx,
+                                            void* base,
+                                            SuperSlab** out_ss,
+                                            int* out_slab_idx,
+                                            TinySlabMeta** out_meta) {
+    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES) return 0;
+    TinyTLSSlab* tls = &g_tls_slabs[class_idx];
+    if (!tls->ss || !tls->slab_base || !tls->meta) return 0;
+    const size_t stride = (size_t)tiny_stride_for_class(class_idx);
+    const size_t cap = (size_t)tls->meta->capacity;
+    if (stride == 0 || cap == 0) return 0;
+    uint8_t* low = tls->slab_base;
+    uint8_t* high = low + stride * cap;
+    if ((uint8_t*)base < low || (uint8_t*)base >= high) {
+        return 0;
+    }
+    if (out_ss) {
+        *out_ss = tls->ss;
+    }
+    if (out_slab_idx) {
+        *out_slab_idx = (int)tls->slab_idx;
+    }
+    if (out_meta) {
+        *out_meta = tls->meta;
+    }
+    return 1;
+}
 static inline int tiny_heap_page_is_valid(tiny_heap_class_t* hcls, tiny_heap_page_t* page) {
     if (!hcls || !page) return 0;
     return (page >= hcls->nodes) && (page < (hcls->nodes + TINY_HEAP_MAX_PAGES_PER_CLASS));
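
For orientation, the free fast path in the final hunk of this commit consumes the new resolver roughly as follows (condensed from that hunk; `base` is the block start with the header byte already stripped):

    SuperSlab* ss = NULL;
    int slab_idx = -1;
    if (class_idx == 7 || class_idx == 6) {
        /* TLS hit fills ss/slab_idx without touching the global table */
        if (!tiny_heap_tls_try_resolve(class_idx, base, &ss, &slab_idx, NULL)) {
            ss = NULL;
        }
    }
    if (!ss) {
        ss = hak_super_lookup(base);   /* slow path: global lookup */
    }
    if (ss && slab_idx < 0) {
        slab_idx = slab_index_for(ss, base);
    }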
@@ -630,14 +661,15 @@ static inline void tiny_heap_page_push_to_partial(tiny_heap_class_t* hcls, tiny_
 static inline void tiny_heap_page_becomes_empty(tiny_heap_ctx_t* ctx, int class_idx, tiny_heap_page_t* page) {
     tiny_heap_class_t* hcls = tiny_heap_class(ctx, class_idx);
     if (!hcls || !page) return;
+    const int mode = tiny_heap_meta_mode_effective(class_idx);
     if (tiny_heap_meta_ultra_enabled_for_class(class_idx)) {
         // ULTRA: C7 is assumed to own a single page, so keep holding it; avoid publish/unlink/release.
         hcls->current_page = page;
         return;
     }
-    if (tiny_heap_meta_light_enabled_for_class(class_idx)) {
-        // SAFE: apply the delta
+    if (mode == 1) {
+        // SAFE: apply the delta (C6/C7)
         if (class_idx == 6) {
             tiny_c6_mark_delta_site(page, C6_DELTA_EMPTY);
         }
@@ -685,18 +717,43 @@ static inline void tiny_heap_page_mark_full(tiny_heap_class_t* hcls, tiny_heap_p
 }
 static inline void* tiny_heap_page_pop(tiny_heap_class_t* hcls, int class_idx, tiny_heap_page_t* page) {
-    const int mode = tiny_heap_meta_mode_effective(class_idx);
-    const int c6_pop_dbg = (class_idx == 6) && tiny_c6_debug_pop_enabled();
-    if (c6_pop_dbg) {
-        static _Atomic uint32_t g_pop_dbg = 0;
-        uint32_t pop_n = atomic_fetch_add_explicit(&g_pop_dbg, 1, memory_order_relaxed);
-        if (pop_n < 8) {
-            fprintf(stderr, "[POP_ENTRY] cls=%d page=%p\n", class_idx, (void*)page);
-        }
-    }
     if (!tiny_heap_page_is_valid(hcls, page)) return NULL;
+    const int mode = tiny_heap_meta_mode_effective(class_idx);
+    if (class_idx == 7 && __builtin_expect(mode == 1, 1)) {
+        if (!page->meta || !page->ss || !page->base) return NULL;
+        void* block = NULL;
+        if (page->free_list) {
+            block = page->free_list;
+            void* next = tiny_next_read(class_idx, block);
+            page->free_list = next;
+            atomic_store_explicit(&page->meta->freelist, next, memory_order_release);
+        } else {
+            if (page->used >= page->capacity) {
+                return NULL;
+            }
+            size_t stride = hcls->stride;
+            if (stride == 0) {
+                stride = tiny_heap_block_stride(class_idx);
+                hcls->stride = (uint16_t)stride;
+            }
+            block = (void*)(page->base + ((size_t)page->used * stride));
+            if (page->meta->carved < page->capacity) {
+                page->meta->carved++;
+            }
+        }
+        page->used++;
+        page->used_delta++;
+        page->active_delta++;
+        if (tiny_heap_delta_should_flush(class_idx, page)) {
+            tiny_heap_meta_flush_page(class_idx, page);
+        }
+        return tiny_region_id_write_header(block, class_idx);
+    }
+    const int c6_pop_dbg = (class_idx == 6) && tiny_c6_debug_pop_enabled();
     if (!page->meta || !page->ss || !page->base) return NULL;
-    if (class_idx == 6 && mode == 1) {
+    if (c6_pop_dbg && mode == 1) {
         int fail = 0;
         const char* reason = NULL;
         SuperSlab* ss_chk = hak_super_lookup(page->base);
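
The C7 fast path above batches `page->used` bookkeeping in `used_delta`/`active_delta` and only publishes to `page->meta` when `tiny_heap_delta_should_flush()` fires. That predicate's body is not part of this diff; a plausible threshold-based shape, offered purely as an assumption, would be:

    /* Assumption: NOT the real tiny_heap_delta_should_flush(). The diff only
     * shows the call site; a simple batch threshold like this would amortize
     * the atomic/meta publication cost across many pops. */
    #define TINY_DELTA_FLUSH_THRESHOLD 32  /* hypothetical batch size */

    static inline int delta_should_flush_sketch(const tiny_heap_page_t* page) {
        return page->used_delta >= TINY_DELTA_FLUSH_THRESHOLD
            || page->active_delta >= TINY_DELTA_FLUSH_THRESHOLD;
    }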
@@ -1134,16 +1191,27 @@ static inline void tiny_heap_free_class_fast(tiny_heap_ctx_t* ctx, int class_idx
 #else
     void* base = (void*)((uint8_t*)ptr - 1);
 #endif
-    SuperSlab* ss = hak_super_lookup(base);
-    if (!ss || ss->magic != SUPERSLAB_MAGIC) {
-        TinyHeapClassStats* stats = tiny_heap_stats_for_class(class_idx);
-        if (__builtin_expect(stats != NULL, 0)) {
-            atomic_fetch_add_explicit(&stats->free_slow_fallback, 1, memory_order_relaxed);
+    SuperSlab* ss = NULL;
+    int slab_idx = -1;
+    if (class_idx == 7 || class_idx == 6) {
+        if (!tiny_heap_tls_try_resolve(class_idx, base, &ss, &slab_idx, NULL)) {
+            ss = NULL;
+        }
     }
-    tiny_heap_cold_drain_and_free(class_idx, base);
-    return;
-    }
-    int slab_idx = slab_index_for(ss, base);
+    }
+    if (!ss) {
+        ss = hak_super_lookup(base);
+        if (!ss || ss->magic != SUPERSLAB_MAGIC) {
+            TinyHeapClassStats* stats = tiny_heap_stats_for_class(class_idx);
+            if (__builtin_expect(stats != NULL, 0)) {
+                atomic_fetch_add_explicit(&stats->free_slow_fallback, 1, memory_order_relaxed);
+            }
+            tiny_heap_cold_drain_and_free(class_idx, base);
+            return;
+        }
+    }
+    if (slab_idx < 0) {
+        slab_idx = slab_index_for(ss, base);
+    }
     if (slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) {
         TinyHeapClassStats* stats = tiny_heap_stats_for_class(class_idx);
         if (__builtin_expect(stats != NULL, 0)) {