Phase V6-HDR-3: SmallSegmentV6 実割り当て & RegionIdBox Registration

実装内容:
1. SmallSegmentV6のmmap割り当ては既に v6-0で実装済み
2. small_heap_ctx_v6() で segment 取得時に region_id_register_v6_segment() 呼び出し
3. region_id_v6.c に TLS スコープのセグメント登録ロジック実装:
   - 4つの static __thread 変数でセグメント情報をキャッシュ
   - region_id_register_v6_segment(): セグメント base/end を TLS に記録
   - region_id_lookup_v6(): TLS segment の range check を最初に実行
   - TLS cache 更新で O(1) lookup 実現
4. region_id_v6_box.h に SmallSegmentV6 type include & function 宣言追加
5. small_v6_region_observe_validate() に region_id_observe_lookup() 呼び出し追加

効果:
- HeaderlessデザインでRegionIdBoxが正式にSMALL_V6分類を返せるように
- TLS-scopedな簡潔な登録メカニズム (マルチスレッド対応)
- Fast path: TLS segment range check -> page_meta lookup
- Fallback path: 従来の small_page_meta_v6_of() による動的検出
- Latency: O(1) の TLS cache hit が v6 alloc/free の大部分をカバー

🤖 Generated with Claude Code

Co-Authored-By: Claude Haiku 4.5 <noreply@anthropic.com>
This commit is contained in:
Moe Charm (CI)
2025-12-11 23:51:48 +09:00
parent 406835feb3
commit df216b6901
10 changed files with 725 additions and 31 deletions

View File

@ -9,6 +9,8 @@
#include "box/smallobject_cold_iface_v6.h"
#include "box/smallsegment_v6_box.h"
#include "box/tiny_route_env_box.h"
#include "box/region_id_v6_box.h"
#include "box/smallobject_v6_env_box.h"
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
@ -40,6 +42,36 @@ static void small_v6_observe_free(void* ptr, uint32_t class_idx, int tls_owned)
ptr, class_idx, tls_owned);
}
// ============================================================================
// REGION_OBSERVE Validation (V6-HDR-1)
// ============================================================================
// Note: small_v6_region_observe_enabled() is now in smallobject_v6_env_box.h
/// Validate class_idx via RegionIdBox lookup (called when REGION_OBSERVE=1).
/// Diagnostic only: mismatches are reported on stderr, never acted upon.
/// @param ptr: USER pointer
/// @param class_idx_hint: class_idx from front caller
static void small_v6_region_observe_validate(void* ptr, uint32_t class_idx_hint) {
    RegionLookupV6 lookup = region_id_lookup_v6(ptr);
    // Feed the lookup to the REGION_ID_BOX observer (if enabled).
    region_id_observe_lookup(ptr, &lookup);
    // Resolve page metadata only for a SMALL_V6 classification.
    SmallPageMetaV6* meta = (lookup.kind == REGION_KIND_SMALL_V6)
                                ? (SmallPageMetaV6*)lookup.page_meta
                                : NULL;
    if (meta != NULL) {
        // Compare the caller's header-derived hint against page metadata.
        if (meta->class_idx != class_idx_hint) {
            fprintf(stderr, "[V6_REGION_OBSERVE] MISMATCH ptr=%p "
                    "hint=%u actual=%u page_meta=%p\n",
                    ptr, class_idx_hint, meta->class_idx, (void*)meta);
        }
    } else if (lookup.kind != REGION_KIND_UNKNOWN) {
        // Non-v6 region (or SMALL_V6 with missing page_meta): report the kind.
        fprintf(stderr, "[V6_REGION_OBSERVE] KIND_MISMATCH ptr=%p "
                "kind=%s (expected SMALL_V6)\n",
                ptr, region_kind_to_string(lookup.kind));
    }
    // REGION_KIND_UNKNOWN: ptr not in any v6 segment (OK for now).
}
// TLS context
// Per-thread small-heap context; one instance per thread via __thread.
static __thread struct SmallHeapCtxV6 g_small_heap_ctx_v6;
// Lazy-init guard: 0 until this thread's context has been initialized.
static __thread int g_small_heap_ctx_v6_init = 0;
@ -59,6 +91,9 @@ SmallHeapCtxV6* small_heap_ctx_v6(void) {
if (seg && small_segment_v6_valid(seg)) {
g_small_heap_ctx_v6.tls_seg_base = seg->base;
g_small_heap_ctx_v6.tls_seg_end = seg->base + SMALL_SEGMENT_V6_SIZE;
// Phase V6-HDR-3: Register segment with RegionIdBox (TLS scope)
region_id_register_v6_segment(seg);
}
g_small_heap_ctx_v6_init = 1;
@ -284,6 +319,11 @@ void small_free_fast_v6(void* ptr,
small_v6_observe_free(ptr, class_idx, tls_owned);
}
// V6-HDR-1: REGION_OBSERVE mode - validate class_idx via RegionIdBox
if (unlikely(small_v6_region_observe_enabled())) {
small_v6_region_observe_validate(ptr, class_idx);
}
// Fast path: TLS segment ownership + TLS push
if (likely(tls_owned)) {
// C6 TLS push
@ -442,3 +482,181 @@ void small_free_cold_v6(void* ptr, uint32_t class_idx) {
small_cold_v6_retire_page(page);
}
}
// ============================================================================
// Phase V6-HDR-2: Headerless Free/Alloc Implementation
// ============================================================================
/// Headerless free: classifies ptr via RegionIdBox instead of a header read.
/// @param ctx: TLS context
/// @param ptr: USER pointer to free
/// @param class_idx_hint: class_idx from front (header byte)
/// @return: true if handled by v6, false if fallback needed
bool small_v6_headerless_free(SmallHeapCtxV6* ctx, void* ptr, uint8_t class_idx_hint) {
    // Step 1: RegionIdBox lookup (no header read). Anything that is not a
    // SMALL_V6 region with valid page metadata is handed back to the front.
    RegionLookupV6 lookup = region_id_lookup_v6(ptr);
    if (lookup.kind != REGION_KIND_SMALL_V6 || lookup.page_meta == NULL) {
        return false;
    }
    SmallPageMetaV6* meta = (SmallPageMetaV6*)lookup.page_meta;
    uint8_t cls = meta->class_idx;

    // Step 2: OBSERVE mode - report (but do not act on) a stale hint.
    if (unlikely(small_v6_region_observe_enabled()) && cls != class_idx_hint) {
        fprintf(stderr, "[V6_HDR_FREE] MISMATCH ptr=%p hint=%u actual=%u\n",
                ptr, class_idx_hint, cls);
    }

    // Step 3: Convert USER -> BASE (no header touch).
    void* base = SMALL_V6_BASE_FROM_USER(ptr);

    // Step 4: TLS fast path - cache the block on this thread's freelist
    // when the calling thread owns the segment and the cache has room.
    if (small_tls_owns_ptr_v6(ctx, ptr)) {
        if (cls == SMALL_V6_C6_CLASS_IDX && ctx->tls_count_c6 < SMALL_V6_TLS_CAP) {
            ctx->tls_freelist_c6[ctx->tls_count_c6++] = base;
            return true;
        }
        if (cls == SMALL_V6_C5_CLASS_IDX && ctx->tls_count_c5 < SMALL_V6_TLS_CAP) {
            ctx->tls_freelist_c5[ctx->tls_count_c5++] = base;
            return true;
        }
        if (cls == SMALL_V6_C4_CLASS_IDX && ctx->tls_count_c4 < SMALL_V6_TLS_CAP) {
            ctx->tls_freelist_c4[ctx->tls_count_c4++] = base;
            return true;
        }
        // Owned but TLS cache full (or class not TLS-cached): cold path below.
    }

    // Step 5: Cold path - push onto the page freelist and update accounting.
    *(void**)base = meta->free_list;
    meta->free_list = base;
    if (meta->used > 0) {
        meta->used--;
    }
    // Retire the page once its last live block has been returned.
    if (meta->used == 0) {
        small_cold_v6_retire_page(meta);
    }
    return true;
}
/// Headerless alloc: TLS pop without header write
/// Header is already written during carve/refill
/// @param ctx: TLS context
/// @param class_idx: class index (4=C4, 5=C5, 6=C6)
/// @return: USER pointer or NULL (fallback needed)
void* small_v6_headerless_alloc(SmallHeapCtxV6* ctx, uint8_t class_idx) {
    // TLS fast path (no header write - already done in refill)
    if (class_idx == SMALL_V6_C6_CLASS_IDX) {
        if (likely(ctx->tls_count_c6 > 0)) {
            void* blk = ctx->tls_freelist_c6[--ctx->tls_count_c6];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    } else if (class_idx == SMALL_V6_C5_CLASS_IDX) {
        if (likely(ctx->tls_count_c5 > 0)) {
            void* blk = ctx->tls_freelist_c5[--ctx->tls_count_c5];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    } else if (class_idx == SMALL_V6_C4_CLASS_IDX) {
        if (likely(ctx->tls_count_c4 > 0)) {
            void* blk = ctx->tls_freelist_c4[--ctx->tls_count_c4];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    }
    // TLS empty -> need refill from cold path
    // NOTE: Refill writes header, so alloc doesn't need to
    SmallPageMetaV6* page = small_cold_v6_refill_page(class_idx);
    if (!page || !page->free_list) {
        return NULL; // Front should fallback to legacy
    }
    uint8_t header_byte = SMALL_V6_HEADER_FROM_CLASS(class_idx);
    int max_fill = SMALL_V6_TLS_CAP;
    int filled = 0;
    // Refill TLS from page. Each class branch below is the same sequence:
    //   1) move up to (max_fill - 1) blocks from the page freelist into the
    //      TLS freelist, stamping the class header byte on each moved block;
    //   2) prefer returning one extra block taken directly from the page
    //      (leaves the freshly filled TLS cache intact);
    //   3) otherwise pop the return block from the TLS cache just filled.
    // Blocks parked in the TLS cache are counted in page->used; the matching
    // free path only decrements `used` on the cold (page-freelist) route.
    if (class_idx == SMALL_V6_C6_CLASS_IDX) {
        // TLS count is 0 here (fast path above failed), so max_fill is
        // effectively SMALL_V6_TLS_CAP; the subtraction keeps this robust.
        max_fill -= ctx->tls_count_c6;
        while (page->free_list && filled < max_fill - 1) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            ((uint8_t*)blk)[0] = header_byte; // Header write on refill only
            ctx->tls_freelist_c6[ctx->tls_count_c6++] = blk;
            filled++;
        }
        page->used += filled;
        if (page->free_list) {
            // Return one more block straight from the page.
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            page->used++;
            ((uint8_t*)blk)[0] = header_byte;
            return SMALL_V6_USER_FROM_BASE(blk);
        }
        if (ctx->tls_count_c6 > 0) {
            // Page drained during refill: hand out the newest TLS-cached block.
            void* blk = ctx->tls_freelist_c6[--ctx->tls_count_c6];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    } else if (class_idx == SMALL_V6_C5_CLASS_IDX) {
        max_fill -= ctx->tls_count_c5;
        while (page->free_list && filled < max_fill - 1) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            ((uint8_t*)blk)[0] = header_byte;
            ctx->tls_freelist_c5[ctx->tls_count_c5++] = blk;
            filled++;
        }
        page->used += filled;
        if (page->free_list) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            page->used++;
            ((uint8_t*)blk)[0] = header_byte;
            return SMALL_V6_USER_FROM_BASE(blk);
        }
        if (ctx->tls_count_c5 > 0) {
            void* blk = ctx->tls_freelist_c5[--ctx->tls_count_c5];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    } else if (class_idx == SMALL_V6_C4_CLASS_IDX) {
        max_fill -= ctx->tls_count_c4;
        while (page->free_list && filled < max_fill - 1) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            ((uint8_t*)blk)[0] = header_byte;
            ctx->tls_freelist_c4[ctx->tls_count_c4++] = blk;
            filled++;
        }
        page->used += filled;
        if (page->free_list) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            page->used++;
            ((uint8_t*)blk)[0] = header_byte;
            return SMALL_V6_USER_FROM_BASE(blk);
        }
        if (ctx->tls_count_c4 > 0) {
            void* blk = ctx->tls_freelist_c4[--ctx->tls_count_c4];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    }
    // Unknown class_idx, or refill yielded no usable block: caller falls back.
    return NULL;
}