Implementation:
1. SmallSegmentV6 mmap allocation was already implemented in v6-0
2. small_heap_ctx_v6() now calls region_id_register_v6_segment() when it acquires a segment
3. TLS-scoped segment registration logic implemented in region_id_v6.c (see the sketch after this message):
   - Four static __thread variables cache the segment information
   - region_id_register_v6_segment(): records the segment base/end in TLS
   - region_id_lookup_v6(): runs the TLS segment range check first
   - TLS cache updates give O(1) lookup
4. region_id_v6_box.h: added SmallSegmentV6 type include & function declarations
5. small_v6_region_observe_validate(): added a region_id_observe_lookup() call

Effects:
- With the headerless design, RegionIdBox can now properly return the SMALL_V6 classification
- Simple TLS-scoped registration mechanism (multi-thread capable)
- Fast path: TLS segment range check -> page_meta lookup
- Fallback path: dynamic detection via the existing small_page_meta_v6_of()
- Latency: the O(1) TLS cache hit covers the bulk of v6 alloc/free

🤖 Generated with Claude Code

Co-Authored-By: Claude Haiku 4.5 <noreply@anthropic.com>
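Below is a minimal sketch of the TLS-scoped cache from item 3, assuming the four
__thread variables hold the segment pointer, its [base, end) range, and a valid
flag; the actual region_id_v6.c may name and arrange them differently.

// Hypothetical shape of the region_id_v6.c TLS cache (illustration only)
static __thread SmallSegmentV6* t_seg;        // registered segment, or NULL
static __thread uintptr_t       t_seg_base;   // cached seg->base
static __thread uintptr_t       t_seg_end;    // cached base + SMALL_SEGMENT_V6_SIZE
static __thread int             t_seg_valid;  // 1 once registered

void region_id_register_v6_segment(SmallSegmentV6* seg) {
    t_seg       = seg;
    t_seg_base  = (uintptr_t)seg->base;
    t_seg_end   = t_seg_base + SMALL_SEGMENT_V6_SIZE;
    t_seg_valid = 1;
}

RegionLookupV6 region_id_lookup_v6(void* ptr) {
    RegionLookupV6 lk = { .kind = REGION_KIND_UNKNOWN, .page_meta = NULL };
    uintptr_t p = (uintptr_t)ptr;
    if (t_seg_valid && p >= t_seg_base && p < t_seg_end) {
        // Fast path: O(1) TLS range check, then page_meta lookup
        lk.kind      = REGION_KIND_SMALL_V6;
        lk.page_meta = small_page_meta_v6_of(ptr);
    }
    return lk;
}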
// smallobject_core_v6.c - SmallObject Core v6 implementation
//
// Phase V6-HDR-0: C6-only headerless core with OBSERVE mode

#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include "box/smallobject_core_v6_box.h"
#include "box/smallobject_cold_iface_v6.h"
#include "box/smallsegment_v6_box.h"
#include "box/tiny_route_env_box.h"
#include "box/region_id_v6_box.h"
#include "box/smallobject_v6_env_box.h"

#ifndef likely
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
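
// Pointer layout assumed throughout this file (inferred from the macro names
// and the "USER pointer (BASE+1)" doc comments below; the authoritative
// definitions live in smallobject_core_v6_box.h):
//
//   BASE     BASE+1
//   +--------+----------------------------+
//   | header | user data ...              |
//   +--------+----------------------------+
//
//   SMALL_V6_USER_FROM_BASE(b) ~ (uint8_t*)(b) + 1   (assumed)
//   SMALL_V6_BASE_FROM_USER(u) ~ (uint8_t*)(u) - 1   (assumed)
//
// The single header byte encodes the class index and is written only on the
// refill/carve path, never on the TLS fast path.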

// ============================================================================
// OBSERVE Mode (V6-HDR-0)
// ============================================================================
// ENV: HAKMEM_SMALL_V6_OBSERVE=1 enables logging at free entry

#define V6_OBSERVE_UNINIT (-1)
#define V6_OBSERVE_OFF    0
#define V6_OBSERVE_ON     1

static int g_v6_observe = V6_OBSERVE_UNINIT;

static inline int small_v6_observe_enabled(void) {
    if (unlikely(g_v6_observe == V6_OBSERVE_UNINIT)) {
        const char* env = getenv("HAKMEM_SMALL_V6_OBSERVE");
        g_v6_observe = (env && env[0] == '1') ? V6_OBSERVE_ON : V6_OBSERVE_OFF;
    }
    return g_v6_observe == V6_OBSERVE_ON;
}

/// Log free entry (called when OBSERVE=1)
static void small_v6_observe_free(void* ptr, uint32_t class_idx, int tls_owned) {
    fprintf(stderr, "[V6_OBSERVE] free ptr=%p class=%u tls_owned=%d\n",
            ptr, class_idx, tls_owned);
}
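
// Usage sketch: the flag is read once per process and cached in g_v6_observe.
// Example invocation and output (binary name and address are illustrative):
//
//   $ HAKMEM_SMALL_V6_OBSERVE=1 ./app
//   [V6_OBSERVE] free ptr=0x7f3a4c001201 class=6 tls_owned=1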

// ============================================================================
// REGION_OBSERVE Validation (V6-HDR-1)
// ============================================================================
// Note: small_v6_region_observe_enabled() is now in smallobject_v6_env_box.h

/// Validate class_idx via RegionIdBox lookup (called when REGION_OBSERVE=1)
/// @param ptr: USER pointer
/// @param class_idx_hint: class_idx from front caller
static void small_v6_region_observe_validate(void* ptr, uint32_t class_idx_hint) {
    RegionLookupV6 lk = region_id_lookup_v6(ptr);

    // Log the lookup to REGION_ID_BOX observe (if enabled)
    region_id_observe_lookup(ptr, &lk);

    if (lk.kind == REGION_KIND_SMALL_V6 && lk.page_meta != NULL) {
        SmallPageMetaV6* page = (SmallPageMetaV6*)lk.page_meta;
        if (page->class_idx != class_idx_hint) {
            fprintf(stderr, "[V6_REGION_OBSERVE] MISMATCH ptr=%p "
                    "hint=%u actual=%u page_meta=%p\n",
                    ptr, class_idx_hint, page->class_idx, (void*)page);
        }
    } else if (lk.kind != REGION_KIND_UNKNOWN) {
        // ptr is in a different kind of region (not v6)
        fprintf(stderr, "[V6_REGION_OBSERVE] KIND_MISMATCH ptr=%p "
                "kind=%s (expected SMALL_V6)\n",
                ptr, region_kind_to_string(lk.kind));
    }
    // REGION_KIND_UNKNOWN: ptr not in any v6 segment (OK for now)
}

// TLS context
static __thread struct SmallHeapCtxV6 g_small_heap_ctx_v6;
static __thread int g_small_heap_ctx_v6_init = 0;

// TLS policy snapshot
static __thread struct SmallPolicySnapshotV6 g_snap_v6;
static __thread int g_snap_v6_init = 0;

/// Get TLS heap context for v6 (lazy initialization)
/// @return: TLS context pointer (never NULL)
SmallHeapCtxV6* small_heap_ctx_v6(void) {
    if (!g_small_heap_ctx_v6_init) {
        memset(&g_small_heap_ctx_v6, 0, sizeof(g_small_heap_ctx_v6));

        // Initialize TLS segment ownership range
        SmallSegmentV6* seg = small_segment_v6_acquire_for_thread();
        if (seg && small_segment_v6_valid(seg)) {
            g_small_heap_ctx_v6.tls_seg_base = seg->base;
            g_small_heap_ctx_v6.tls_seg_end = seg->base + SMALL_SEGMENT_V6_SIZE;

            // Phase V6-HDR-3: Register segment with RegionIdBox (TLS scope)
            region_id_register_v6_segment(seg);
        }

        g_small_heap_ctx_v6_init = 1;
    }
    return &g_small_heap_ctx_v6;
}

/// Get TLS policy snapshot for v6 (lazy initialization)
/// @return: Policy snapshot pointer (never NULL)
const SmallPolicySnapshotV6* tiny_policy_snapshot_v6(void) {
    if (!g_snap_v6_init) {
        memset(&g_snap_v6, 0, sizeof(g_snap_v6));

        // Initialize route_kind from tiny_route API (this ensures init is done)
        for (int i = 0; i < 8; i++) {
            g_snap_v6.route_kind[i] = (uint8_t)tiny_route_for_class((uint8_t)i);
        }

        g_snap_v6_init = 1;
    }
    return &g_snap_v6;
}

// Forward declarations for pool v1 fallback
extern void* hak_pool_try_alloc(size_t size, uintptr_t site_id);
extern void hak_pool_free(void* ptr, size_t size, uintptr_t site_id);

// ============================================================================
// Allocation Implementation
// ============================================================================

/// Allocate block from v6 TLS freelist or refill
/// @param size: requested size (unused, class_idx determines size)
/// @param class_idx: size class index (must be C4, C5 or C6 for the v6 route)
/// @param ctx: TLS context
/// @param snap: policy snapshot
/// @return: USER pointer (BASE+1) or NULL on fallback
void* small_alloc_fast_v6(size_t size,
                          uint32_t class_idx,
                          SmallHeapCtxV6* ctx,
                          const SmallPolicySnapshotV6* snap) {
    (void)size;

    // Bounds check
    if (unlikely(class_idx >= 8)) {
        return hak_pool_try_alloc(size, 0);
    }

    uint8_t route = snap->route_kind[class_idx];

    // v6-6: C4, C5 and C6 classes are supported on the v6 route
    if (route != TINY_ROUTE_SMALL_HEAP_V6) {
        return hak_pool_try_alloc(size, 0);
    }

    // C6 fast path
    if (class_idx == SMALL_V6_C6_CLASS_IDX) {
        // Fast path: TLS freelist hit
        if (likely(ctx->tls_count_c6 > 0)) {
            void* blk = ctx->tls_freelist_c6[--ctx->tls_count_c6];
            // v6-3: Header already written during refill, just return USER pointer
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    }
    // C5 fast path (Phase v6-5)
    else if (class_idx == SMALL_V6_C5_CLASS_IDX) {
        // Fast path: TLS freelist hit
        if (likely(ctx->tls_count_c5 > 0)) {
            void* blk = ctx->tls_freelist_c5[--ctx->tls_count_c5];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    }
    // C4 fast path (Phase v6-6)
    else if (class_idx == SMALL_V6_C4_CLASS_IDX) {
        // Fast path: TLS freelist hit
        if (likely(ctx->tls_count_c4 > 0)) {
            void* blk = ctx->tls_freelist_c4[--ctx->tls_count_c4];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    }
    else {
        // Unsupported class for v6
        return hak_pool_try_alloc(size, 0);
    }

    // Slow path: refill TLS with multiple blocks (batching)
    SmallPageMetaV6* page = small_cold_v6_refill_page(class_idx);
    if (!page || !page->free_list) {
        return hak_pool_try_alloc(size, 0); // Safety fallback
    }

    // v6-6: Batch refill - C4, C5 and C6 supported
    uint8_t header_byte = SMALL_V6_HEADER_FROM_CLASS(class_idx);

    if (class_idx == SMALL_V6_C6_CLASS_IDX) {
        // C6 refill path
        int max_fill = SMALL_V6_TLS_CAP - ctx->tls_count_c6;
        int filled = 0;

        // Fill TLS (leave room for 1 to return)
        while (page->free_list && filled < max_fill - 1) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            ((uint8_t*)blk)[0] = header_byte;
            ctx->tls_freelist_c6[ctx->tls_count_c6++] = blk;
            filled++;
        }
        page->used += filled;

        // Pop one more to return to caller
        if (page->free_list) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            page->used++;
            ((uint8_t*)blk)[0] = header_byte;
            return SMALL_V6_USER_FROM_BASE(blk);
        }

        // If we filled TLS but no more blocks, pop from TLS
        if (ctx->tls_count_c6 > 0) {
            void* blk = ctx->tls_freelist_c6[--ctx->tls_count_c6];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    }
    else if (class_idx == SMALL_V6_C5_CLASS_IDX) {
        // C5 refill path (Phase v6-5)
        int max_fill = SMALL_V6_TLS_CAP - ctx->tls_count_c5;
        int filled = 0;

        // Fill TLS (leave room for 1 to return)
        while (page->free_list && filled < max_fill - 1) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            ((uint8_t*)blk)[0] = header_byte;
            ctx->tls_freelist_c5[ctx->tls_count_c5++] = blk;
            filled++;
        }
        page->used += filled;

        // Pop one more to return to caller
        if (page->free_list) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            page->used++;
            ((uint8_t*)blk)[0] = header_byte;
            return SMALL_V6_USER_FROM_BASE(blk);
        }

        // If we filled TLS but no more blocks, pop from TLS
        if (ctx->tls_count_c5 > 0) {
            void* blk = ctx->tls_freelist_c5[--ctx->tls_count_c5];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    }
    else if (class_idx == SMALL_V6_C4_CLASS_IDX) {
        // C4 refill path (Phase v6-6)
        int max_fill = SMALL_V6_TLS_CAP - ctx->tls_count_c4;
        int filled = 0;

        // Fill TLS (leave room for 1 to return)
        while (page->free_list && filled < max_fill - 1) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            ((uint8_t*)blk)[0] = header_byte;
            ctx->tls_freelist_c4[ctx->tls_count_c4++] = blk;
            filled++;
        }
        page->used += filled;

        // Pop one more to return to caller
        if (page->free_list) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            page->used++;
            ((uint8_t*)blk)[0] = header_byte;
            return SMALL_V6_USER_FROM_BASE(blk);
        }

        // If we filled TLS but no more blocks, pop from TLS
        if (ctx->tls_count_c4 > 0) {
            void* blk = ctx->tls_freelist_c4[--ctx->tls_count_c4];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    }

    // Should not reach here
    return hak_pool_try_alloc(size, 0);
}
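
// The three per-class refill branches above share one shape; the sketch below
// shows that shape once. It is not used by the current code (the explicit
// branches avoid indirection on the hot path); tls_freelist/tls_count are
// stand-ins for ctx->tls_freelist_cN / ctx->tls_count_cN.
static inline void* small_v6_refill_one_sketch(SmallPageMetaV6* page,
                                               void** tls_freelist,
                                               int* tls_count,
                                               uint8_t header_byte) {
    int max_fill = SMALL_V6_TLS_CAP - *tls_count;
    int filled = 0;
    // Fill TLS, leaving room so one block can be returned directly
    while (page->free_list && filled < max_fill - 1) {
        void* blk = page->free_list;
        page->free_list = *(void**)blk;
        ((uint8_t*)blk)[0] = header_byte;   // header write on refill only
        tls_freelist[(*tls_count)++] = blk;
        filled++;
    }
    page->used += filled;
    // Pop one more block to hand straight back to the caller
    if (page->free_list) {
        void* blk = page->free_list;
        page->free_list = *(void**)blk;
        page->used++;
        ((uint8_t*)blk)[0] = header_byte;
        return SMALL_V6_USER_FROM_BASE(blk);
    }
    // Otherwise serve from the TLS entries just filled
    if (*tls_count > 0) {
        void* blk = tls_freelist[--(*tls_count)];
        return SMALL_V6_USER_FROM_BASE(blk);
    }
    return NULL;
}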

// ============================================================================
// Free Implementation
// ============================================================================

/// Free block to v6 TLS freelist or page freelist
/// @param ptr: USER pointer to free
/// @param class_idx: size class index
/// @param ctx: TLS context
/// @param snap: policy snapshot
void small_free_fast_v6(void* ptr,
                        uint32_t class_idx,
                        SmallHeapCtxV6* ctx,
                        const SmallPolicySnapshotV6* snap) {
    // Bounds check
    if (unlikely(class_idx >= 8)) {
        hak_pool_free(ptr, 0, 0);
        return;
    }

    uint8_t route = snap->route_kind[class_idx];

    // v6-5: Check that this class is routed to the v6 small heap
    if (route != TINY_ROUTE_SMALL_HEAP_V6) {
        hak_pool_free(ptr, 0, 0);
        return;
    }

    // Convert USER pointer to BASE pointer
    void* base = SMALL_V6_BASE_FROM_USER(ptr);

    // V6-HDR-0: OBSERVE mode logging (check TLS ownership first for log)
    int tls_owned = small_tls_owns_ptr_v6(ctx, ptr);
    if (unlikely(small_v6_observe_enabled())) {
        small_v6_observe_free(ptr, class_idx, tls_owned);
    }

    // V6-HDR-1: REGION_OBSERVE mode - validate class_idx via RegionIdBox
    if (unlikely(small_v6_region_observe_enabled())) {
        small_v6_region_observe_validate(ptr, class_idx);
    }

    // Fast path: TLS segment ownership + TLS push
    if (likely(tls_owned)) {
        // C6 TLS push
        if (class_idx == SMALL_V6_C6_CLASS_IDX && ctx->tls_count_c6 < SMALL_V6_TLS_CAP) {
            ctx->tls_freelist_c6[ctx->tls_count_c6++] = base;
            return;
        }
        // C5 TLS push (Phase v6-5)
        if (class_idx == SMALL_V6_C5_CLASS_IDX && ctx->tls_count_c5 < SMALL_V6_TLS_CAP) {
            ctx->tls_freelist_c5[ctx->tls_count_c5++] = base;
            return;
        }
        // C4 TLS push (Phase v6-6)
        if (class_idx == SMALL_V6_C4_CLASS_IDX && ctx->tls_count_c4 < SMALL_V6_TLS_CAP) {
            ctx->tls_freelist_c4[ctx->tls_count_c4++] = base;
            return;
        }
    }

    // Slow path: page_meta lookup and push to page freelist
    SmallPageMetaV6* page = small_page_meta_v6_of(ptr);
    if (!page) {
        hak_pool_free(ptr, 0, 0);
        return;
    }

    // Push to page freelist (using BASE pointer)
    *(void**)base = page->free_list;
    page->free_list = base;
    if (page->used > 0) page->used--;

    // Retire empty page
    if (page->used == 0) {
        small_cold_v6_retire_page(page);
    }
}

// ============================================================================
// Cold Path Implementation (Phase v6-6)
// ============================================================================

/// Cold path: alloc with refill - called when TLS is empty
/// @param class_idx: C4, C5 or C6
/// @param ctx: TLS context
/// @return: USER pointer or NULL
void* small_alloc_cold_v6(uint32_t class_idx, SmallHeapCtxV6* ctx) {
    // Refill TLS from page
    SmallPageMetaV6* page = small_cold_v6_refill_page(class_idx);
    if (!page || !page->free_list) {
        return hak_pool_try_alloc(class_idx == SMALL_V6_C6_CLASS_IDX ? 512
                                      : (class_idx == SMALL_V6_C5_CLASS_IDX ? 256 : 128),
                                  0);
    }

    uint8_t header_byte = SMALL_V6_HEADER_FROM_CLASS(class_idx);

    if (class_idx == SMALL_V6_C6_CLASS_IDX) {
        int max_fill = SMALL_V6_TLS_CAP - ctx->tls_count_c6;
        int filled = 0;

        while (page->free_list && filled < max_fill - 1) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            ((uint8_t*)blk)[0] = header_byte;
            ctx->tls_freelist_c6[ctx->tls_count_c6++] = blk;
            filled++;
        }
        page->used += filled;

        if (page->free_list) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            page->used++;
            ((uint8_t*)blk)[0] = header_byte;
            return SMALL_V6_USER_FROM_BASE(blk);
        }

        if (ctx->tls_count_c6 > 0) {
            void* blk = ctx->tls_freelist_c6[--ctx->tls_count_c6];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    }
    else if (class_idx == SMALL_V6_C5_CLASS_IDX) {
        int max_fill = SMALL_V6_TLS_CAP - ctx->tls_count_c5;
        int filled = 0;

        while (page->free_list && filled < max_fill - 1) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            ((uint8_t*)blk)[0] = header_byte;
            ctx->tls_freelist_c5[ctx->tls_count_c5++] = blk;
            filled++;
        }
        page->used += filled;

        if (page->free_list) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            page->used++;
            ((uint8_t*)blk)[0] = header_byte;
            return SMALL_V6_USER_FROM_BASE(blk);
        }

        if (ctx->tls_count_c5 > 0) {
            void* blk = ctx->tls_freelist_c5[--ctx->tls_count_c5];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    }
    else if (class_idx == SMALL_V6_C4_CLASS_IDX) {
        int max_fill = SMALL_V6_TLS_CAP - ctx->tls_count_c4;
        int filled = 0;

        while (page->free_list && filled < max_fill - 1) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            ((uint8_t*)blk)[0] = header_byte;
            ctx->tls_freelist_c4[ctx->tls_count_c4++] = blk;
            filled++;
        }
        page->used += filled;

        if (page->free_list) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            page->used++;
            ((uint8_t*)blk)[0] = header_byte;
            return SMALL_V6_USER_FROM_BASE(blk);
        }

        if (ctx->tls_count_c4 > 0) {
            void* blk = ctx->tls_freelist_c4[--ctx->tls_count_c4];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    }

    return hak_pool_try_alloc(class_idx == SMALL_V6_C6_CLASS_IDX ? 512
                                  : (class_idx == SMALL_V6_C5_CLASS_IDX ? 256 : 128),
                              0);
}

/// Cold path: free to page freelist - called when TLS full or cross-thread
/// @param ptr: USER pointer
/// @param class_idx: C4, C5 or C6 (unused; the page lookup identifies the class)
void small_free_cold_v6(void* ptr, uint32_t class_idx) {
    (void)class_idx; // Not needed for page lookup

    void* base = SMALL_V6_BASE_FROM_USER(ptr);

    SmallPageMetaV6* page = small_page_meta_v6_of(ptr);
    if (!page) {
        hak_pool_free(ptr, 0, 0);
        return;
    }

    *(void**)base = page->free_list;
    page->free_list = base;
    if (page->used > 0) page->used--;

    if (page->used == 0) {
        small_cold_v6_retire_page(page);
    }
}

// ============================================================================
// Phase V6-HDR-2: Headerless Free/Alloc Implementation
// ============================================================================

/// Headerless free: uses RegionIdBox for ptr classification
/// @param ctx: TLS context
/// @param ptr: USER pointer to free
/// @param class_idx_hint: class_idx from front (header byte)
/// @return: true if handled by v6, false if fallback needed
bool small_v6_headerless_free(SmallHeapCtxV6* ctx, void* ptr, uint8_t class_idx_hint) {
    // Step 1: RegionIdBox lookup (no header read)
    RegionLookupV6 lk = region_id_lookup_v6(ptr);

    if (lk.kind != REGION_KIND_SMALL_V6) {
        // Not a v6-managed region -> front should fall back
        return false;
    }

    SmallPageMetaV6* page = (SmallPageMetaV6*)lk.page_meta;
    if (!page) {
        return false;
    }

    uint8_t class_idx = page->class_idx;

    // Step 2: OBSERVE mode - validate class_idx hint
    if (unlikely(small_v6_region_observe_enabled())) {
        if (class_idx != class_idx_hint) {
            fprintf(stderr, "[V6_HDR_FREE] MISMATCH ptr=%p hint=%u actual=%u\n",
                    ptr, class_idx_hint, class_idx);
        }
    }

    // Step 3: Convert USER -> BASE (no header touch)
    void* base = SMALL_V6_BASE_FROM_USER(ptr);

    // Step 4: TLS ownership check + TLS push
    if (small_tls_owns_ptr_v6(ctx, ptr)) {
        // C6 TLS push
        if (class_idx == SMALL_V6_C6_CLASS_IDX && ctx->tls_count_c6 < SMALL_V6_TLS_CAP) {
            ctx->tls_freelist_c6[ctx->tls_count_c6++] = base;
            return true;
        }
        // C5 TLS push
        if (class_idx == SMALL_V6_C5_CLASS_IDX && ctx->tls_count_c5 < SMALL_V6_TLS_CAP) {
            ctx->tls_freelist_c5[ctx->tls_count_c5++] = base;
            return true;
        }
        // C4 TLS push
        if (class_idx == SMALL_V6_C4_CLASS_IDX && ctx->tls_count_c4 < SMALL_V6_TLS_CAP) {
            ctx->tls_freelist_c4[ctx->tls_count_c4++] = base;
            return true;
        }
    }

    // Step 5: Cold path - push to page freelist
    *(void**)base = page->free_list;
    page->free_list = base;
    if (page->used > 0) page->used--;

    // Retire empty page
    if (page->used == 0) {
        small_cold_v6_retire_page(page);
    }

    return true;
}
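
// Call-site sketch for the headerless free path (legacy_free_fallback() is a
// hypothetical stand-in for whatever path the front uses when v6 declines):
//
//   if (!small_v6_headerless_free(small_heap_ctx_v6(), ptr, hint))
//       legacy_free_fallback(ptr);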

/// Headerless alloc: TLS pop without header write
/// Header is already written during carve/refill
/// @param ctx: TLS context
/// @param class_idx: class index (4=C4, 5=C5, 6=C6)
/// @return: USER pointer or NULL (fallback needed)
void* small_v6_headerless_alloc(SmallHeapCtxV6* ctx, uint8_t class_idx) {
    // TLS fast path (no header write - already done in refill)
    if (class_idx == SMALL_V6_C6_CLASS_IDX) {
        if (likely(ctx->tls_count_c6 > 0)) {
            void* blk = ctx->tls_freelist_c6[--ctx->tls_count_c6];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    } else if (class_idx == SMALL_V6_C5_CLASS_IDX) {
        if (likely(ctx->tls_count_c5 > 0)) {
            void* blk = ctx->tls_freelist_c5[--ctx->tls_count_c5];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    } else if (class_idx == SMALL_V6_C4_CLASS_IDX) {
        if (likely(ctx->tls_count_c4 > 0)) {
            void* blk = ctx->tls_freelist_c4[--ctx->tls_count_c4];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    }

    // TLS empty -> need refill from cold path
    // NOTE: Refill writes the header, so alloc doesn't need to
    SmallPageMetaV6* page = small_cold_v6_refill_page(class_idx);
    if (!page || !page->free_list) {
        return NULL; // Front should fall back to legacy
    }

    uint8_t header_byte = SMALL_V6_HEADER_FROM_CLASS(class_idx);
    int max_fill = SMALL_V6_TLS_CAP;
    int filled = 0;

    // Refill TLS from page
    if (class_idx == SMALL_V6_C6_CLASS_IDX) {
        max_fill -= ctx->tls_count_c6;
        while (page->free_list && filled < max_fill - 1) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            ((uint8_t*)blk)[0] = header_byte; // Header write on refill only
            ctx->tls_freelist_c6[ctx->tls_count_c6++] = blk;
            filled++;
        }
        page->used += filled;

        if (page->free_list) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            page->used++;
            ((uint8_t*)blk)[0] = header_byte;
            return SMALL_V6_USER_FROM_BASE(blk);
        }

        if (ctx->tls_count_c6 > 0) {
            void* blk = ctx->tls_freelist_c6[--ctx->tls_count_c6];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    } else if (class_idx == SMALL_V6_C5_CLASS_IDX) {
        max_fill -= ctx->tls_count_c5;
        while (page->free_list && filled < max_fill - 1) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            ((uint8_t*)blk)[0] = header_byte;
            ctx->tls_freelist_c5[ctx->tls_count_c5++] = blk;
            filled++;
        }
        page->used += filled;

        if (page->free_list) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            page->used++;
            ((uint8_t*)blk)[0] = header_byte;
            return SMALL_V6_USER_FROM_BASE(blk);
        }

        if (ctx->tls_count_c5 > 0) {
            void* blk = ctx->tls_freelist_c5[--ctx->tls_count_c5];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    } else if (class_idx == SMALL_V6_C4_CLASS_IDX) {
        max_fill -= ctx->tls_count_c4;
        while (page->free_list && filled < max_fill - 1) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            ((uint8_t*)blk)[0] = header_byte;
            ctx->tls_freelist_c4[ctx->tls_count_c4++] = blk;
            filled++;
        }
        page->used += filled;

        if (page->free_list) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            page->used++;
            ((uint8_t*)blk)[0] = header_byte;
            return SMALL_V6_USER_FROM_BASE(blk);
        }

        if (ctx->tls_count_c4 > 0) {
            void* blk = ctx->tls_freelist_c4[--ctx->tls_count_c4];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    }

    return NULL;
}
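
// Alloc-side counterpart of the call-site sketch above
// (legacy_alloc_fallback() is hypothetical):
//
//   void* p = small_v6_headerless_alloc(small_heap_ctx_v6(), class_idx);
//   if (!p)
//       p = legacy_alloc_fallback(size);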