Files
hakmem/core/smallobject_core_v6.c
Moe Charm (CI) 1e04debb1b Phase v6-5: C5 extension for SmallObject Core v6
Extend v6 architecture to support C5 (129-256B) in addition to C6 (257-512B):

- SmallHeapCtxV6: Add tls_freelist_c5[32] and tls_count_c5 for C5 TLS cache (see the sketch after this list)
- smallsegment_v6_box.h: Add SMALL_V6_C5_CLASS_IDX (5) and C5_BLOCK_SIZE (256)
- smallobject_cold_iface_v6.c: Generalize refill_page for both C5 (256 blocks/page)
  and C6 (128 blocks/page)
- smallobject_core_v6.c: Add C5 fast path (alloc/free) with TLS batching
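
A minimal sketch of the extended TLS context shape, assuming SMALL_V6_TLS_CAP = 32
(matching tls_freelist_c5[32] above) and simplified field types; the real definition
in the v6 box headers carries more state:

    typedef struct SmallHeapCtxV6 {
        uint8_t* tls_seg_base;           // TLS segment ownership range [base, end)
        uint8_t* tls_seg_end;
        void*    tls_freelist_c6[32];    // C6 (257-512B) TLS cache (since v6-3)
        int      tls_count_c6;
        void*    tls_freelist_c5[32];    // C5 (129-256B) TLS cache (new in v6-5)
        int      tls_count_c5;
    } SmallHeapCtxV6;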

Performance (v6 C5 enabled):
- C5-heavy: 41.0M ops/s (-23% vs 53.6M with v6 OFF) - needs optimization
- Mixed: 36.2M ops/s (-18% vs 44.0M with v6 OFF) - functional baseline

Note: the C5 route still needs optimization in the next phase to match v6-3 performance.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2025-12-11 15:50:14 +09:00

// smallobject_core_v6.c - SmallObject Core v6 implementation (Phase v6-3)
#include <stdlib.h>
#include <string.h>
#include "box/smallobject_core_v6_box.h"
#include "box/smallobject_cold_iface_v6.h"
#include "box/smallsegment_v6_box.h"
#include "box/tiny_route_env_box.h"
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
// TLS context
static __thread struct SmallHeapCtxV6 g_small_heap_ctx_v6;
static __thread int g_small_heap_ctx_v6_init = 0;
// TLS policy snapshot
static __thread struct SmallPolicySnapshotV6 g_snap_v6;
static __thread int g_snap_v6_init = 0;
/// Get TLS heap context for v6 (lazy initialization)
/// @return: TLS context pointer (never NULL)
SmallHeapCtxV6* small_heap_ctx_v6(void) {
    if (!g_small_heap_ctx_v6_init) {
        memset(&g_small_heap_ctx_v6, 0, sizeof(g_small_heap_ctx_v6));
        // Initialize TLS segment ownership range
        SmallSegmentV6* seg = small_segment_v6_acquire_for_thread();
        if (seg && small_segment_v6_valid(seg)) {
            g_small_heap_ctx_v6.tls_seg_base = seg->base;
            g_small_heap_ctx_v6.tls_seg_end = seg->base + SMALL_SEGMENT_V6_SIZE;
        }
        g_small_heap_ctx_v6_init = 1;
    }
    return &g_small_heap_ctx_v6;
}

/// Get TLS policy snapshot for v6 (lazy initialization)
/// @return: Policy snapshot pointer (never NULL)
const SmallPolicySnapshotV6* tiny_policy_snapshot_v6(void) {
    if (!g_snap_v6_init) {
        memset(&g_snap_v6, 0, sizeof(g_snap_v6));
        // Initialize route_kind from tiny_route API (this ensures init is done)
        for (int i = 0; i < 8; i++) {
            g_snap_v6.route_kind[i] = (uint8_t)tiny_route_for_class((uint8_t)i);
        }
        g_snap_v6_init = 1;
    }
    return &g_snap_v6;
}
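
/* Typical call pattern (illustrative sketch; the real call sites live in the
 * allocator front-end, and the size argument below is an assumption):
 *
 *   SmallHeapCtxV6* ctx = small_heap_ctx_v6();
 *   const SmallPolicySnapshotV6* snap = tiny_policy_snapshot_v6();
 *   void* p = small_alloc_fast_v6(200, SMALL_V6_C5_CLASS_IDX, ctx, snap);
 *   if (p) small_free_fast_v6(p, SMALL_V6_C5_CLASS_IDX, ctx, snap);
 */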
// Forward declarations for pool v1 fallback
extern void* hak_pool_try_alloc(size_t size, uintptr_t site_id);
extern void hak_pool_free(void* ptr, size_t size, uintptr_t site_id);
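// Pool v1 is the safety net: any request the v6 route cannot serve
// (out-of-range class, non-v6 route, refill failure) is forwarded there.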
// ============================================================================
// Allocation Implementation
// ============================================================================
/// Allocate block from C5/C6 v6 TLS freelist or refill
/// @param size: requested size (unused, class_idx determines size)
/// @param class_idx: size class index (must be C5 or C6 for the v6 route)
/// @param ctx: TLS context
/// @param snap: policy snapshot
/// @return: USER pointer (BASE+1) or NULL on fallback
void* small_alloc_fast_v6(size_t size,
                          uint32_t class_idx,
                          SmallHeapCtxV6* ctx,
                          const SmallPolicySnapshotV6* snap) {
    (void)size;
    // Bounds check
    if (unlikely(class_idx >= 8)) {
        return hak_pool_try_alloc(size, 0);
    }
    uint8_t route = snap->route_kind[class_idx];
    // v6-5: Support C6 and C5 classes
    if (route != TINY_ROUTE_SMALL_HEAP_V6) {
        return hak_pool_try_alloc(size, 0);
    }
    // C6 fast path
    if (class_idx == SMALL_V6_C6_CLASS_IDX) {
        // Fast path: TLS freelist hit
        if (likely(ctx->tls_count_c6 > 0)) {
            void* blk = ctx->tls_freelist_c6[--ctx->tls_count_c6];
            // v6-3: Header already written during refill, just return USER pointer
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    }
    // C5 fast path (Phase v6-5)
    else if (class_idx == SMALL_V6_C5_CLASS_IDX) {
        // Fast path: TLS freelist hit
        if (likely(ctx->tls_count_c5 > 0)) {
            void* blk = ctx->tls_freelist_c5[--ctx->tls_count_c5];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    }
    else {
        // Unsupported class for v6
        return hak_pool_try_alloc(size, 0);
    }
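    // One cold-path page lookup below hands out blocks in bulk, so its cost
    // is amortized over up to a full TLS batch of subsequent fast-path hits.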
    // Slow path: refill TLS with multiple blocks (batching)
    SmallPageMetaV6* page = small_cold_v6_refill_page(class_idx);
    if (!page || !page->free_list) {
        return hak_pool_try_alloc(size, 0); // Safety fallback
    }
    // v6-5: Batch refill - support C6 and C5
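    // The class header byte is written once here, during refill, so the
    // fast-path hit above stays branch-free: pop a pointer, return BASE + 1.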
    uint8_t header_byte = SMALL_V6_HEADER_FROM_CLASS(class_idx);
    if (class_idx == SMALL_V6_C6_CLASS_IDX) {
        // C6 refill path
        int max_fill = SMALL_V6_TLS_CAP - ctx->tls_count_c6;
        int filled = 0;
        // Fill TLS (leave room for 1 to return)
        while (page->free_list && filled < max_fill - 1) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            ((uint8_t*)blk)[0] = header_byte;
            ctx->tls_freelist_c6[ctx->tls_count_c6++] = blk;
            filled++;
        }
        page->used += filled;
        // Pop one more to return to caller
        if (page->free_list) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            page->used++;
            ((uint8_t*)blk)[0] = header_byte;
            return SMALL_V6_USER_FROM_BASE(blk);
        }
        // If we filled TLS but no more blocks, pop from TLS
        if (ctx->tls_count_c6 > 0) {
            void* blk = ctx->tls_freelist_c6[--ctx->tls_count_c6];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    }
    else if (class_idx == SMALL_V6_C5_CLASS_IDX) {
        // C5 refill path (Phase v6-5)
        int max_fill = SMALL_V6_TLS_CAP - ctx->tls_count_c5;
        int filled = 0;
        // Fill TLS (leave room for 1 to return)
        while (page->free_list && filled < max_fill - 1) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            ((uint8_t*)blk)[0] = header_byte;
            ctx->tls_freelist_c5[ctx->tls_count_c5++] = blk;
            filled++;
        }
        page->used += filled;
        // Pop one more to return to caller
        if (page->free_list) {
            void* blk = page->free_list;
            page->free_list = *(void**)blk;
            page->used++;
            ((uint8_t*)blk)[0] = header_byte;
            return SMALL_V6_USER_FROM_BASE(blk);
        }
        // If we filled TLS but no more blocks, pop from TLS
        if (ctx->tls_count_c5 > 0) {
            void* blk = ctx->tls_freelist_c5[--ctx->tls_count_c5];
            return SMALL_V6_USER_FROM_BASE(blk);
        }
    }
    // Should not reach here
    return hak_pool_try_alloc(size, 0);
}

// ============================================================================
// Free Implementation
// ============================================================================
/// Free block to C5/C6 v6 TLS freelist or page freelist
/// @param ptr: USER pointer to free
/// @param class_idx: size class index
/// @param ctx: TLS context
/// @param snap: policy snapshot
void small_free_fast_v6(void* ptr,
                        uint32_t class_idx,
                        SmallHeapCtxV6* ctx,
                        const SmallPolicySnapshotV6* snap) {
    // Bounds check
    if (unlikely(class_idx >= 8)) {
        hak_pool_free(ptr, 0, 0);
        return;
    }
    uint8_t route = snap->route_kind[class_idx];
    // v6-5: Check if this is CORE_V6 route
    if (route != TINY_ROUTE_SMALL_HEAP_V6) {
        hak_pool_free(ptr, 0, 0);
        return;
    }
    // Convert USER pointer to BASE pointer
    void* base = SMALL_V6_BASE_FROM_USER(ptr);
    // Fast path: TLS segment ownership + TLS push
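    // small_tls_owns_ptr_v6() is expected to be a cheap range test against
    // the [tls_seg_base, tls_seg_end) window captured at context init.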
    if (likely(small_tls_owns_ptr_v6(ctx, ptr))) {
        // C6 TLS push
        if (class_idx == SMALL_V6_C6_CLASS_IDX && ctx->tls_count_c6 < SMALL_V6_TLS_CAP) {
            ctx->tls_freelist_c6[ctx->tls_count_c6++] = base;
            return;
        }
        // C5 TLS push (Phase v6-5)
        if (class_idx == SMALL_V6_C5_CLASS_IDX && ctx->tls_count_c5 < SMALL_V6_TLS_CAP) {
            ctx->tls_freelist_c5[ctx->tls_count_c5++] = base;
            return;
        }
    }
    // Slow path: page_meta lookup and push to page freelist
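    // Reached for pointers this thread's segment does not own (e.g. a
    // cross-thread free) or when the per-class TLS cache is already full.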
    SmallPageMetaV6* page = small_page_meta_v6_of(ptr);
    if (!page) {
        hak_pool_free(ptr, 0, 0);
        return;
    }
    // Push to page freelist (using BASE pointer)
    *(void**)base = page->free_list;
    page->free_list = base;
    if (page->used > 0) page->used--;
    // Retire empty page
    if (page->used == 0) {
        small_cold_v6_retire_page(page);
    }
}