// smallobject_hotbox_v5.c - SmallObject HotBox v5 Full Implementation (Phase v5-2)
//
// Phase v5-2: C6-only full implementation with segment-based allocation
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include "box/smallsegment_v5_box.h"
#include "box/smallobject_hotbox_v5_box.h"
#include "box/smallobject_cold_iface_v5.h"
#include "box/smallobject_v5_env_box.h"
#include "tiny_region_id.h" // For tiny_region_id_write_header
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
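// likely()/unlikely() wrap the GCC/Clang __builtin_expect branch hints used on
// the hot paths below; the #ifndef guard avoids clashing with an allocator-wide
// definition if one is already in scope.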
// TLS context
static __thread SmallHeapCtxV5 g_small_heap_ctx_v5;
static __thread int g_small_heap_ctx_v5_init = 0;
SmallHeapCtxV5* small_heap_ctx_v5(void) {
    // Phase v5-4: Lazy initialization of header_mode (cached from ENV once per thread)
    if (unlikely(!g_small_heap_ctx_v5_init)) {
        g_small_heap_ctx_v5.header_mode = (uint8_t)small_heap_v5_header_mode();
        g_small_heap_ctx_v5_init = 1;
    }
    return &g_small_heap_ctx_v5;
}
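// Illustrative call sequence (a sketch, not the real front-end; the code that
// derives class_idx and recovers the block base from the user pointer lives
// outside this file, and block_base_of() below is a hypothetical helper, not
// an API defined here):
//
//   SmallHeapCtxV5* ctx = small_heap_ctx_v5();
//   void* user = small_alloc_fast_v5(size, class_idx, ctx);
//   /* ... use the allocation ... */
//   small_free_fast_v5(block_base_of(user), class_idx, ctx);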
// Forward declarations for pool v1 fallback
extern void* hak_pool_try_alloc(size_t size, uintptr_t site_id);
extern void hak_pool_free(void* ptr, size_t size, uintptr_t site_id);
// ============================================================================
// Helper: Slow path (refill from partial or cold)
// ============================================================================
static SmallPageMetaV5* alloc_slow_v5(SmallHeapCtxV5* ctx, uint32_t class_idx) {
    SmallClassHeapV5* h = &ctx->cls[class_idx];
    SmallPageMetaV5* cur = h->current;
    // If the current page is exhausted (no free blocks left), park it on the
    // full list; only fully allocated pages go there, never partially free ones.
    if (cur && !cur->free_list) {
        SMALL_PAGE_V5_PUSH_FULL(h, cur);
        h->current = NULL;
    }
    // Try to pop from the partial list (pages with some free blocks)
    SmallPageMetaV5* from_partial = SMALL_PAGE_V5_POP_PARTIAL(h);
    if (from_partial) {
        h->current = from_partial;
        return from_partial;
    }
    // Refill from the cold interface (allocates a new page)
    SmallPageMetaV5* page = small_cold_v5_refill_page(ctx, class_idx);
    if (!page) return NULL;
    h->current = page;
    return page;
}
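// Page movement in the slow path (a summary of the function above):
//
//   current (exhausted) --push--> full list
//   partial list        --pop---> current
//   cold refill         --new---> current
//
// Full pages migrate back toward partial/current from the free path below.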
// ============================================================================
// Phase v5-2: Fast alloc (C6-only full implementation)
// ============================================================================
void* small_alloc_fast_v5(size_t size, uint32_t class_idx, SmallHeapCtxV5* ctx) {
    // Note: size is only consumed by the pool v1 fallback paths below.
    // C6-only check
    if (unlikely(class_idx != SMALL_HEAP_V5_C6_CLASS_IDX)) {
        // Fallback to pool v1 for non-C6 classes
        return hak_pool_try_alloc(size, 0);
    }
    SmallClassHeapV5* h = &ctx->cls[SMALL_HEAP_V5_C6_CLASS_IDX];
    SmallPageMetaV5* page = h->current;
    // Fast path: pop the head of the current page's freelist
    if (likely(page && page->free_list)) {
        void* blk = page->free_list;
        void* next = NULL;
        memcpy(&next, blk, sizeof(void*));
        page->free_list = next;
        page->used++;
        // Phase v5-4: Header light mode optimization
        if (ctx->header_mode == SMALL_HEAP_V5_HEADER_MODE_LIGHT) {
            // light mode: header already written during carve, skip per-alloc write
            return (uint8_t*)blk + 1; // return USER pointer (skip header byte)
        } else {
            // full mode: write header on every alloc (standard behavior)
            return tiny_region_id_write_header(blk, class_idx);
        }
    }
    // Slow path: current page exhausted or NULL
    page = alloc_slow_v5(ctx, class_idx);
    if (unlikely(!page || !page->free_list)) {
        // Cold refill failed, fall back to pool v1
        return hak_pool_try_alloc(size, 0);
    }
    // Allocate from the newly acquired page
    void* blk = page->free_list;
    void* next = NULL;
    memcpy(&next, blk, sizeof(void*));
    page->free_list = next;
    page->used++;
    // Phase v5-4: Header light mode optimization
    if (ctx->header_mode == SMALL_HEAP_V5_HEADER_MODE_LIGHT) {
        // light mode: header already written during carve, skip per-alloc write
        return (uint8_t*)blk + 1; // return USER pointer (skip header byte)
    } else {
        // full mode: write header on every alloc (standard behavior)
        return tiny_region_id_write_header(blk, class_idx);
    }
}
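// Layout note (inferred from the alloc/free code in this file, not from
// external docs): a live block stores a one-byte region-id header at offset 0
// with user data from offset 1, while a free block reuses its first
// sizeof(void*) bytes as the intrusive freelist link.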
// ============================================================================
// Helper: Determine page location in heap lists (Phase v5-3)
// ============================================================================
static inline page_loc_t get_page_location(SmallClassHeapV5* h, SmallPageMetaV5* page,
                                           SmallPageMetaV5** prev_out) {
    if (prev_out) *prev_out = NULL;
    if (!h || !page) return LOC_NONE;
    // Check current (O(1))
    if (h->current == page) {
        return LOC_CURRENT;
    }
    // Check partial list (typically 0-1 pages in v5-3)
    SmallPageMetaV5* prev = NULL;
    for (SmallPageMetaV5* p = h->partial_head; p; prev = p, p = p->next) {
        if (p == page) {
            if (prev_out) *prev_out = prev;
            return LOC_PARTIAL;
        }
    }
    // Check full list
    prev = NULL;
    for (SmallPageMetaV5* p = h->full_head; p; prev = p, p = p->next) {
        if (p == page) {
            if (prev_out) *prev_out = prev;
            return LOC_FULL;
        }
    }
    return LOC_NONE;
}
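// Cost note: the current-page check is O(1); the partial and full scans are
// linear. That stays cheap while SMALL_HEAP_V5_C6_PARTIAL_LIMIT caps the
// partial list at one page, but the full-list scan can grow with live pages.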
// ============================================================================
// Phase v5-3: Fast free (C6-only O(1) implementation)
// ============================================================================
void small_free_fast_v5(void* ptr, uint32_t class_idx, SmallHeapCtxV5* ctx) {
    if (unlikely(!ptr)) {
        return;
    }
    // C6-only check
    if (unlikely(class_idx != SMALL_HEAP_V5_C6_CLASS_IDX)) {
        hak_pool_free(ptr, 0, 0);
        return;
    }
    // Phase v5-3: O(1) segment lookup (no list search)
    SmallPageMetaV5* page = small_segment_v5_page_meta_of(ptr);
    if (unlikely(!page)) {
        // Not in a v5 segment, fall back to pool v1
        hak_pool_free(ptr, 0, 0);
        return;
    }
    SmallClassHeapV5* h = &ctx->cls[SMALL_HEAP_V5_C6_CLASS_IDX];
    // Push onto the page freelist (O(1))
    void* head = page->free_list;
    memcpy(ptr, &head, sizeof(void*));
    page->free_list = ptr;
    if (page->used > 0) {
        page->used--;
    }
    // Handle empty page (used == 0)
    if (page->used == 0) {
        // Fast path: if this is the current page, just keep it
        if (h->current == page) {
            return;
        }
        // Determine location and unlink (rare path)
        SmallPageMetaV5* prev = NULL;
        page_loc_t loc = get_page_location(h, page, &prev);
        if (loc != LOC_NONE && loc != LOC_CURRENT) {
            SMALL_PAGE_V5_UNLINK(h, loc, prev, page);
        }
        // Promote to current if there is none
        if (!h->current) {
            h->current = page;
            page->next = NULL;
            return;
        }
        // Try the partial list (limit 1)
        if (h->partial_count < SMALL_HEAP_V5_C6_PARTIAL_LIMIT) {
            SMALL_PAGE_V5_PUSH_PARTIAL(h, page);
            return;
        }
        // Retire to cold
        small_cold_v5_retire_page(ctx, page);
        return;
    }
    // Page not empty: handle the full→partial transition
    if (h->current != page) {
        SmallPageMetaV5* prev = NULL;
        page_loc_t loc = get_page_location(h, page, &prev);
        if (loc == LOC_FULL && page->free_list) {
            // Move from full to partial
            SMALL_PAGE_V5_UNLINK(h, loc, prev, page);
            if (h->partial_count < SMALL_HEAP_V5_C6_PARTIAL_LIMIT) {
                SMALL_PAGE_V5_PUSH_PARTIAL(h, page);
            } else {
                SMALL_PAGE_V5_PUSH_FULL(h, page);
            }
        } else if (!h->current) {
            // No current page, promote this one
            if (loc != LOC_NONE) {
                SMALL_PAGE_V5_UNLINK(h, loc, prev, page);
            }
            h->current = page;
            page->next = NULL;
        }
    }
}
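// Free-path transitions (a summary of the branches above):
//
//   page becomes empty : keep it if current; otherwise unlink, then promote to
//                        current, park on partial (limit 1), or retire to cold
//   page still in use  : a full page with free blocks moves to partial (or
//                        stays full if partial is at its limit); if there is
//                        no current page, this page is promoted instead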
// ============================================================================
// Helper: C6 block size query
// ============================================================================
uint32_t small_heap_v5_c6_block_size(void) {
    return SMALL_HEAP_V5_C6_BLOCK_SIZE;
}