// smallobject_hotbox_v5.c - SmallObject HotBox v5 Full Implementation (Phase v5-2)
//
// Phase v5-2: C6-only full implementation with segment-based allocation

#include <stddef.h>   // size_t
#include <stdint.h>   // uint8_t, uint32_t, uintptr_t
#include <string.h>   // memcpy

#include "box/smallsegment_v5_box.h"
#include "box/smallobject_hotbox_v5_box.h"
#include "box/smallobject_cold_iface_v5.h"
#include "box/smallobject_v5_env_box.h"
#include "tiny_region_id.h"  // For tiny_region_id_write_header

#ifndef likely
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif

// TLS context
static __thread SmallHeapCtxV5 g_small_heap_ctx_v5;
static __thread int g_small_heap_ctx_v5_init = 0;

SmallHeapCtxV5* small_heap_ctx_v5(void) {
    // Phase v5-4: Lazy initialization of header_mode (cached from ENV once per thread)
    if (unlikely(!g_small_heap_ctx_v5_init)) {
        g_small_heap_ctx_v5.header_mode = (uint8_t)small_heap_v5_header_mode();
        g_small_heap_ctx_v5_init = 1;
    }
    return &g_small_heap_ctx_v5;
}

// Forward declarations for pool v1 fallback
extern void* hak_pool_try_alloc(size_t size, uintptr_t site_id);
extern void hak_pool_free(void* ptr, size_t size, uintptr_t site_id);

// ============================================================================
// Helper: Slow path (refill from partial or cold)
// ============================================================================
static SmallPageMetaV5* alloc_slow_v5(SmallHeapCtxV5* ctx, uint32_t class_idx) {
    SmallClassHeapV5* h = &ctx->cls[class_idx];
    SmallPageMetaV5* cur = h->current;

    // If current exists but is exhausted, move it to the full list only
    // (exhausted pages are fully allocated, not partially free)
    if (cur && !cur->free_list) {
        SMALL_PAGE_V5_PUSH_FULL(h, cur);
        h->current = NULL;
    }

    // Try to pop from the partial list (pages with some free blocks)
    SmallPageMetaV5* from_partial = SMALL_PAGE_V5_POP_PARTIAL(h);
    if (from_partial) {
        h->current = from_partial;
        return from_partial;
    }

    // Refill from the cold interface (allocates a new page)
    SmallPageMetaV5* page = small_cold_v5_refill_page(ctx, class_idx);
    if (!page) return NULL;
    h->current = page;
    return page;
}

// ============================================================================
// Phase v5-2: Fast alloc (C6-only full implementation)
// ============================================================================
void* small_alloc_fast_v5(size_t size, uint32_t class_idx, SmallHeapCtxV5* ctx) {
    // C6-only check: fall back to pool v1 for non-C6 classes
    // (size is only needed on the fallback paths)
    if (unlikely(class_idx != SMALL_HEAP_V5_C6_CLASS_IDX)) {
        return hak_pool_try_alloc(size, 0);
    }

    SmallClassHeapV5* h = &ctx->cls[SMALL_HEAP_V5_C6_CLASS_IDX];
    SmallPageMetaV5* page = h->current;

    // Fast path: pop from the current page's freelist
    if (likely(page && page->free_list)) {
        void* blk = page->free_list;
        void* next = NULL;
        memcpy(&next, blk, sizeof(void*));
        page->free_list = next;
        page->used++;

        // Phase v5-4: Header light mode optimization
        if (ctx->header_mode == SMALL_HEAP_V5_HEADER_MODE_LIGHT) {
            // light mode: header already written during carve, skip per-alloc write
            return (uint8_t*)blk + 1;  // return USER pointer (skip header byte)
        } else {
            // full mode: write header on every alloc (standard behavior)
            return tiny_region_id_write_header(blk, class_idx);
        }
    }

    // Slow path: current page exhausted or NULL
    page = alloc_slow_v5(ctx, class_idx);
    if (unlikely(!page || !page->free_list)) {
        // Cold refill failed, fall back to pool v1
        return hak_pool_try_alloc(size, 0);
    }

    // Allocate from the newly acquired page
    void* blk = page->free_list;
    void* next = NULL;
    memcpy(&next, blk, sizeof(void*));
    page->free_list = next;
    page->used++;

    // Phase v5-4: Header light mode optimization
    if (ctx->header_mode == SMALL_HEAP_V5_HEADER_MODE_LIGHT) {
        // light mode: header already written during carve, skip per-alloc write
        return (uint8_t*)blk + 1;  // return USER pointer (skip header byte)
    } else {
        // full mode: write header on every alloc (standard behavior)
        return tiny_region_id_write_header(blk, class_idx);
    }
}
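
// ----------------------------------------------------------------------------
// Note on the freelist shape: the per-page freelist above is intrusive - each
// free block stores the next-block pointer in its own first sizeof(void*)
// bytes, so the list needs no side storage, and memcpy is used for the link
// loads/stores to stay type-safe on raw block memory. A minimal standalone
// sketch of the same push/pop pattern (illustrative only; these helpers are
// hypothetical, not part of the box API, and unused by the hot paths):
// ----------------------------------------------------------------------------
static inline void small_v5_flist_push_sketch_(void** head, void* blk) {
    memcpy(blk, head, sizeof(void*));  // blk->next = *head
    *head = blk;                       // list head now points at blk
}

static inline void* small_v5_flist_pop_sketch_(void** head) {
    void* blk = *head;
    if (blk) {
        memcpy(head, blk, sizeof(void*));  // *head = blk->next
    }
    return blk;
}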
// ============================================================================
// Helper: Determine page location in heap lists (Phase v5-3)
// ============================================================================
static inline page_loc_t get_page_location(SmallClassHeapV5* h,
                                           SmallPageMetaV5* page,
                                           SmallPageMetaV5** prev_out) {
    if (prev_out) *prev_out = NULL;
    if (!h || !page) return LOC_NONE;

    // Check current (O(1))
    if (h->current == page) {
        return LOC_CURRENT;
    }

    // Check partial list (typically 0-1 pages in v5-3)
    SmallPageMetaV5* prev = NULL;
    for (SmallPageMetaV5* p = h->partial_head; p; prev = p, p = p->next) {
        if (p == page) {
            if (prev_out) *prev_out = prev;
            return LOC_PARTIAL;
        }
    }

    // Check full list
    prev = NULL;
    for (SmallPageMetaV5* p = h->full_head; p; prev = p, p = p->next) {
        if (p == page) {
            if (prev_out) *prev_out = prev;
            return LOC_FULL;
        }
    }

    return LOC_NONE;
}

// ============================================================================
// Phase v5-3: Fast free (C6-only O(1) implementation)
// ============================================================================
void small_free_fast_v5(void* ptr, uint32_t class_idx, SmallHeapCtxV5* ctx) {
    if (unlikely(!ptr)) {
        return;
    }

    // C6-only check: non-C6 classes go back to pool v1
    if (unlikely(class_idx != SMALL_HEAP_V5_C6_CLASS_IDX)) {
        hak_pool_free(ptr, 0, 0);
        return;
    }

    // Phase v5-3: O(1) segment lookup (no list search)
    SmallPageMetaV5* page = small_segment_v5_page_meta_of(ptr);
    if (unlikely(!page)) {
        // Not in a v5 segment, fall back to pool v1
        hak_pool_free(ptr, 0, 0);
        return;
    }

    SmallClassHeapV5* h = &ctx->cls[SMALL_HEAP_V5_C6_CLASS_IDX];

    // Push onto the page's freelist (O(1))
    void* head = page->free_list;
    memcpy(ptr, &head, sizeof(void*));
    page->free_list = ptr;
    if (page->used > 0) {
        page->used--;
    }

    // Handle empty page (used == 0)
    if (page->used == 0) {
        // Fast path: if this is current, just keep it
        if (h->current == page) {
            return;
        }

        // Determine location and unlink (rare path)
        SmallPageMetaV5* prev = NULL;
        page_loc_t loc = get_page_location(h, page, &prev);
        if (loc != LOC_NONE && loc != LOC_CURRENT) {
            SMALL_PAGE_V5_UNLINK(h, loc, prev, page);
        }

        // No current page: promote this empty page to current
        if (!h->current) {
            h->current = page;
            page->next = NULL;
            return;
        }

        // Try the partial list (limit 1)
        if (h->partial_count < SMALL_HEAP_V5_C6_PARTIAL_LIMIT) {
            SMALL_PAGE_V5_PUSH_PARTIAL(h, page);
            return;
        }

        // Retire to cold
        small_cold_v5_retire_page(ctx, page);
        return;
    }

    // Page not empty - handle full→partial transition
    if (h->current != page) {
        SmallPageMetaV5* prev = NULL;
        page_loc_t loc = get_page_location(h, page, &prev);
        if (loc == LOC_FULL && page->free_list) {
            // Move from full to partial (or back to full if partial is at its limit)
            SMALL_PAGE_V5_UNLINK(h, loc, prev, page);
            if (h->partial_count < SMALL_HEAP_V5_C6_PARTIAL_LIMIT) {
                SMALL_PAGE_V5_PUSH_PARTIAL(h, page);
            } else {
                SMALL_PAGE_V5_PUSH_FULL(h, page);
            }
        } else if (!h->current) {
            // No current page: promote this one
            if (loc != LOC_NONE) {
                SMALL_PAGE_V5_UNLINK(h, loc, prev, page);
            }
            h->current = page;
            page->next = NULL;
        }
    }
}
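
// ----------------------------------------------------------------------------
// Page lifecycle summary (restates the logic of small_alloc_fast_v5 /
// small_free_fast_v5 above; the state names are descriptive, not API):
//
//   cold refill -> current --(freelist exhausted on alloc)--> full
//   full --(a block freed)--> partial (or back to full past the partial limit)
//   partial --(popped by alloc_slow_v5)--> current
//   used drops to 0 -> kept as current, promoted to current, parked in
//                      partial (limit 1), or retired to cold
//
// Two predicates fall out of the meta fields this file touches ("full" means
// no free blocks left to carve, "empty" means no live allocations). Minimal
// sketch, assuming only the free_list/used fields referenced above; the
// helper names are hypothetical, not part of the box API:
// ----------------------------------------------------------------------------
static inline int small_v5_page_is_full_sketch_(const SmallPageMetaV5* p) {
    return p->free_list == NULL;  // freelist drained, every block handed out
}

static inline int small_v5_page_is_empty_sketch_(const SmallPageMetaV5* p) {
    return p->used == 0;  // every block has been returned
}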
// ============================================================================
// Helper: C6 block size query
// ============================================================================
uint32_t small_heap_v5_c6_block_size(void) {
    return SMALL_HEAP_V5_C6_BLOCK_SIZE;
}
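
// ----------------------------------------------------------------------------
// Usage sketch (illustrative only, compiled out): a C6 round trip through the
// two fast paths. Whether the pointer handed back to small_free_fast_v5()
// must be the user pointer or the block base depends on the tiny_region_id
// header convention defined outside this file; this sketch assumes the alloc
// result is passed straight back.
// ----------------------------------------------------------------------------
#if 0
static void small_heap_v5_roundtrip_sketch_(void) {
    SmallHeapCtxV5* ctx = small_heap_ctx_v5();  // per-thread ctx, lazily caches header_mode
    void* p = small_alloc_fast_v5(small_heap_v5_c6_block_size(),
                                  SMALL_HEAP_V5_C6_CLASS_IDX, ctx);
    if (p) {
        small_free_fast_v5(p, SMALL_HEAP_V5_C6_CLASS_IDX, ctx);
    }
}
#endif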