// smallobject_hotbox_v5.c - SmallObject HotBox v5 Full Implementation (Phase v5-2)
//
// Phase v5-2: C6-only full implementation with segment-based allocation

#include <stdint.h>  /* uint32_t, uintptr_t, uint8_t */
#include <stddef.h>  /* size_t */
#include <string.h>  /* memcpy */
#include "box/smallsegment_v5_box.h"
#include "box/smallobject_hotbox_v5_box.h"
#include "box/smallobject_cold_iface_v5.h"
#include "box/smallobject_v5_env_box.h"
#include "tiny_region_id.h"  // For tiny_region_id_write_header

#ifndef likely
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif

// TLS context
static __thread SmallHeapCtxV5 g_small_heap_ctx_v5;

SmallHeapCtxV5* small_heap_ctx_v5(void) {
    return &g_small_heap_ctx_v5;
}

// Forward declarations for pool v1 fallback
extern void* hak_pool_try_alloc(size_t size, uintptr_t site_id);
extern void  hak_pool_free(void* ptr, size_t size, uintptr_t site_id);

// ============================================================================
// Helper: Slow path (refill from partial or cold)
// ============================================================================
static SmallPageMetaV5* alloc_slow_v5(SmallHeapCtxV5* ctx, uint32_t class_idx) {
    SmallClassHeapV5* h = &ctx->cls[class_idx];
    SmallPageMetaV5* cur = h->current;

    // If current exists but is exhausted, move it to the full list only
    // (exhausted pages are fully allocated, not partially free)
    if (cur && !cur->free_list) {
        SMALL_PAGE_V5_PUSH_FULL(h, cur);
        h->current = NULL;
    }

    // Try to pop from the partial list (pages with some free blocks)
    SmallPageMetaV5* from_partial = SMALL_PAGE_V5_POP_PARTIAL(h);
    if (from_partial) {
        h->current = from_partial;
        return from_partial;
    }

    // Refill from the cold interface (allocates a new page)
    SmallPageMetaV5* page = small_cold_v5_refill_page(ctx, class_idx);
    if (!page) return NULL;
    h->current = page;
    return page;
}

// ============================================================================
// Phase v5-2: Fast alloc (C6-only full implementation)
// ============================================================================
void* small_alloc_fast_v5(size_t size, uint32_t class_idx, SmallHeapCtxV5* ctx) {
    (void)size;  // Not used in fast path

    // C6-only check
    if (unlikely(class_idx != SMALL_HEAP_V5_C6_CLASS_IDX)) {
        // Fallback to pool v1 for non-C6 classes
        return hak_pool_try_alloc(size, 0);
    }

    SmallClassHeapV5* h = &ctx->cls[SMALL_HEAP_V5_C6_CLASS_IDX];
    SmallPageMetaV5* page = h->current;

    // Fast path: try the current page's freelist
    if (likely(page && page->free_list)) {
        void* blk = page->free_list;
        void* next = NULL;
        memcpy(&next, blk, sizeof(void*));
        page->free_list = next;
        page->used++;
        // Write header and return USER pointer
        return tiny_region_id_write_header(blk, class_idx);
    }

    // Slow path: current page is exhausted or NULL
    page = alloc_slow_v5(ctx, class_idx);
    if (unlikely(!page || !page->free_list)) {
        // Cold refill failed, fall back to pool v1
        return hak_pool_try_alloc(size, 0);
    }

    // Allocate from the newly acquired page
    void* blk = page->free_list;
    void* next = NULL;
    memcpy(&next, blk, sizeof(void*));
    page->free_list = next;
    page->used++;

    // Write header and return USER pointer
    return tiny_region_id_write_header(blk, class_idx);
}
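// ----------------------------------------------------------------------------
// Illustrative usage (sketch only, not part of the build): how a call site
// might route a C6-sized request through the fast path using the per-thread
// context accessor above. `my_alloc_c6` is a hypothetical name invented for
// this sketch.
// ----------------------------------------------------------------------------
#if 0
static void* my_alloc_c6(size_t size) {
    SmallHeapCtxV5* ctx = small_heap_ctx_v5();  /* per-thread TLS heap */
    /* class_idx must be SMALL_HEAP_V5_C6_CLASS_IDX to stay on the v5 path;
       any other class falls back to pool v1 inside small_alloc_fast_v5(). */
    return small_alloc_fast_v5(size, SMALL_HEAP_V5_C6_CLASS_IDX, ctx);
}
#endif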
// ============================================================================
// Helper: Find page containing pointer
// ============================================================================
static inline int ptr_in_page(const SmallPageMetaV5* page, const uint8_t* ptr) {
    if (!page || !ptr || !page->segment) return 0;
    SmallSegmentV5* seg = (SmallSegmentV5*)page->segment;
    uintptr_t page_base = seg->base +
        ((uintptr_t)page->page_idx * SMALL_SEGMENT_V5_PAGE_SIZE);
    size_t span = (size_t)page->capacity * SMALL_HEAP_V5_C6_BLOCK_SIZE;
    if ((uintptr_t)ptr < page_base || (uintptr_t)ptr >= page_base + span) return 0;

    // Check alignment
    size_t off = (uintptr_t)ptr - page_base;
    return (off % SMALL_HEAP_V5_C6_BLOCK_SIZE) == 0;
}

static SmallPageMetaV5* find_page(SmallClassHeapV5* h, const uint8_t* ptr,
                                  page_loc_t* loc, SmallPageMetaV5** prev_out) {
    if (loc) *loc = LOC_NONE;
    if (prev_out) *prev_out = NULL;
    if (!h || !ptr) return NULL;

    // Check current
    if (h->current && ptr_in_page(h->current, ptr)) {
        if (loc) *loc = LOC_CURRENT;
        return h->current;
    }

    // Check partial list
    SmallPageMetaV5* prev = NULL;
    for (SmallPageMetaV5* p = h->partial_head; p; prev = p, p = p->next) {
        if (ptr_in_page(p, ptr)) {
            if (loc) *loc = LOC_PARTIAL;
            if (prev_out) *prev_out = prev;
            return p;
        }
    }

    // Check full list
    prev = NULL;
    for (SmallPageMetaV5* p = h->full_head; p; prev = p, p = p->next) {
        if (ptr_in_page(p, ptr)) {
            if (loc) *loc = LOC_FULL;
            if (prev_out) *prev_out = prev;
            return p;
        }
    }
    return NULL;
}

// ============================================================================
// Phase v5-2: Fast free (C6-only full implementation)
// ============================================================================
void small_free_fast_v5(void* ptr, uint32_t class_idx, SmallHeapCtxV5* ctx) {
    if (unlikely(!ptr)) {
        return;
    }

    // C6-only check
    if (unlikely(class_idx != SMALL_HEAP_V5_C6_CLASS_IDX)) {
        // Fallback to pool v1 for non-C6 classes
        hak_pool_free(ptr, 0, 0);
        return;
    }

    SmallClassHeapV5* h = &ctx->cls[SMALL_HEAP_V5_C6_CLASS_IDX];

    // Try O(1) segment lookup first (Phase v5-2 optimization)
    SmallPageMetaV5* page = small_segment_v5_page_meta_of(ptr);
    page_loc_t loc = LOC_NONE;
    SmallPageMetaV5* prev = NULL;

    // If the segment lookup failed, search through the lists (fallback)
    if (!page) {
        page = find_page(h, (const uint8_t*)ptr, &loc, &prev);
        if (!page) {
            // Not found in the v5 heap, fall back to pool v1
            hak_pool_free(ptr, 0, 0);
            return;
        }
    } else {
        // Segment lookup succeeded; determine the page's location in the lists
        if (h->current == page) {
            loc = LOC_CURRENT;
        } else {
            // Search the partial/full lists to recover the prev pointer
            find_page(h, (const uint8_t*)ptr, &loc, &prev);
        }
    }

    // Push the block onto the page freelist
    void* head = page->free_list;
    memcpy(ptr, &head, sizeof(void*));
    page->free_list = ptr;
    if (page->used > 0) {
        page->used--;
    }

    // Handle an empty page (used == 0)
    if (page->used == 0) {
        // Unlink from its current location
        if (loc != LOC_CURRENT) {
            SMALL_PAGE_V5_UNLINK(h, loc, prev, page);
        }

        // Make it current if we don't have one
        if (!h->current) {
            h->current = page;
            page->next = NULL;
            return;
        }

        // It already is current: keep it there
        if (h->current == page) {
            page->next = NULL;
            return;
        }

        // Try to push onto the partial list
        if (h->partial_count < SMALL_HEAP_V5_C6_PARTIAL_LIMIT) {
            SMALL_PAGE_V5_PUSH_PARTIAL(h, page);
            return;
        }

        // Partial list full, retire the page
        small_cold_v5_retire_page(ctx, page);
        return;
    }

    // Page is not empty, handle list transitions
    if (!h->current) {
        // No current page, promote this one
        if (loc != LOC_CURRENT) {
            SMALL_PAGE_V5_UNLINK(h, loc, prev, page);
        }
        h->current = page;
        page->next = NULL;
    } else if (loc == LOC_FULL && page->free_list) {
        // Move from full to partial (it now has free blocks)
        SMALL_PAGE_V5_UNLINK(h, loc, prev, page);
        if (h->partial_count < SMALL_HEAP_V5_C6_PARTIAL_LIMIT) {
            SMALL_PAGE_V5_PUSH_PARTIAL(h, page);
        } else {
            SMALL_PAGE_V5_PUSH_FULL(h, page);  // Keep in full if partial limit exceeded
        }
    }
}
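// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the build): the intrusive freelist shared
// by the alloc and free paths above. A free block stores the address of the
// next free block in its own first sizeof(void*) bytes; memcpy is used rather
// than a pointer cast to avoid strict-aliasing and alignment pitfalls. The
// helper names are hypothetical.
// ----------------------------------------------------------------------------
#if 0
static void freelist_push_v5(SmallPageMetaV5* page, void* blk) {
    void* head = page->free_list;
    memcpy(blk, &head, sizeof(void*));  /* blk->next = old head */
    page->free_list = blk;              /* blk becomes the new head */
}

static void* freelist_pop_v5(SmallPageMetaV5* page) {
    void* blk = page->free_list;
    if (!blk) return NULL;              /* page exhausted */
    void* next = NULL;
    memcpy(&next, blk, sizeof(void*));  /* read blk->next */
    page->free_list = next;
    return blk;
}
#endif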
// ============================================================================
// Helper: C6 block size query
// ============================================================================
uint32_t small_heap_v5_c6_block_size(void) {
    return SMALL_HEAP_V5_C6_BLOCK_SIZE;
}
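// ----------------------------------------------------------------------------
// Illustrative usage (sketch only, not part of the build): a size gate a
// dispatcher might apply before taking the C6 fast path. `request_fits_c6` is
// a hypothetical name; the real class-selection logic lives in the v5 headers.
// ----------------------------------------------------------------------------
#if 0
static int request_fits_c6(size_t size) {
    /* small_heap_v5_c6_block_size() exposes SMALL_HEAP_V5_C6_BLOCK_SIZE, so
       any request at or below it fits in a single C6 block. */
    return size > 0 && size <= (size_t)small_heap_v5_c6_block_size();
}
#endif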