// smallobject_core_v6.c - SmallObject Core v6 implementation (Phase v6-3)
#include <string.h>
#include <stdint.h>
#include "box/smallobject_core_v6_box.h"
#include "box/smallobject_cold_iface_v6.h"
#include "box/smallsegment_v6_box.h"
#include "box/tiny_route_env_box.h"

#ifndef likely
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif

// TLS context
static __thread struct SmallHeapCtxV6 g_small_heap_ctx_v6;
static __thread int g_small_heap_ctx_v6_init = 0;

// TLS policy snapshot
static __thread struct SmallPolicySnapshotV6 g_snap_v6;
static __thread int g_snap_v6_init = 0;

/// Get TLS heap context for v6 (lazy initialization)
/// @return: TLS context pointer (never NULL)
SmallHeapCtxV6* small_heap_ctx_v6(void) {
    if (!g_small_heap_ctx_v6_init) {
        memset(&g_small_heap_ctx_v6, 0, sizeof(g_small_heap_ctx_v6));
        // Initialize TLS segment ownership range
        SmallSegmentV6* seg = small_segment_v6_acquire_for_thread();
        if (seg && small_segment_v6_valid(seg)) {
            g_small_heap_ctx_v6.tls_seg_base = seg->base;
            g_small_heap_ctx_v6.tls_seg_end  = seg->base + SMALL_SEGMENT_V6_SIZE;
        }
        g_small_heap_ctx_v6_init = 1;
    }
    return &g_small_heap_ctx_v6;
}

/// Get TLS policy snapshot for v6 (lazy initialization)
/// @return: Policy snapshot pointer (never NULL)
const SmallPolicySnapshotV6* tiny_policy_snapshot_v6(void) {
    if (!g_snap_v6_init) {
        memset(&g_snap_v6, 0, sizeof(g_snap_v6));
        // Initialize route_kind from the tiny_route API (this also ensures its init is done)
        for (int i = 0; i < 8; i++) {
            g_snap_v6.route_kind[i] = (uint8_t)tiny_route_for_class((uint8_t)i);
        }
        g_snap_v6_init = 1;
    }
    return &g_snap_v6;
}

// Forward declarations for pool v1 fallback
extern void* hak_pool_try_alloc(size_t size, uintptr_t site_id);
extern void  hak_pool_free(void* ptr, size_t size, uintptr_t site_id);
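// ----------------------------------------------------------------------------
// BASE/USER pointer convention (orientation sketch)
// ----------------------------------------------------------------------------
// The hot paths below store a 1-byte class header at BASE[0] and hand callers
// a USER pointer equal to BASE+1. The real macro definitions live in
// box/smallobject_core_v6_box.h; a minimal sketch consistent with how they
// are used in this file (assumed, for orientation only) would be:
//
//   #define SMALL_V6_USER_FROM_BASE(base) ((void*)((uint8_t*)(base) + 1))
//   #define SMALL_V6_BASE_FROM_USER(user) ((void*)((uint8_t*)(user) - 1))
//
// SMALL_V6_HEADER_FROM_CLASS(ci) encodes the class index into that header
// byte; its exact encoding is defined by the box header, not here.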
// ============================================================================
// Allocation Implementation
// ============================================================================

/// Allocate block from C6 v6 TLS freelist or refill
/// @param size: requested size (unused; class_idx determines the block size)
/// @param class_idx: size class index (must be C6 for the v6 route)
/// @param ctx: TLS context
/// @param snap: policy snapshot
/// @return: USER pointer (BASE+1), or the pool v1 fallback result (which may be NULL)
void* small_alloc_fast_v6(size_t size, uint32_t class_idx,
                          SmallHeapCtxV6* ctx, const SmallPolicySnapshotV6* snap) {
    (void)size;
    // Bounds check
    if (unlikely(class_idx >= 8)) {
        return hak_pool_try_alloc(size, 0);
    }
    uint8_t route = snap->route_kind[class_idx];
    // Only handle the CORE_V6 route for the C6 class; everything else falls back
    if (route != TINY_ROUTE_SMALL_HEAP_V6 || class_idx != SMALL_V6_C6_CLASS_IDX) {
        return hak_pool_try_alloc(size, 0);
    }
    // Fast path: TLS freelist hit
    if (likely(ctx->tls_count_c6 > 0)) {
        void* blk = ctx->tls_freelist_c6[--ctx->tls_count_c6];
        // v6-3: Header already written during refill, just return USER pointer
        return SMALL_V6_USER_FROM_BASE(blk);
    }
    // Slow path: refill TLS with multiple blocks (batching)
    SmallPageMetaV6* page = small_cold_v6_refill_page(class_idx);
    if (!page || !page->free_list) {
        return hak_pool_try_alloc(size, 0); // Safety fallback
    }
    // v6-3: Batch refill - fill TLS with as many blocks as possible
    // AND write headers in batch (not per-alloc)
    uint8_t header_byte = SMALL_V6_HEADER_FROM_CLASS(class_idx);
    // tls_count_c6 is 0 on this path (the fast path would have hit otherwise),
    // so max_fill == SMALL_V6_TLS_CAP (32)
    int max_fill = SMALL_V6_TLS_CAP - ctx->tls_count_c6;
    int filled = 0;
    // Fill TLS (leave room for the one block returned to the caller)
    while (page->free_list && filled < max_fill - 1) {
        void* blk = page->free_list;
        page->free_list = *(void**)blk;
        // v6-3: Write header NOW (after the pop, before storing in TLS)
        ((uint8_t*)blk)[0] = header_byte;
        ctx->tls_freelist_c6[ctx->tls_count_c6++] = blk; // Store BASE
        filled++;
    }
    page->used += filled;
    // Pop one more block to return to the caller
    if (page->free_list) {
        void* blk = page->free_list;
        page->free_list = *(void**)blk;
        page->used++;
        // v6-3: Write header and return USER pointer
        ((uint8_t*)blk)[0] = header_byte;
        return SMALL_V6_USER_FROM_BASE(blk);
    }
    // Page ran dry during the fill: pop the return block from TLS instead
    if (ctx->tls_count_c6 > 0) {
        void* blk = ctx->tls_freelist_c6[--ctx->tls_count_c6];
        // Header already written in the loop above
        return SMALL_V6_USER_FROM_BASE(blk);
    }
    // Defensive fallback; unreachable when the refilled page had at least one block
    return hak_pool_try_alloc(size, 0);
}

// ============================================================================
// Free Implementation
// ============================================================================

/// Free block to C6 v6 TLS freelist or page freelist
/// @param ptr: USER pointer to free
/// @param class_idx: size class index
/// @param ctx: TLS context
/// @param snap: policy snapshot
void small_free_fast_v6(void* ptr, uint32_t class_idx,
                        SmallHeapCtxV6* ctx, const SmallPolicySnapshotV6* snap) {
    // Bounds check
    if (unlikely(class_idx >= 8)) {
        hak_pool_free(ptr, 0, 0);
        return;
    }
    uint8_t route = snap->route_kind[class_idx];
    // Only handle the CORE_V6 route for the C6 class; everything else falls back
    if (route != TINY_ROUTE_SMALL_HEAP_V6 || class_idx != SMALL_V6_C6_CLASS_IDX) {
        hak_pool_free(ptr, 0, 0);
        return;
    }
    // Convert USER pointer to BASE pointer
    void* base = SMALL_V6_BASE_FROM_USER(ptr);
    // Fast path: TLS segment ownership + TLS push
    if (likely(small_tls_owns_ptr_v6(ctx, ptr))) {
        if (ctx->tls_count_c6 < SMALL_V6_TLS_CAP) {
            ctx->tls_freelist_c6[ctx->tls_count_c6++] = base; // Store BASE
            return;
        }
        // TLS cache full: fall through to the page freelist
    }
    // Slow path: page_meta lookup and push to page freelist
    SmallPageMetaV6* page = small_page_meta_v6_of(ptr);
    if (!page) {
        hak_pool_free(ptr, 0, 0);
        return;
    }
    // Push to page freelist (using BASE pointer; this overwrites the header byte)
    *(void**)base = page->free_list;
    page->free_list = base;
    if (page->used > 0) page->used--;
    // Retire the page once it is empty
    if (page->used == 0) {
        small_cold_v6_retire_page(page);
    }
}
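// ----------------------------------------------------------------------------
// Usage sketch (compiled only with -DSMALL_V6_USAGE_EXAMPLE)
// ----------------------------------------------------------------------------
// A minimal sketch of how a front-end shim could drive the v6 hot paths.
// `example_class_for_size` and the two wrappers are hypothetical names, not
// part of the v6 API; the real size-to-class mapping lives in the box headers.
#ifdef SMALL_V6_USAGE_EXAMPLE
static uint32_t example_class_for_size(size_t size) {
    (void)size;
    return SMALL_V6_C6_CLASS_IDX; // assume a C6-sized request for illustration
}

void* small_v6_example_alloc(size_t size) {
    SmallHeapCtxV6* ctx = small_heap_ctx_v6();
    const SmallPolicySnapshotV6* snap = tiny_policy_snapshot_v6();
    return small_alloc_fast_v6(size, example_class_for_size(size), ctx, snap);
}

void small_v6_example_free(void* ptr, size_t size) {
    SmallHeapCtxV6* ctx = small_heap_ctx_v6();
    const SmallPolicySnapshotV6* snap = tiny_policy_snapshot_v6();
    small_free_fast_v6(ptr, example_class_for_size(size), ctx, snap);
}
#endif // SMALL_V6_USAGE_EXAMPLE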