diff --git a/core/box/tiny_front_cold_box.h b/core/box/tiny_front_cold_box.h
index 9dedbdc3..2aba2a97 100644
--- a/core/box/tiny_front_cold_box.h
+++ b/core/box/tiny_front_cold_box.h
@@ -57,10 +57,10 @@ __attribute__((noinline, cold))
 static inline void* tiny_cold_refill_and_alloc(int class_idx) {
     // Refill cache from SuperSlab (batch allocation)
-    // unified_cache_refill() returns first block directly
-    void* base = unified_cache_refill(class_idx);
+    // unified_cache_refill() returns first BASE block (wrapped)
+    hak_base_ptr_t base = unified_cache_refill(class_idx);
 
-    if (base == NULL) {
+    if (hak_base_is_null(base)) {
         // Refill failed (SuperSlab allocation error, or cache disabled)
 #if !HAKMEM_BUILD_RELEASE
         static __thread uint64_t g_refill_fail_count[TINY_NUM_CLASSES] = {0};
@@ -79,9 +79,10 @@ static inline void* tiny_cold_refill_and_alloc(int class_idx) {
 #if HAKMEM_TINY_HEADER_CLASSIDX
     // Use centralized layout API for offset calculation
     size_t user_offset = tiny_user_offset(class_idx);
-    return (void*)((char*)base + user_offset);  // USER pointer
+    void* raw_base = HAK_BASE_TO_RAW(base);
+    return (void*)((char*)raw_base + user_offset);  // USER pointer
 #else
-    return base;
+    return HAK_BASE_TO_RAW(base);
 #endif
 }
diff --git a/core/front/malloc_tiny_fast.h b/core/front/malloc_tiny_fast.h
index f0c1f9ae..4532d946 100644
--- a/core/front/malloc_tiny_fast.h
+++ b/core/front/malloc_tiny_fast.h
@@ -231,7 +231,7 @@ static inline int free_tiny_fast(void* ptr) {
     }
 #endif
 
-    int pushed = unified_cache_push(class_idx, base);
+    int pushed = unified_cache_push(class_idx, HAK_BASE_FROM_RAW(base));
     if (__builtin_expect(pushed, 1)) {
         return 1;  // Success
     }
diff --git a/core/front/tiny_unified_cache.c b/core/front/tiny_unified_cache.c
index 1250ec14..2cb79ad3 100644
--- a/core/front/tiny_unified_cache.c
+++ b/core/front/tiny_unified_cache.c
@@ -292,14 +292,14 @@ static inline int unified_refill_validate_base(int class_idx,
 }
 
 // Batch refill from SuperSlab (called on cache miss)
-// Returns: BASE pointer (first block), or NULL if failed
+// Returns: BASE pointer (first block, wrapped), or NULL-wrapped if failed
 // Design: Direct carve from SuperSlab to array (no TLS SLL intermediate layer)
-void* unified_cache_refill(int class_idx) {
+hak_base_ptr_t unified_cache_refill(int class_idx) {
     TinyTLSSlab* tls = &g_tls_slabs[class_idx];
 
     // Step 1: Ensure SuperSlab available
     if (!tls->ss) {
-        if (!superslab_refill(class_idx)) return NULL;
+        if (!superslab_refill(class_idx)) return HAK_BASE_FROM_RAW(NULL);
         tls = &g_tls_slabs[class_idx];  // Reload after refill
     }
 
@@ -322,7 +322,7 @@ void* unified_cache_refill(int class_idx) {
         room = cache->capacity - (cache->tail - cache->head) - 1;
     }
 
-    if (room <= 0) return NULL;
+    if (room <= 0) return HAK_BASE_FROM_RAW(NULL);
     if (room > 128) room = 128;  // Batch size limit
 
     // Step 3: Direct carve from SuperSlab into local array (bypass TLS SLL!)
@@ -425,7 +425,7 @@ void* unified_cache_refill(int class_idx) {
         }
     }
 
-    if (produced == 0) return NULL;
+    if (produced == 0) return HAK_BASE_FROM_RAW(NULL);
 
     // Step 4: Update active counter
     // Guard: tls->ss can be NULL if all SuperSlab refills failed
@@ -444,5 +444,5 @@ void* unified_cache_refill(int class_idx) {
     g_unified_cache_miss[class_idx]++;
 #endif
 
-    return first;  // Return first block (BASE pointer)
+    return HAK_BASE_FROM_RAW(first);  // Return first block (BASE pointer)
 }
diff --git a/core/front/tiny_unified_cache.h b/core/front/tiny_unified_cache.h
index 08ceb5ff..0b0eeb75 100644
--- a/core/front/tiny_unified_cache.h
+++ b/core/front/tiny_unified_cache.h
@@ -27,6 +27,7 @@
 #include
 #include "../hakmem_build_flags.h"
 #include "../hakmem_tiny_config.h"        // For TINY_NUM_CLASSES
+#include "../box/ptr_type_box.h"          // Phantom pointer types (BASE/USER)
 #include "../box/tiny_front_config_box.h" // Phase 8-Step1: Config macros
 
 // ============================================================================
@@ -34,7 +35,9 @@
 // ============================================================================
 
 typedef struct {
-    void** slots;       // Dynamic array (allocated at init, power-of-2 size)
+    // slots holds BASE pointers (never USER pointers).
+    // The API wraps them as hak_base_ptr_t for type safety; the internal representation stays void*.
+    void** slots;       // Dynamic array of BASE pointers (allocated at init)
     uint16_t head;      // Pop index (consumer)
     uint16_t tail;      // Push index (producer)
     uint16_t capacity;  // Cache size (power of 2 for fast modulo: & (capacity-1))
@@ -122,12 +125,13 @@ void* unified_cache_refill(int class_idx);
 // ============================================================================
 
 // Pop from unified cache (alloc fast path)
-// Returns: BASE pointer (caller must convert to USER with +1)
-static inline void* unified_cache_pop(int class_idx) {
+// Returns: BASE pointer (wrapped hak_base_ptr_t; caller converts to USER)
+static inline hak_base_ptr_t unified_cache_pop(int class_idx) {
     // Phase 8-Step1: Use config macro for dead code elimination in PGO mode
     // Fast path: Unified cache disabled → return NULL immediately
     #include "../box/tiny_front_config_box.h"
-    if (__builtin_expect(!TINY_FRONT_UNIFIED_CACHE_ENABLED, 0)) return NULL;
+    if (__builtin_expect(!TINY_FRONT_UNIFIED_CACHE_ENABLED, 0))
+        return HAK_BASE_FROM_RAW(NULL);
 
     TinyUnifiedCache* cache = &g_unified_cache[class_idx];  // 1 cache miss (TLS)
 
@@ -138,7 +142,8 @@ static inline void* unified_cache_pop(int class_idx) {
     if (__builtin_expect(cache->slots == NULL, 0)) {
         unified_cache_init();  // First call in this thread
         // Re-check after init (may fail if allocation failed)
-        if (cache->slots == NULL) return NULL;
+        if (cache->slots == NULL)
+            return HAK_BASE_FROM_RAW(NULL);
     }
 #endif
 
@@ -147,7 +152,7 @@ static inline void* unified_cache_pop(int class_idx) {
 #if !HAKMEM_BUILD_RELEASE
         g_unified_cache_miss[class_idx]++;
 #endif
-        return NULL;  // Empty
+        return HAK_BASE_FROM_RAW(NULL);  // Empty
     }
 
     // Pop from head (consumer)
@@ -158,18 +163,19 @@ static inline void* unified_cache_pop(int class_idx) {
     g_unified_cache_hit[class_idx]++;
 #endif
 
-    return base;  // Return BASE pointer (2-3 cache misses total)
+    return HAK_BASE_FROM_RAW(base);  // Return BASE pointer (2-3 cache misses total)
 }
 
 // Push to unified cache (free fast path)
-// Input: BASE pointer (caller must pass BASE, not USER)
+// Input: BASE pointer (wrapped hak_base_ptr_t; caller must pass BASE, not USER)
 // Returns: 1=SUCCESS, 0=FULL
-static inline int unified_cache_push(int class_idx, void* base) {
+static inline int unified_cache_push(int class_idx, hak_base_ptr_t base) {
     // Phase 8-Step1: Use config macro for dead code elimination in PGO mode
     // Fast path: Unified cache disabled → return 0 (not handled)
     if (__builtin_expect(!TINY_FRONT_UNIFIED_CACHE_ENABLED, 0)) return 0;
 
     TinyUnifiedCache* cache = &g_unified_cache[class_idx];  // 1 cache miss (TLS)
+    void* base_raw = HAK_BASE_TO_RAW(base);
 
     // Phase 8-Step3: Lazy init check (conditional in PGO mode)
     // PGO builds assume bench_fast_init() prewarmed cache → remove check (-1 branch)
@@ -193,7 +199,7 @@ static inline int unified_cache_push(int class_idx, void* base) {
     }
 
     // Push to tail (producer)
-    cache->slots[cache->tail] = base;  // 1 cache miss (array write)
+    cache->slots[cache->tail] = base_raw;  // 1 cache miss (array write)
     cache->tail = next_tail;
 
 #if !HAKMEM_BUILD_RELEASE
@@ -208,12 +214,13 @@ static inline int unified_cache_push(int class_idx, void* base) {
 // ============================================================================
 
 // All-in-one: Pop from cache, or refill from SuperSlab on miss
-// Returns: BASE pointer (caller converts to USER), or NULL if failed
+// Returns: BASE pointer (wrapped hak_base_ptr_t), or NULL-wrapped if failed
 // Design: Self-contained, bypasses all other frontend layers (Ring/FC/SFC/SLL)
-static inline void* unified_cache_pop_or_refill(int class_idx) {
+static inline hak_base_ptr_t unified_cache_pop_or_refill(int class_idx) {
     // Phase 8-Step1: Use config macro for dead code elimination in PGO mode
-    // Fast path: Unified cache disabled → return NULL (caller uses legacy cascade)
-    if (__builtin_expect(!TINY_FRONT_UNIFIED_CACHE_ENABLED, 0)) return NULL;
+    // Fast path: Unified cache disabled → NULL-wrapped (caller uses legacy cascade)
+    if (__builtin_expect(!TINY_FRONT_UNIFIED_CACHE_ENABLED, 0))
+        return HAK_BASE_FROM_RAW(NULL);
 
     TinyUnifiedCache* cache = &g_unified_cache[class_idx];  // 1 cache miss (TLS)
 
@@ -223,7 +230,8 @@ static inline void* unified_cache_pop_or_refill(int class_idx) {
     // Lazy init check (once per thread, per class)
     if (__builtin_expect(cache->slots == NULL, 0)) {
         unified_cache_init();
-        if (cache->slots == NULL) return NULL;
+        if (cache->slots == NULL)
+            return HAK_BASE_FROM_RAW(NULL);
     }
 #endif
 
@@ -234,14 +242,14 @@ static inline void* unified_cache_pop_or_refill(int class_idx) {
 #if !HAKMEM_BUILD_RELEASE
         g_unified_cache_hit[class_idx]++;
 #endif
-        return base;  // Hit! (2-3 cache misses total)
+        return HAK_BASE_FROM_RAW(base);  // Hit! (2-3 cache misses total)
     }
 
     // Cache miss → Batch refill from SuperSlab
 #if !HAKMEM_BUILD_RELEASE
     g_unified_cache_miss[class_idx]++;
 #endif
-    return unified_cache_refill(class_idx);  // Refill + return first block
+    return unified_cache_refill(class_idx);  // Refill + return first block (BASE)
 }
 
 #endif // HAK_FRONT_TINY_UNIFIED_CACHE_H
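
Note on the new dependency: the hunks above assume core/box/ptr_type_box.h supplies a phantom-typed
BASE pointer. Only the names used at the call sites (hak_base_ptr_t, HAK_BASE_FROM_RAW,
HAK_BASE_TO_RAW, hak_base_is_null) are taken from the patch itself; the definition below is a
hypothetical minimal sketch of such a wrapper, not the actual header.

    /* Sketch of a zero-cost phantom type for BASE pointers (assumed shape).
     * A one-member struct has the same size and register-passing behaviour as
     * void*, but is not implicitly convertible to or from it, so BASE and USER
     * pointers can no longer be mixed up without a compile error. */
    #include <stddef.h>

    typedef struct { void* p; } hak_base_ptr_t;

    /* Wrap a raw BASE pointer (compound literal, usable directly in return). */
    #define HAK_BASE_FROM_RAW(raw)  ((hak_base_ptr_t){ (raw) })

    /* Unwrap to void* where raw pointer arithmetic is needed. */
    #define HAK_BASE_TO_RAW(base)   ((base).p)

    /* NULL check on the wrapped value (refill/pop failure paths). */
    static inline int hak_base_is_null(hak_base_ptr_t base) {
        return base.p == NULL;
    }

With a wrapper of this shape the patch is a purely type-level annotation: the struct is passed and
returned in a single register, so the converted fast paths should compile to the same code as the
original void* versions.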