diff --git a/core/tiny_c7_ultra.c b/core/tiny_c7_ultra.c
index 789ba366..31fa6de1 100644
--- a/core/tiny_c7_ultra.c
+++ b/core/tiny_c7_ultra.c
@@ -12,6 +12,11 @@
 #include "box/tiny_front_v3_env_box.h"
 #include "box/free_path_stats_box.h"
 
+// Phase PERF-ULTRA-REFILL-OPT-1a: page size shift macro.
+// Also defined in tiny_c7_ultra_segment.c; the two definitions
+// must stay in sync.
+#define TINY_C7_ULTRA_PAGE_SHIFT 16  // 64KiB = 2^16
+
 #ifndef likely
 #define likely(x)   __builtin_expect(!!(x), 1)
 #define unlikely(x) __builtin_expect(!!(x), 0)
@@ -78,7 +83,8 @@ bool tiny_c7_ultra_refill(tiny_c7_ultra_tls_t* tls) {
         if (!seg) return false;
         tls->seg = seg;
         tls->seg_base = (uintptr_t)seg->base;
-        tls->seg_end = tls->seg_base + ((size_t)seg->num_pages * seg->page_size);
+        // Phase PERF-ULTRA-REFILL-OPT-1a: bit shift instead of multiplication
+        tls->seg_end = tls->seg_base + ((size_t)seg->num_pages << TINY_C7_ULTRA_PAGE_SHIFT);
     }
 
     size_t block_sz = tls->block_size;
@@ -107,7 +113,8 @@ bool tiny_c7_ultra_refill(tiny_c7_ultra_tls_t* tls) {
     }
 
     tiny_c7_ultra_page_meta_t* page = &seg->pages[chosen];
-    uint8_t* base = (uint8_t*)seg->base + ((size_t)chosen * seg->page_size);
+    // Phase PERF-ULTRA-REFILL-OPT-1a: bit shift instead of multiplication
+    uint8_t* base = (uint8_t*)seg->base + ((size_t)chosen << TINY_C7_ULTRA_PAGE_SHIFT);
 
     // If page is uninitialized, carve it
     if (page->capacity == 0) {
@@ -170,17 +177,17 @@ void tiny_c7_ultra_free(void* ptr) {
     tiny_c7_ultra_tls_t* tls = &g_tiny_c7_ultra_tls;
     void* base = (uint8_t*)ptr - 1;  // Convert USER -> BASE pointer
 
-    // 1) Initial segment learning (cold path, once per thread)
-    if (unlikely(tls->seg_base == 0)) {
-        tiny_c7_ultra_segment_t* seg = tiny_c7_ultra_segment_from_ptr(ptr);
-        if (seg != NULL) {
-            tls->seg = seg;
-            tls->seg_base = (uintptr_t)seg->base;
-            tls->seg_end = tls->seg_base + ((size_t)seg->num_pages * seg->page_size);
-        }
-    }
+    // Phase PERF-ULTRA-REFILL-OPT-1b: segment learning moved to refill (alloc cold path).
+    // In normal allocation patterns, alloc is always called before free on each thread,
+    // so seg_base/seg_end are guaranteed to be initialized by refill's
+    // tiny_c7_ultra_segment_acquire() call (lines 82-87).
+    //
+    // This removes the per-free segment learning overhead.
+    // Risk: if a thread calls free() before any alloc(), the free falls back to so_free().
+    // This is acceptable because that is an unusual pattern.
 
-    // 2) Fast path: range check + TLS push
+    // Fast path: assume the segment was already learned by refill.
+    // No unlikely() guard is needed because refill always runs first in normal patterns.
     uintptr_t addr = (uintptr_t)base;
     if (likely(tls->seg_base != 0 &&
                addr >= tls->seg_base &&
diff --git a/core/tiny_c7_ultra_segment.c b/core/tiny_c7_ultra_segment.c
index 23cdfec3..a38247b0 100644
--- a/core/tiny_c7_ultra_segment.c
+++ b/core/tiny_c7_ultra_segment.c
@@ -11,6 +11,7 @@
 // Split the 2MiB segment into 64KiB pages (C7-only; pow2 makes masking easy)
 #define TINY_C7_ULTRA_SEG_SIZE  ((size_t)(2 * 1024 * 1024))
 #define TINY_C7_ULTRA_PAGE_SIZE ((size_t)(64 * 1024))
+#define TINY_C7_ULTRA_PAGE_SHIFT 16  // 64KiB = 2^16 (bit shift instead of divide)
 
 static __thread tiny_c7_ultra_segment_t* g_ultra_seg;
 
@@ -92,7 +93,8 @@ tiny_c7_ultra_page_meta_t* tiny_c7_ultra_page_of(void* p,
     uintptr_t base = (uintptr_t)seg->base;
     uintptr_t addr = (uintptr_t)p;
     size_t offset = (size_t)(addr - base);
-    uint32_t idx = (uint32_t)(offset / seg->page_size);
+    // Phase PERF-ULTRA-REFILL-OPT-1a: bit shift instead of division on the lookup path
+    uint32_t idx = (uint32_t)(offset >> TINY_C7_ULTRA_PAGE_SHIFT);
    if (idx >= seg->num_pages) {
         return NULL;
     }
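
The diff now encodes the 64KiB page size in two forms (TINY_C7_ULTRA_PAGE_SIZE and TINY_C7_ULTRA_PAGE_SHIFT) across two files, so a compile-time guard can keep them from drifting apart. Below is a minimal standalone sketch, not part of the diff: the macro values are copied from above, while the _Static_assert placement and the demo main() are illustrative assumptions.

/* Standalone sketch: verifies that the shift forms introduced in
 * PERF-ULTRA-REFILL-OPT-1a match the original multiply/divide forms,
 * and pins the macro pair together at compile time. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define TINY_C7_ULTRA_PAGE_SIZE  ((size_t)(64 * 1024))
#define TINY_C7_ULTRA_PAGE_SHIFT 16  /* 64KiB = 2^16 */

/* If the page size ever changes without the shift, fail the build. */
_Static_assert(TINY_C7_ULTRA_PAGE_SIZE == ((size_t)1 << TINY_C7_ULTRA_PAGE_SHIFT),
               "TINY_C7_ULTRA_PAGE_SHIFT must match TINY_C7_ULTRA_PAGE_SIZE");

int main(void) {
    for (size_t n = 0; n < 64; n++) {
        /* seg_end / page base computation: multiply vs. shift */
        assert((n * TINY_C7_ULTRA_PAGE_SIZE) == (n << TINY_C7_ULTRA_PAGE_SHIFT));
        /* page index computation: divide vs. shift */
        size_t offset = n * TINY_C7_ULTRA_PAGE_SIZE + 1234;
        assert((offset / TINY_C7_ULTRA_PAGE_SIZE) == (offset >> TINY_C7_ULTRA_PAGE_SHIFT));
    }
    puts("shift forms match multiply/divide forms");
    return 0;
}

Adding one such assert next to each definition would make the duplicated macro safe to keep; without it, changing TINY_C7_ULTRA_PAGE_SIZE alone would silently corrupt seg_end and page index math.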
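
The OPT-1b comment argues that dropping the per-free segment learning is safe because a thread that has never allocated still has seg_base == 0, so the range check rejects every pointer and the free is routed to the fallback. Here is a small sketch of that predicate, with a hypothetical demo_tls_t standing in for the real TLS struct; so_free() and the real field layout live in the diff, and everything below is illustrative.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
    uintptr_t seg_base;  /* stays 0 until refill learns the segment */
    uintptr_t seg_end;
} demo_tls_t;

/* Mirrors the shape of the fast-path range check in tiny_c7_ultra_free(). */
static bool demo_fast_path_takes(const demo_tls_t* tls, const void* base) {
    uintptr_t addr = (uintptr_t)base;
    return tls->seg_base != 0 &&   /* false on a thread that never allocated */
           addr >= tls->seg_base &&
           addr <  tls->seg_end;
}

int main(void) {
    demo_tls_t fresh = {0, 0};  /* thread that frees before any alloc */
    int x;
    /* Rejected by the range check, so the real code would call so_free(). */
    assert(!demo_fast_path_takes(&fresh, &x));
    return 0;
}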