/**
 * hakmem_smallmid.c - Small-Mid Allocator Front Box Implementation
 *
 * Phase 17-1: Front Box Only (No Dedicated SuperSlab Backend)
 *
 * Strategy (ChatGPT reviewed):
 * - Thin front layer with TLS freelist (256B/512B/1KB)
 * - Backend: Use existing Tiny SuperSlab/SharedPool APIs
 * - Goal: Measure performance impact before building dedicated backend
 * - A/B test: Does Small-Mid front improve 256-1KB performance?
 *
 * Architecture:
 * - 3 size classes: 256B/512B/1KB (reduced from 5)
 * - TLS freelist for fast alloc/free (static inline)
 * - Backend: Call Tiny allocator APIs (reuse existing infrastructure)
 * - ENV controlled (HAKMEM_SMALLMID_ENABLE=1)
 *
 * Created: 2025-11-16
 * Updated: 2025-11-16 (Phase 17-1 revision - Front Box only)
 */

#include "hakmem_smallmid.h"
#include "hakmem_build_flags.h"
#include "hakmem_tiny.h"    // For backend: hak_tiny_alloc / hak_tiny_free
#include "tiny_region_id.h" // For header writing

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// ============================================================================
// TLS State
// ============================================================================

// Per-thread singly-linked freelists, one per size class. Each list node is
// the block's BASE pointer (header byte at offset 0); the "next" link lives
// at offset 1, i.e. at the start of the user data area.
__thread void* g_smallmid_tls_head[SMALLMID_NUM_CLASSES] = {NULL};
__thread uint32_t g_smallmid_tls_count[SMALLMID_NUM_CLASSES] = {0};

// ============================================================================
// Size Class Table (Phase 17-1: 3 classes)
// ============================================================================

const size_t g_smallmid_class_sizes[SMALLMID_NUM_CLASSES] = {
    256,  // SM0: 256B
    512,  // SM1: 512B
    1024  // SM2: 1KB
};

// ============================================================================
// Global State
// ============================================================================

static pthread_mutex_t g_smallmid_init_lock = PTHREAD_MUTEX_INITIALIZER;
static int g_smallmid_initialized = 0;
static int g_smallmid_enabled = -1; // -1 = not checked, 0 = disabled, 1 = enabled

// ============================================================================
// Statistics (Debug)
// ============================================================================

#ifdef HAKMEM_SMALLMID_STATS
SmallMidStats g_smallmid_stats = {0};

/**
 * smallmid_print_stats - Dump allocation counters to stderr (debug builds).
 */
void smallmid_print_stats(void) {
    fprintf(stderr, "\n=== Small-Mid Allocator Statistics ===\n");
    fprintf(stderr, "Total allocs:       %lu\n", g_smallmid_stats.total_allocs);
    fprintf(stderr, "Total frees:        %lu\n", g_smallmid_stats.total_frees);
    fprintf(stderr, "TLS hits:           %lu\n", g_smallmid_stats.tls_hits);
    fprintf(stderr, "TLS misses:         %lu\n", g_smallmid_stats.tls_misses);
    fprintf(stderr, "SuperSlab refills:  %lu\n", g_smallmid_stats.superslab_refills);
    if (g_smallmid_stats.total_allocs > 0) {
        double hit_rate = (double)g_smallmid_stats.tls_hits /
                          g_smallmid_stats.total_allocs * 100.0;
        fprintf(stderr, "TLS hit rate:       %.2f%%\n", hit_rate);
    }
    fprintf(stderr, "=======================================\n\n");
}
#endif

// ============================================================================
// ENV Control
// ============================================================================

/**
 * smallmid_is_enabled - Lazily read HAKMEM_SMALLMID_ENABLE (cached).
 *
 * @return true iff the environment variable is set to exactly "1".
 *
 * NOTE(review): the cached flag is a plain int, not atomic; concurrent first
 * calls may both read the environment, but both arrive at the same value, so
 * the race is benign — confirm this matches the project's TSan policy.
 */
bool smallmid_is_enabled(void) {
    if (__builtin_expect(g_smallmid_enabled == -1, 0)) {
        const char* env = getenv("HAKMEM_SMALLMID_ENABLE");
        g_smallmid_enabled = (env && atoi(env) == 1) ? 1 : 0;
        if (g_smallmid_enabled) {
            SMALLMID_LOG("Small-Mid allocator ENABLED (ENV: HAKMEM_SMALLMID_ENABLE=1)");
        } else {
            SMALLMID_LOG("Small-Mid allocator DISABLED (default, set HAKMEM_SMALLMID_ENABLE=1 to enable)");
        }
    }
    return (g_smallmid_enabled == 1);
}

// ============================================================================
// Initialization
// ============================================================================

/**
 * smallmid_init - One-time global initialization (idempotent, thread-safe).
 *
 * Phase 17-1 has no dedicated backend, so this only evaluates the ENV switch
 * and marks the allocator initialized; TLS state is static and needs no setup.
 */
void smallmid_init(void) {
    if (g_smallmid_initialized) return;

    pthread_mutex_lock(&g_smallmid_init_lock);
    if (!g_smallmid_initialized) {
        SMALLMID_LOG("Initializing Small-Mid Front Box...");

        if (!smallmid_is_enabled()) {
            SMALLMID_LOG("Small-Mid allocator is disabled, skipping initialization");
        } else {
            // Phase 17-1: No dedicated backend - use existing Tiny infrastructure
            // No additional initialization needed (TLS state is static)
            SMALLMID_LOG("Small-Mid Front Box initialized (3 classes: 256B/512B/1KB, backend=Tiny)");
        }
        // Mark initialized in both cases so we never re-enter this section.
        g_smallmid_initialized = 1;
    }
    pthread_mutex_unlock(&g_smallmid_init_lock);
}

// ============================================================================
// TLS Freelist Operations
// ============================================================================

/**
 * smallmid_tls_pop - Pop a block from the calling thread's freelist.
 *
 * @param class_idx Size class index
 * @return Block BASE pointer (header at offset 0), or NULL if the list is empty
 */
static inline void* smallmid_tls_pop(int class_idx) {
    void* head = g_smallmid_tls_head[class_idx];
    if (!head) return NULL;

    // Next link is stored at offset 0 of the user data (after 1-byte header).
    void* next = *(void**)((uint8_t*)head + 1);
    g_smallmid_tls_head[class_idx] = next;
    g_smallmid_tls_count[class_idx]--;

#ifdef HAKMEM_SMALLMID_STATS
    __atomic_fetch_add(&g_smallmid_stats.tls_hits, 1, __ATOMIC_RELAXED);
#endif
    return head;
}

/**
 * smallmid_tls_push - Push a block onto the calling thread's freelist.
 *
 * @param class_idx Size class index
 * @param ptr       Block BASE pointer (header at offset 0)
 * @return true on success, false if the per-class TLS cache is at capacity
 */
static inline bool smallmid_tls_push(int class_idx, void* ptr) {
    uint32_t capacity = smallmid_tls_capacity(class_idx);
    if (g_smallmid_tls_count[class_idx] >= capacity) {
        return false; // TLS full
    }

    // Write next link at offset 0 of the user data (after 1-byte header).
    void* head = g_smallmid_tls_head[class_idx];
    *(void**)((uint8_t*)ptr + 1) = head;
    g_smallmid_tls_head[class_idx] = ptr;
    g_smallmid_tls_count[class_idx]++;
    return true;
}

// ============================================================================
// Backend Delegation (Phase 17-1: Reuse Tiny infrastructure)
// ============================================================================

/**
 * smallmid_backend_alloc - Allocate from Tiny backend and convert the header.
 *
 * @param size Allocation size (256-1024)
 * @return User pointer with Small-Mid header (0xb0 | class), or NULL on failure
 *
 * Strategy:
 * - Call Tiny allocator (handles C5/C6/C7 = 256B/512B/1KB)
 * - Tiny writes header: 0xa5/0xa6/0xa7
 * - Overwrite with Small-Mid header: 0xb0/0xb1/0xb2
 */
static void* smallmid_backend_alloc(size_t size) {
#ifdef HAKMEM_SMALLMID_STATS
    __atomic_fetch_add(&g_smallmid_stats.tls_misses, 1, __ATOMIC_RELAXED);
    __atomic_fetch_add(&g_smallmid_stats.superslab_refills, 1, __ATOMIC_RELAXED);
#endif

    void* ptr = hak_tiny_alloc(size);
    if (!ptr) {
        SMALLMID_LOG("smallmid_backend_alloc(%zu): Tiny allocation failed", size);
        return NULL;
    }

    // Overwrite header: Tiny (0xa0 | tiny_class) -> Small-Mid (0xb0 | sm_class)
    // Tiny class mapping:      C5=256B, C6=512B, C7=1KB
    // Small-Mid class mapping: SM0=256B, SM1=512B, SM2=1KB
    uint8_t* base = (uint8_t*)ptr - 1;
    uint8_t tiny_header = *base;
    uint8_t tiny_class = tiny_header & 0x0f;

    int sm_class = tiny_class - 5;
    if (sm_class < 0 || sm_class >= SMALLMID_NUM_CLASSES) {
        // Should never happen - Tiny allocated wrong class. The Tiny header is
        // still intact at this point, so a plain Tiny free is safe.
        SMALLMID_LOG("smallmid_backend_alloc(%zu): Invalid Tiny class %d", size, tiny_class);
        hak_tiny_free(ptr);
        return NULL;
    }

    *base = 0xb0 | sm_class;

    SMALLMID_LOG("smallmid_backend_alloc(%zu) = %p (Tiny C%d → SM C%d)",
                 size, ptr, tiny_class, sm_class);
    return ptr;
}

/**
 * smallmid_backend_free - Convert a Small-Mid header back and free via Tiny.
 *
 * @param ptr  User pointer (MUST carry a Small-Mid header 0xb0 | sm_class)
 * @param size Allocation size (unused, Tiny reads the header)
 *
 * Strategy:
 * - Convert header: Small-Mid (0xb0 | sm_class) -> Tiny (0xa0 | tiny_class)
 * - Call Tiny free to handle deallocation
 *
 * Precondition: callers must verify the 0xb0 magic before calling; passing a
 * Tiny-headered pointer here would rewrite the class nibble incorrectly.
 */
static void smallmid_backend_free(void* ptr, size_t size) {
    (void)size; // Unused - Tiny reads size from header

    uint8_t* base = (uint8_t*)ptr - 1;
    uint8_t sm_header = *base;
    uint8_t sm_class = sm_header & 0x0f;

    // Convert Small-Mid class (0/1/2) back to Tiny class (5/6/7).
    uint8_t tiny_class = sm_class + 5;
    *base = 0xa0 | tiny_class;

    SMALLMID_LOG("smallmid_backend_free(%p): SM C%d → Tiny C%d", ptr, sm_class, tiny_class);

    hak_tiny_free(ptr);
}

// ============================================================================
// Allocation
// ============================================================================

/**
 * smallmid_alloc - Allocate a 256B-1KB block.
 *
 * @param size Requested size in bytes
 * @return User pointer, or NULL (disabled / out of range / backend failure);
 *         a NULL return means the caller should fall through to other allocators.
 */
void* smallmid_alloc(size_t size) {
    if (!smallmid_is_enabled()) {
        return NULL; // Disabled, fall through to Mid or other allocators
    }

    if (__builtin_expect(!g_smallmid_initialized, 0)) {
        smallmid_init();
    }

    if (__builtin_expect(!smallmid_is_in_range(size), 0)) {
        SMALLMID_LOG("smallmid_alloc: size %zu out of range [%d-%d]",
                     size, SMALLMID_MIN_SIZE, SMALLMID_MAX_SIZE);
        return NULL;
    }

    int class_idx = smallmid_size_to_class(size);
    if (__builtin_expect(class_idx < 0, 0)) {
        SMALLMID_LOG("smallmid_alloc: invalid class for size %zu", size);
        return NULL;
    }

#ifdef HAKMEM_SMALLMID_STATS
    __atomic_fetch_add(&g_smallmid_stats.total_allocs, 1, __ATOMIC_RELAXED);
#endif

    // Fast path: pop a cached block (base pointer) from the TLS freelist.
    void* ptr = smallmid_tls_pop(class_idx);
    if (ptr) {
        SMALLMID_LOG("smallmid_alloc(%zu) = %p (TLS hit, class=%d)", size, ptr, class_idx);
        return (uint8_t*)ptr + 1; // Return user pointer (skip header)
    }

    // TLS miss: Allocate from Tiny backend
    // Phase 17-1: Reuse Tiny infrastructure (C5/C6/C7) instead of dedicated SuperSlab
    ptr = smallmid_backend_alloc(size);
    if (!ptr) {
        SMALLMID_LOG("smallmid_alloc(%zu) = NULL (backend failed)", size);
        return NULL;
    }

    SMALLMID_LOG("smallmid_alloc(%zu) = %p (backend alloc, class=%d)", size, ptr, class_idx);
    return ptr; // Backend already returns the user pointer
}

// ============================================================================
// Free
// ============================================================================

/**
 * smallmid_free - Free a block previously returned by smallmid_alloc.
 *
 * @param ptr User pointer (NULL is a no-op)
 *
 * Blocks carrying the Small-Mid magic (0xb0) are cached on the TLS freelist;
 * when the cache is full they are converted back and released through Tiny.
 * Pointers without the Small-Mid magic are handed to Tiny untouched.
 */
void smallmid_free(void* ptr) {
    if (!ptr) return;

    if (!smallmid_is_enabled()) {
        return; // Disabled, should not be called
    }

#ifdef HAKMEM_SMALLMID_STATS
    __atomic_fetch_add(&g_smallmid_stats.total_frees, 1, __ATOMIC_RELAXED);
#endif

    // Phase 17-1: Read header to identify if this is a Small-Mid TLS allocation
    // or a backend (Tiny) allocation.
    // Small-Mid allocations carry magic 0xb0; Tiny allocations carry 0xa0.
    uint8_t* base = (uint8_t*)ptr - 1;
    uint8_t header = *base;
    uint8_t magic = header & 0xf0;
    int class_idx = header & 0x0f;

    if (magic == 0xb0 && class_idx < SMALLMID_NUM_CLASSES) {
        // Small-Mid block: try to cache the BASE pointer on the TLS freelist.
        if (smallmid_tls_push(class_idx, base)) {
            SMALLMID_LOG("smallmid_free(%p): pushed to TLS (class=%d)", ptr, class_idx);
            return;
        }
        // TLS full: convert header back to Tiny and release through backend.
        SMALLMID_LOG("smallmid_free(%p): TLS full, delegating to backend", ptr);
        smallmid_backend_free(ptr, 0);
        return;
    }

    // BUGFIX: the previous version also routed non-Small-Mid headers through
    // smallmid_backend_free(), which reinterprets the low nibble as a
    // Small-Mid class and rewrites a Tiny header 0xa0|c to 0xa0|(c+5),
    // corrupting Tiny metadata. A pointer without the 0xb0 magic belongs to
    // Tiny (or another allocator) and must be freed with its header intact.
    hak_tiny_free(ptr);
}

// ============================================================================
// Thread Cleanup
// ============================================================================

/**
 * smallmid_thread_exit - Drain the calling thread's TLS caches back to Tiny.
 *
 * Must be called before thread exit so cached blocks are not leaked.
 */
void smallmid_thread_exit(void) {
    if (!smallmid_is_enabled()) return;

    SMALLMID_LOG("smallmid_thread_exit: cleaning up TLS state");

    // Phase 17-1: Return TLS blocks to Tiny backend.
    for (int i = 0; i < SMALLMID_NUM_CLASSES; i++) {
        void* head = g_smallmid_tls_head[i];
        while (head) {
            // Read the next link BEFORE freeing: backend_free rewrites the
            // header and Tiny may immediately reuse the user data area.
            void* next = *(void**)((uint8_t*)head + 1);
            void* user_ptr = (uint8_t*)head + 1;
            smallmid_backend_free(user_ptr, 0);
            head = next;
        }
        g_smallmid_tls_head[i] = NULL;
        g_smallmid_tls_count[i] = 0;
    }
}