#ifndef HAKMEM_TINY_BG_SPILL_H
#define HAKMEM_TINY_BG_SPILL_H

#include <stdint.h>
#include <stdatomic.h>
#include <pthread.h>

#include "box/tiny_next_ptr_box.h"  // Phase E1-CORRECT: unified next pointer API

// Forward declarations
typedef struct TinySlab TinySlab;
typedef struct SuperSlab SuperSlab;

#define TINY_NUM_CLASSES 8

// Background spill queue: lock-free per-class queue for returning blocks to a
// SuperSlab. Lets the free() hot path defer expensive SuperSlab freelist
// operations to a background thread.
extern int g_bg_spill_enable;
extern int g_bg_spill_target;
extern int g_bg_spill_max_batch;
extern _Atomic uintptr_t g_bg_spill_head[TINY_NUM_CLASSES];
extern _Atomic uint32_t  g_bg_spill_len[TINY_NUM_CLASSES];

// Push a single block onto the spill queue (inline for the hot path).
// Treiber-stack push: link the block to the current head, then CAS the head.
static inline void bg_spill_push_one(int class_idx, void* p) {
    uintptr_t old_head;
    do {
        old_head = atomic_load_explicit(&g_bg_spill_head[class_idx], memory_order_acquire);
        tiny_next_write(class_idx, p, (void*)old_head);
    } while (!atomic_compare_exchange_weak_explicit(&g_bg_spill_head[class_idx],
                                                    &old_head, (uintptr_t)p,
                                                    memory_order_release,
                                                    memory_order_relaxed));
    atomic_fetch_add_explicit(&g_bg_spill_len[class_idx], 1u, memory_order_relaxed);
}

// Push a pre-linked chain onto the spill queue (inline for the hot path).
// The chain's tail is linked to the current head, then the head is swung to
// the chain's head in one CAS.
static inline void bg_spill_push_chain(int class_idx, void* head, void* tail, int count) {
    uintptr_t old_head;
    do {
        old_head = atomic_load_explicit(&g_bg_spill_head[class_idx], memory_order_acquire);
        tiny_next_write(class_idx, tail, (void*)old_head);
    } while (!atomic_compare_exchange_weak_explicit(&g_bg_spill_head[class_idx],
                                                    &old_head, (uintptr_t)head,
                                                    memory_order_release,
                                                    memory_order_relaxed));
    atomic_fetch_add_explicit(&g_bg_spill_len[class_idx], (uint32_t)count, memory_order_relaxed);
}

// Initialize the bg_spill module from environment variables.
void bg_spill_init(void);

// Drain the spill queue for a single class (called by the background thread).
void bg_spill_drain_class(int class_idx, pthread_mutex_t* lock);

#endif // HAKMEM_TINY_BG_SPILL_H
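
/*
 * Usage sketch (illustrative only; kept as a comment so it is not compiled).
 * The callers shown here -- superslab_free_direct() and g_superslab_lock[] --
 * are hypothetical names used for illustration and are not declared by this
 * header.
 *
 *   // In the tiny free() hot path: defer the SuperSlab freelist update by
 *   // pushing the block onto the per-class spill queue.
 *   if (g_bg_spill_enable) {
 *       bg_spill_push_one(class_idx, block);        // O(1) lock-free push
 *   } else {
 *       superslab_free_direct(class_idx, block);    // hypothetical slow path
 *   }
 *
 *   // In the background thread: periodically drain each class, passing the
 *   // mutex that guards the corresponding SuperSlab freelist.
 *   for (int c = 0; c < TINY_NUM_CLASSES; c++) {
 *       bg_spill_drain_class(c, &g_superslab_lock[c]);  // hypothetical lock array
 *   }
 */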