#ifndef HAKMEM_TINY_BG_SPILL_H
#define HAKMEM_TINY_BG_SPILL_H

#include <stdatomic.h>
#include <stddef.h>   // size_t (used by the inline helpers below)
#include <stdint.h>
#include <pthread.h>

// Forward declarations
typedef struct TinySlab TinySlab;
typedef struct SuperSlab SuperSlab;

#define TINY_NUM_CLASSES 8

// Background spill queue: lock-free queue for returning blocks to SuperSlab.
// Allows free() hot path to defer expensive SuperSlab freelist operations.
extern int g_bg_spill_enable;
extern int g_bg_spill_target;
extern int g_bg_spill_max_batch;
extern _Atomic uintptr_t g_bg_spill_head[TINY_NUM_CLASSES];
extern _Atomic uint32_t  g_bg_spill_len[TINY_NUM_CLASSES];
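//
// Note (descriptive sketch, not normative): g_bg_spill_head[c] is the head of an
// intrusive, Treiber-stack-style LIFO of freed blocks for class c, linked through
// the blocks themselves; g_bg_spill_len[c] is an approximate counter that a
// background drainer can compare against g_bg_spill_target to decide when to
// drain (the exact policy lives in the implementation, not in this header).
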
// Push single block to spill queue (inline for hot path)
static inline void bg_spill_push_one(int class_idx, void* p) {
    uintptr_t old_head;
    do {
        old_head = atomic_load_explicit(&g_bg_spill_head[class_idx], memory_order_acquire);
        // Phase 7: header-aware next placement (C0-C6: base+1, C7: base)
#if HAKMEM_TINY_HEADER_CLASSIDX
        const size_t next_off = (class_idx == 7) ? 0 : 1;
#else
        const size_t next_off = 0;
#endif
        *(void**)((uint8_t*)p + next_off) = (void*)old_head;
    } while (!atomic_compare_exchange_weak_explicit(&g_bg_spill_head[class_idx], &old_head,
                                                    (uintptr_t)p,
                                                    memory_order_release, memory_order_relaxed));
    atomic_fetch_add_explicit(&g_bg_spill_len[class_idx], 1u, memory_order_relaxed);
}
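//
// Example (illustrative sketch only; tiny_free_path() and tiny_class_of() are
// hypothetical caller names, not part of this header):
//
//   void tiny_free_path(void* p) {
//       int c = tiny_class_of(p);          // hypothetical size-class lookup
//       if (g_bg_spill_enable) {
//           bg_spill_push_one(c, p);       // defer SuperSlab freelist work
//           return;
//       }
//       /* ... otherwise take the synchronous SuperSlab free path ... */
//   }
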
// Push pre-linked chain to spill queue (inline for hot path)
static inline void bg_spill_push_chain(int class_idx, void* head, void* tail, int count) {
    uintptr_t old_head;
    do {
        old_head = atomic_load_explicit(&g_bg_spill_head[class_idx], memory_order_acquire);
        // Phase 7: header-aware next placement for tail link
#if HAKMEM_TINY_HEADER_CLASSIDX
        const size_t next_off = (class_idx == 7) ? 0 : 1;
#else
        const size_t next_off = 0;
#endif
        *(void**)((uint8_t*)tail + next_off) = (void*)old_head;
    } while (!atomic_compare_exchange_weak_explicit(&g_bg_spill_head[class_idx], &old_head,
                                                    (uintptr_t)head,
                                                    memory_order_release, memory_order_relaxed));
    atomic_fetch_add_explicit(&g_bg_spill_len[class_idx], (uint32_t)count, memory_order_relaxed);
}
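//
// Chain contract (sketch): head..tail are expected to be pre-linked through the same
// next_off slot used above (base for C7, base+1 otherwise), with `count` blocks in
// total; only tail's next pointer is rewritten here. Illustrative caller, with a
// hypothetical per-thread cache `tc`:
//
//   /* flush a thread-local run of freed blocks in one CAS */
//   bg_spill_push_chain(c, tc->run_head, tc->run_tail, tc->run_len);
//   tc->run_head = tc->run_tail = NULL;
//   tc->run_len  = 0;
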
// Initialize bg_spill module from environment variables
void bg_spill_init(void);

// Drain spill queue for a single class (called by background thread)
void bg_spill_drain_class(int class_idx, pthread_mutex_t* lock);
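//
// Illustrative background-thread loop (sketch only; the drain trigger, the per-class
// lock array, and the sleep interval are assumptions, not part of this header):
//
//   for (;;) {
//       for (int c = 0; c < TINY_NUM_CLASSES; c++) {
//           if (atomic_load_explicit(&g_bg_spill_len[c], memory_order_relaxed) >
//               (uint32_t)g_bg_spill_target) {
//               bg_spill_drain_class(c, &class_locks[c]);   // class_locks: hypothetical
//           }
//       }
//       usleep(1000);
//   }
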
#endif // HAKMEM_TINY_BG_SPILL_H