hakmem/core/hakmem_tiny_bg_spill.h
Moe Charm (CI) 52386401b3 Debug Counters Implementation - Clean History
Major Features:
- Debug counter infrastructure for Refill Stage tracking
- Free Pipeline counters (ss_local, ss_remote, tls_sll)
- Diagnostic counters for early return analysis
- Unified larson.sh benchmark runner with profiles
- Phase 6-3 regression analysis documentation

Bug Fixes:
- Fix SuperSlab disabled by default (HAKMEM_TINY_USE_SUPERSLAB)
- Fix profile variable naming consistency
- Add .gitignore patterns for large files

Performance:
- Phase 6-3: 4.79 M ops/s (has OOM risk)
- With SuperSlab: 3.13 M ops/s (+19% improvement)

This is a clean repository without large log files.

🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-05 12:31:14 +09:00


#ifndef HAKMEM_TINY_BG_SPILL_H
#define HAKMEM_TINY_BG_SPILL_H

#include <stdatomic.h>
#include <stdint.h>
#include <pthread.h>

// Forward declarations
typedef struct TinySlab TinySlab;
typedef struct SuperSlab SuperSlab;

#define TINY_NUM_CLASSES 8

// Background spill queue: lock-free queue for returning blocks to SuperSlab.
// Allows the free() hot path to defer expensive SuperSlab freelist operations.

// Runtime tuning knobs, read from environment variables in bg_spill_init().
extern int g_bg_spill_enable;
extern int g_bg_spill_target;
extern int g_bg_spill_max_batch;

// Per-class queue heads; blocks are linked intrusively through their first word.
extern _Atomic uintptr_t g_bg_spill_head[TINY_NUM_CLASSES];
// Approximate per-class queue lengths (maintained with relaxed atomics).
extern _Atomic uint32_t g_bg_spill_len[TINY_NUM_CLASSES];

// Push single block to spill queue (inline for hot path)
static inline void bg_spill_push_one(int class_idx, void* p) {
    uintptr_t old_head;
    do {
        // Link the block in front of the current head, then publish it with CAS.
        old_head = atomic_load_explicit(&g_bg_spill_head[class_idx], memory_order_acquire);
        *(void**)p = (void*)old_head;
    } while (!atomic_compare_exchange_weak_explicit(&g_bg_spill_head[class_idx], &old_head,
                                                    (uintptr_t)p,
                                                    memory_order_release, memory_order_relaxed));
    // Length counter is advisory only, so a relaxed increment is sufficient.
    atomic_fetch_add_explicit(&g_bg_spill_len[class_idx], 1u, memory_order_relaxed);
}

// Push pre-linked chain to spill queue (inline for hot path)
static inline void bg_spill_push_chain(int class_idx, void* head, void* tail, int count) {
    uintptr_t old_head;
    do {
        // Splice the whole chain in front of the current head: tail -> old head.
        old_head = atomic_load_explicit(&g_bg_spill_head[class_idx], memory_order_acquire);
        *(void**)tail = (void*)old_head;
    } while (!atomic_compare_exchange_weak_explicit(&g_bg_spill_head[class_idx], &old_head,
                                                    (uintptr_t)head,
                                                    memory_order_release, memory_order_relaxed));
    atomic_fetch_add_explicit(&g_bg_spill_len[class_idx], (uint32_t)count, memory_order_relaxed);
}
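
// Example (illustrative, not part of the API): spilling a locally built chain
// of n freed blocks whose first words are already linked head -> ... -> tail:
//
//   bg_spill_push_chain(class_idx, head, tail, n);
//
// is equivalent to n calls to bg_spill_push_one() but needs only a single CAS.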

// Initialize bg_spill module from environment variables
void bg_spill_init(void);

// Drain spill queue for a single class (called by background thread)
void bg_spill_drain_class(int class_idx, pthread_mutex_t* lock);

#endif // HAKMEM_TINY_BG_SPILL_H
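
Usage sketch (not from the repository): the intended split is that free() pushes
blocks onto the per-class queues and a background thread periodically drains them
back into the SuperSlab. The drain policy below (drain a class once its length
exceeds g_bg_spill_target), the worker loop, and the example_* function names are
assumptions for illustration; only the push/init/drain functions declared above
are real API.

#include "hakmem_tiny_bg_spill.h"

// Hot-path side (hypothetical caller): defer SuperSlab work with a single push.
static void example_free_tiny(void* p, int class_idx) {
    if (g_bg_spill_enable) {
        bg_spill_push_one(class_idx, p);  // O(1), lock-free
    }
    // else: fall back to the regular SuperSlab free path.
}

// Background side (hypothetical worker): drain classes whose queues have grown.
static void* example_spill_worker(void* arg) {
    pthread_mutex_t* superslab_lock = (pthread_mutex_t*)arg;  // guards SuperSlab freelists (assumption)
    for (;;) {
        for (int c = 0; c < TINY_NUM_CLASSES; c++) {
            uint32_t len = atomic_load_explicit(&g_bg_spill_len[c], memory_order_relaxed);
            if (len > (uint32_t)g_bg_spill_target) {
                bg_spill_drain_class(c, superslab_lock);
            }
        }
        // Sleep or back off between passes (omitted).
    }
    return NULL;
}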