Major Features:
- Debug counter infrastructure for Refill Stage tracking
- Free Pipeline counters (ss_local, ss_remote, tls_sll)
- Diagnostic counters for early return analysis
- Unified larson.sh benchmark runner with profiles
- Phase 6-3 regression analysis documentation

Bug Fixes:
- Fix SuperSlab disabled by default (HAKMEM_TINY_USE_SUPERSLAB)
- Fix profile variable naming consistency
- Add .gitignore patterns for large files

Performance:
- Phase 6-3: 4.79 M ops/s (has OOM risk)
- With SuperSlab: 3.13 M ops/s (+19% improvement)

This is a clean repository without large log files.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
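
/*
 * Background spill queues for the tiny allocator (see hakmem_tiny_bg_spill.h).
 *
 * Each size class owns a lock-free, intrusively linked list of freed blocks
 * (head in g_bg_spill_head, approximate length in g_bg_spill_len).
 * bg_spill_drain_class() claims the list with an atomic exchange, returns up
 * to g_bg_spill_max_batch blocks to their owning SuperSlab freelists under
 * the caller's class lock, and re-queues any remainder.
 *
 * This overview is inferred from the code below; the push side of the queue
 * lives elsewhere in the allocator.
 */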
#include "hakmem_tiny_bg_spill.h"
|
|
#include "hakmem_tiny_superslab.h" // For SuperSlab, TinySlabMeta, ss_active_dec_one
|
|
#include "hakmem_super_registry.h" // For hak_super_lookup
|
|
#include "tiny_remote.h"
|
|
#include "hakmem_tiny.h"
|
|
#include <pthread.h>
|
|
|
|
static inline uint32_t tiny_self_u32_guard(void) {
|
|
return (uint32_t)(uintptr_t)pthread_self();
|
|
}
|
|
#include <stdlib.h> // For getenv, atoi
|
|
|
|

// Global variables
int g_bg_spill_enable    = 0;    // HAKMEM_TINY_BG_SPILL=1
int g_bg_spill_target    = 128;  // HAKMEM_TINY_BG_TARGET (per class)
int g_bg_spill_max_batch = 128;  // HAKMEM_TINY_BG_MAX_BATCH
_Atomic uintptr_t g_bg_spill_head[TINY_NUM_CLASSES];
_Atomic uint32_t  g_bg_spill_len[TINY_NUM_CLASSES];
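
// Read the HAKMEM_TINY_BG_* environment variables (values outside the
// accepted ranges are ignored) and reset every per-class spill queue to
// empty. Presumably called once from the allocator's startup path, before
// any spill traffic exists.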
void bg_spill_init(void) {
    // Parse environment variables
    char* bs = getenv("HAKMEM_TINY_BG_SPILL");
    if (bs) g_bg_spill_enable = (atoi(bs) != 0) ? 1 : 0;
    char* bt2 = getenv("HAKMEM_TINY_BG_TARGET");
    if (bt2) { int v = atoi(bt2); if (v > 0 && v <= 8192) g_bg_spill_target = v; }
    char* mb = getenv("HAKMEM_TINY_BG_MAX_BATCH");
    if (mb) { int v = atoi(mb); if (v > 0 && v <= 4096) g_bg_spill_max_batch = v; }

    // Initialize atomic queues
    for (int k = 0; k < TINY_NUM_CLASSES; k++) {
        atomic_store_explicit(&g_bg_spill_head[k], (uintptr_t)0, memory_order_relaxed);
        atomic_store_explicit(&g_bg_spill_len[k], 0u, memory_order_relaxed);
    }
}
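
// Drain up to g_bg_spill_max_batch queued blocks for one size class back to
// their owning SuperSlab freelists. `lock` is assumed to be the mutex that
// guards this class's slab metadata; any unprocessed remainder of the chain
// is pushed back onto the queue head with a CAS loop.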
void bg_spill_drain_class(int class_idx, pthread_mutex_t* lock) {
    uint32_t approx = atomic_load_explicit(&g_bg_spill_len[class_idx], memory_order_relaxed);
    if (approx == 0) return;

    uintptr_t chain = atomic_exchange_explicit(&g_bg_spill_head[class_idx], (uintptr_t)0, memory_order_acq_rel);
    if (chain == 0) return;

    // Split chain up to max_batch
    int processed = 0;
    void* rest = NULL;
    void* cur = (void*)chain;
    void* prev = NULL;
    while (cur && processed < g_bg_spill_max_batch) {
        prev = cur;
        cur = *(void**)cur;
        processed++;
    }
    if (cur != NULL) { rest = cur; *(void**)prev = NULL; }

    // Return processed nodes to SS freelists
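    // (local freelist push when the remote guard permits; otherwise the block
    //  is routed back to its owner thread via ss_remote_push()).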
    pthread_mutex_lock(lock);
    uint32_t self_tid = tiny_self_u32_guard();
    void* node = (void*)chain;
    while (node) {
        void* next = *(void**)node;
        SuperSlab* owner_ss = hak_super_lookup(node);
        if (owner_ss && owner_ss->magic == SUPERSLAB_MAGIC) {
            int slab_idx = slab_index_for(owner_ss, node);
            TinySlabMeta* meta = &owner_ss->slabs[slab_idx];
            if (!tiny_remote_guard_allow_local_push(owner_ss, slab_idx, meta, node, "bg_spill", self_tid)) {
                (void)ss_remote_push(owner_ss, slab_idx, node);
                if (meta->used > 0) meta->used--;
                node = next;
                continue;
            }
            *(void**)node = meta->freelist;
            meta->freelist = node;
            meta->used--;
            // Active was decremented at free time
        }
        node = next;
    }
    pthread_mutex_unlock(lock);

    if (processed > 0) {
        atomic_fetch_sub_explicit(&g_bg_spill_len[class_idx], (uint32_t)processed, memory_order_relaxed);
    }

    if (rest) {
        // Prepend remainder back to head
        uintptr_t old_head;
        void* tail = rest;
        while (*(void**)tail) tail = *(void**)tail;
        do {
            old_head = atomic_load_explicit(&g_bg_spill_head[class_idx], memory_order_acquire);
            *(void**)tail = (void*)old_head;
        } while (!atomic_compare_exchange_weak_explicit(&g_bg_spill_head[class_idx], &old_head,
                                                        (uintptr_t)rest,
                                                        memory_order_release, memory_order_relaxed));
    }
}
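
/*
 * Usage sketch (hypothetical, not part of this translation unit): a background
 * maintenance thread could periodically drain every class. The lock array
 * `g_tiny_class_locks` is an illustrative assumption; the real allocator
 * passes whichever mutex protects each class's slab metadata.
 *
 *   extern pthread_mutex_t g_tiny_class_locks[TINY_NUM_CLASSES]; // hypothetical
 *
 *   static void bg_spill_drain_all(void) {
 *       if (!g_bg_spill_enable) return;
 *       for (int k = 0; k < TINY_NUM_CLASSES; k++) {
 *           bg_spill_drain_class(k, &g_tiny_class_locks[k]);
 *       }
 *   }
 */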