hakmem/core/box/ss_cache_box.c
Moe Charm (CI) · commit 9b0d746407 · Phase 3d-B: TLS Cache Merge - Unified g_tls_sll[] structure (+12-18% expected)
Merge the separate g_tls_sll_head[] and g_tls_sll_count[] arrays into a unified
TinyTLSSLL struct to improve L1D cache locality. Expected performance gain:
+12-18% from fewer cache-line splits (2 loads → 1 load per operation); a rough
sketch of the unified struct follows the commit details below.

Changes:
- core/hakmem_tiny.h: Add TinyTLSSLL type (16B aligned, head+count+pad)
- core/hakmem_tiny.c: Replace separate arrays with g_tls_sll[8]
- core/box/tls_sll_box.h: Update Box API (13 sites) for unified access
- Updated 32+ files: All g_tls_sll_head[i] → g_tls_sll[i].head
- Updated 32+ files: All g_tls_sll_count[i] → g_tls_sll[i].count
- core/hakmem_tiny_integrity.h: Unified canary guards
- core/box/integrity_box.c: Simplified canary validation
- Makefile: Added core/box/tiny_sizeclass_hist_box.o to link

Build:  PASS (10K ops sanity test)
Warnings: Only pre-existing LTO type mismatches (unrelated)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-20 07:32:30 +09:00
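For orientation, a minimal sketch of what the unified TLS structure described in the commit message might look like. The TinyTLSSLL and g_tls_sll names and the 16-byte head + count + pad layout are taken from the message above; the concrete field types, the padding width, and the TLS storage class are assumptions (the real definition lives in core/hakmem_tiny.h).

#include <stdint.h>

// Sketch only: one TLS singly-linked-list entry per tiny size class, packed so
// that head and count share a single 16-byte slot instead of living in two
// separate arrays (and therefore two cache lines) as before.
typedef struct TinyTLSSLL {
    void*    head;   // freelist head (was g_tls_sll_head[i])
    uint32_t count;  // cached block count (was g_tls_sll_count[i])
    uint32_t pad;    // pad the entry out to 16 bytes
} __attribute__((aligned(16))) TinyTLSSLL;

// Hypothetical TLS definition mirroring the g_tls_sll[8] named in the commit message.
static __thread TinyTLSSLL g_tls_sll[8];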

206 lines · 7.0 KiB · C

// ss_cache_box.c - SuperSlab Cache Management Box Implementation
#include "ss_cache_box.h"
#include "ss_os_acquire_box.h"
#include "ss_stats_box.h"
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
// ============================================================================
// Cache Entry Type (internal)
// ============================================================================
typedef struct SuperslabCacheEntry {
    struct SuperslabCacheEntry* next;
} SuperslabCacheEntry;
// ============================================================================
// Cache State (per-class)
// ============================================================================
static SuperslabCacheEntry* g_ss_cache_head[8] = {0};
static size_t g_ss_cache_count[8] = {0};
size_t g_ss_cache_cap[8] = {0}; // Exported for ss_allocation_box.c
size_t g_ss_precharge_target[8] = {0}; // Exported for ss_allocation_box.c
static _Atomic int g_ss_precharge_done[8] = {0};
static int g_ss_cache_enabled = 0;
static pthread_once_t g_ss_cache_once = PTHREAD_ONCE_INIT;
static pthread_mutex_t g_ss_cache_lock[8];
// ============================================================================
// Cache Statistics
// ============================================================================
uint64_t g_ss_cache_hits[8] = {0};
uint64_t g_ss_cache_misses[8] = {0};
uint64_t g_ss_cache_puts[8] = {0};
uint64_t g_ss_cache_drops[8] = {0};
uint64_t g_ss_cache_precharged[8] = {0};
// ============================================================================
// Cache Initialization
// ============================================================================
static void ss_cache_global_init(void) {
    for (int i = 0; i < 8; i++) {
        pthread_mutex_init(&g_ss_cache_lock[i], NULL);
    }
}
void ss_cache_ensure_init(void) {
    pthread_once(&g_ss_cache_once, ss_cache_global_init);
}
// ============================================================================
// Cache Operations
// ============================================================================
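// Pop one cached SuperSlab for this size class, or NULL on a miss (cache
// disabled, class index out of range, or the per-class list is empty).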
void* ss_cache_pop(uint8_t size_class) {
    if (!g_ss_cache_enabled) return NULL;
    if (size_class >= 8) return NULL;
    ss_cache_ensure_init();
    pthread_mutex_lock(&g_ss_cache_lock[size_class]);
    SuperslabCacheEntry* entry = g_ss_cache_head[size_class];
    if (entry) {
        g_ss_cache_head[size_class] = entry->next;
        if (g_ss_cache_count[size_class] > 0) {
            g_ss_cache_count[size_class]--;
        }
        entry->next = NULL;
        g_ss_cache_hits[size_class]++;
    } else {
        g_ss_cache_misses[size_class]++;
    }
    pthread_mutex_unlock(&g_ss_cache_lock[size_class]);
    return (void*)entry;
}
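// Push a SuperSlab back into the per-class cache. Returns 1 if the slab was
// cached; returns 0 (ownership stays with the caller) if the cache is disabled,
// the class is out of range, or the per-class cap is reached (counted in
// g_ss_cache_drops).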
int ss_cache_push(uint8_t size_class, SuperSlab* ss) {
    if (!g_ss_cache_enabled) return 0;
    if (size_class >= 8) return 0;
    ss_cache_ensure_init();
    pthread_mutex_lock(&g_ss_cache_lock[size_class]);
    size_t cap = g_ss_cache_cap[size_class];
    if (cap != 0 && g_ss_cache_count[size_class] >= cap) {
        g_ss_cache_drops[size_class]++;
        pthread_mutex_unlock(&g_ss_cache_lock[size_class]);
        return 0;
    }
    SuperslabCacheEntry* entry = (SuperslabCacheEntry*)ss;
    entry->next = g_ss_cache_head[size_class];
    g_ss_cache_head[size_class] = entry;
    g_ss_cache_count[size_class]++;
    g_ss_cache_puts[size_class]++;
    pthread_mutex_unlock(&g_ss_cache_lock[size_class]);
    return 1;
}
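// One-shot precharge: fill this class's cache up to its precharge target
// (clamped to the cap) by acquiring fresh superslabs from the OS. The _Atomic
// done flag ensures this runs at most once per class until it is re-armed by
// tiny_ss_precharge_set_class_target().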
void ss_cache_precharge(uint8_t size_class, size_t ss_size, uintptr_t ss_mask) {
    if (!g_ss_cache_enabled) return;
    if (size_class >= 8) return;
    if (g_ss_precharge_target[size_class] == 0) return;
    if (atomic_load_explicit(&g_ss_precharge_done[size_class], memory_order_acquire)) return;
    ss_cache_ensure_init();
    pthread_mutex_lock(&g_ss_cache_lock[size_class]);
    size_t target = g_ss_precharge_target[size_class];
    size_t cap = g_ss_cache_cap[size_class];
    size_t desired = target;
    if (cap != 0 && desired > cap) {
        desired = cap;
    }
    while (g_ss_cache_count[size_class] < desired) {
        void* raw = ss_os_acquire(size_class, ss_size, ss_mask, 1);
        if (!raw) {
            break;
        }
        SuperslabCacheEntry* entry = (SuperslabCacheEntry*)raw;
        entry->next = g_ss_cache_head[size_class];
        g_ss_cache_head[size_class] = entry;
        g_ss_cache_count[size_class]++;
        g_ss_cache_precharged[size_class]++;
    }
    atomic_store_explicit(&g_ss_precharge_done[size_class], 1, memory_order_release);
    pthread_mutex_unlock(&g_ss_cache_lock[size_class]);
}
// ============================================================================
// Runtime Tuning API
// ============================================================================
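// Set the per-class cache capacity at runtime. Shrinking below the current
// count unmaps the surplus superslabs and keeps the global superslab stats
// consistent; the cache-enabled flag is recomputed afterwards.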
void tiny_ss_cache_set_class_cap(int class_idx, size_t new_cap) {
    if (class_idx < 0 || class_idx >= 8) {
        return;
    }
    ss_cache_ensure_init();
    pthread_mutex_lock(&g_ss_cache_lock[class_idx]);
    size_t old_cap = g_ss_cache_cap[class_idx];
    g_ss_cache_cap[class_idx] = new_cap;
    // If shrinking the cap, drop surplus cached superslabs from the head and munmap them.
    if (new_cap == 0 || new_cap < old_cap) {
        while (g_ss_cache_count[class_idx] > new_cap) {
            SuperslabCacheEntry* entry = g_ss_cache_head[class_idx];
            if (!entry) {
                g_ss_cache_count[class_idx] = 0;
                break;
            }
            g_ss_cache_head[class_idx] = entry->next;
            g_ss_cache_count[class_idx]--;
            g_ss_cache_drops[class_idx]++;
            // Convert the cache entry back to a SuperSlab* and release it to the OS.
            SuperSlab* ss = (SuperSlab*)entry;
            size_t ss_size = (size_t)1 << ss->lg_size;
            munmap((void*)ss, ss_size);
            // Update global stats to keep accounting consistent.
            extern pthread_mutex_t g_superslab_lock; // From ss_stats_box.c
            pthread_mutex_lock(&g_superslab_lock);
            g_superslabs_freed++;
            if (g_bytes_allocated >= ss_size) {
                g_bytes_allocated -= ss_size;
            } else {
                g_bytes_allocated = 0;
            }
            pthread_mutex_unlock(&g_superslab_lock);
        }
    }
    pthread_mutex_unlock(&g_ss_cache_lock[class_idx]);
    // Recompute the cache-enabled flag (8 classes, so O(8) is cheap).
    int enabled = 0;
    for (int i = 0; i < 8; i++) {
        if (g_ss_cache_cap[i] > 0 || g_ss_precharge_target[i] > 0) {
            enabled = 1;
            break;
        }
    }
    g_ss_cache_enabled = enabled;
}
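// Set the per-class precharge target. A nonzero target enables the cache and
// re-arms the one-shot precharge flag for this class.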
void tiny_ss_precharge_set_class_target(int class_idx, size_t target) {
    if (class_idx < 0 || class_idx >= 8) {
        return;
    }
    ss_cache_ensure_init();
    pthread_mutex_lock(&g_ss_cache_lock[class_idx]);
    g_ss_precharge_target[class_idx] = target;
    if (target > 0) {
        g_ss_cache_enabled = 1;
        atomic_store_explicit(&g_ss_precharge_done[class_idx], 0, memory_order_relaxed);
    }
    pthread_mutex_unlock(&g_ss_cache_lock[class_idx]);
}
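For illustration only (not part of ss_cache_box.c): a short usage sketch of the cache and runtime tuning API above. It assumes the prototypes and the SuperSlab type are exposed through ss_cache_box.h; the class index, cap, target, and superslab size/mask values are placeholders, not real constants from the allocator.

#include <stddef.h>
#include <stdint.h>
#include "ss_cache_box.h"

void example_enable_ss_cache(void) {
    // Allow up to 4 cached superslabs for class 3 and ask for 2 to be precharged.
    tiny_ss_cache_set_class_cap(3, 4);
    tiny_ss_precharge_set_class_target(3, 2);

    // On the allocation path (size/mask values are illustrative placeholders):
    size_t    ss_size = (size_t)1 << 21;          // e.g. a 2 MiB superslab
    uintptr_t ss_mask = ~(uintptr_t)(ss_size - 1);
    ss_cache_precharge(3, ss_size, ss_mask);      // one-shot fill up to the target

    void* ss = ss_cache_pop(3);                   // NULL on a cache miss
    if (ss) {
        // ... use the SuperSlab, then return it to the cache when it drains.
        ss_cache_push(3, (SuperSlab*)ss);
    }
}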