Phase 3d-B: TLS Cache Merge - Unified g_tls_sll[] structure (+12-18% expected)
Merge the separate g_tls_sll_head[] and g_tls_sll_count[] arrays into a unified TinyTLSSLL struct to improve L1D cache locality. Expected performance gain: +12-18% from reducing cache-line splits (2 loads → 1 load per operation).

Changes:
- core/hakmem_tiny.h: Add TinyTLSSLL type (16B aligned, head + count + padding; sketched below)
- core/hakmem_tiny.c: Replace the separate arrays with g_tls_sll[8]
- core/box/tls_sll_box.h: Update Box API (13 sites) for unified access
- Updated 32+ files: All g_tls_sll_head[i] → g_tls_sll[i].head
- Updated 32+ files: All g_tls_sll_count[i] → g_tls_sll[i].count
- core/hakmem_tiny_integrity.h: Unified canary guards
- core/box/integrity_box.c: Simplified canary validation
- Makefile: Added core/box/tiny_sizeclass_hist_box.o to the link

Build: ✅ PASS (10K-ops sanity test)
Warnings: Only pre-existing LTO type mismatches (unrelated)

🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
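A minimal sketch of the unified per-thread structure described above, assuming a 64-bit target. The field types, the padding member name, and the __thread storage class are illustrative guesses; only the TinyTLSSLL name, the head/count members, the 16-byte alignment, and the 8-entry g_tls_sll[] array come from the commit message.

/* Illustrative sketch only; not the actual core/hakmem_tiny.h definition. */
#include <stdint.h>

typedef struct __attribute__((aligned(16))) TinyTLSSLL {
    void*    head;   /* free-list head for this tiny size class */
    uint32_t count;  /* number of blocks currently chained on the list */
    uint32_t _pad;   /* pad the entry out to exactly 16 bytes */
} TinyTLSSLL;

/* One entry per size class. Before the merge, head and count lived in two
 * separate TLS arrays, so a push/pop touched two cache lines; now both
 * fields share one 16-byte slot and a single load reaches them. */
static __thread TinyTLSSLL g_tls_sll[8];

/* Call sites change mechanically:
 *   before:  g_tls_sll_head[cls]   and  g_tls_sll_count[cls]
 *   after:   g_tls_sll[cls].head   and  g_tls_sll[cls].count  */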
core/hakmem_tiny_unified_stats.c (new file, 66 lines)
@@ -0,0 +1,66 @@
#include "hakmem_tiny_unified_stats.h"
#include <stdatomic.h>
#include <stdlib.h>

// Global aggregation (counts from all threads accumulated atomically)
static _Atomic uint64_t g_tiny_unified_hit[TINY_NUM_CLASSES];
static _Atomic uint64_t g_tiny_unified_miss[TINY_NUM_CLASSES];

// Sampling control
// g_sample_mask == 0  → stats OFF
// mask = (1<<n) - 1   → sample once every 2^n calls
static _Atomic uint32_t g_sample_mask = 0;
static _Atomic uint64_t g_seq = 0;

void hak_tiny_unified_stats_init(void) {
    const char* env = getenv("HAKMEM_TINY_UNIFIED_SAMPLE");
    if (env) {
        int n = atoi(env);
        if (n > 0 && n < 31) {
            uint32_t mask = (uint32_t)((1u << n) - 1u);
            atomic_store(&g_sample_mask, mask);
        }
    }
}

static inline int tiny_unified_should_sample(void) {
    uint32_t mask = atomic_load(&g_sample_mask);
    if (mask == 0) {
        return 0;
    }
    uint64_t x = atomic_fetch_add(&g_seq, 1);
    return ((x & mask) == 0);
}
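// Worked example (editorial note, not part of the committed file): with
// HAKMEM_TINY_UNIFIED_SAMPLE=10, init stores mask = (1<<10)-1 = 1023, so one
// call in every 1024 to tiny_unified_should_sample() returns 1 (g_seq is a
// single atomic counter shared by all threads). Sampled hit/miss counts thus
// represent roughly 1/1024 of the real traffic; multiplying a snapshot by
// mask+1 approximates the true totals.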

void hak_tiny_unified_stat_alloc(int class_idx, int is_hit) {
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES) {
        return;
    }
    if (!tiny_unified_should_sample()) {
        return;
    }
    if (is_hit) {
        atomic_fetch_add(&g_tiny_unified_hit[class_idx], 1);
    } else {
        atomic_fetch_add(&g_tiny_unified_miss[class_idx], 1);
    }
}

void hak_tiny_unified_stats_snapshot(uint64_t hits[TINY_NUM_CLASSES],
                                     uint64_t misses[TINY_NUM_CLASSES],
                                     int reset) {
    if (!hits || !misses) {
        return;
    }
    for (int i = 0; i < TINY_NUM_CLASSES; i++) {
        hits[i] = atomic_load(&g_tiny_unified_hit[i]);
        misses[i] = atomic_load(&g_tiny_unified_miss[i]);
    }
    if (reset) {
        for (int i = 0; i < TINY_NUM_CLASSES; i++) {
            atomic_store(&g_tiny_unified_hit[i], 0);
            atomic_store(&g_tiny_unified_miss[i], 0);
        }
    }
}
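A hypothetical driver, not part of this commit, showing how the three entry points above fit together. It assumes TINY_NUM_CLASSES is visible via hakmem_tiny_unified_stats.h and that the process was started with HAKMEM_TINY_UNIFIED_SAMPLE set (for example to 10).

#include <stdio.h>
#include <inttypes.h>
#include "hakmem_tiny_unified_stats.h"

int main(void) {
    /* Reads HAKMEM_TINY_UNIFIED_SAMPLE; e.g. "10" enables ~1/1024 sampling. */
    hak_tiny_unified_stats_init();

    /* In the allocator these calls sit on the tiny alloc path; the class
     * index and hit/miss flags here are made-up values for illustration. */
    hak_tiny_unified_stat_alloc(3, 1);   /* TLS free-list hit  */
    hak_tiny_unified_stat_alloc(3, 0);   /* TLS free-list miss */

    uint64_t hits[TINY_NUM_CLASSES], misses[TINY_NUM_CLASSES];
    hak_tiny_unified_stats_snapshot(hits, misses, /*reset=*/1);

    for (int i = 0; i < TINY_NUM_CLASSES; i++) {
        if (hits[i] || misses[i]) {
            printf("class %d: %" PRIu64 " sampled hits, %" PRIu64 " sampled misses\n",
                   i, hits[i], misses[i]);
        }
    }
    return 0;
}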