Merge separate g_tls_sll_head[] and g_tls_sll_count[] arrays into a unified
TinyTLSSLL struct to improve L1D cache locality. Expected performance gain:
+12-18% from reducing cache line splits (2 loads → 1 load per operation).

Changes:
- core/hakmem_tiny.h: Add TinyTLSSLL type (16B aligned, head+count+pad)
- core/hakmem_tiny.c: Replace separate arrays with g_tls_sll[8]
- core/box/tls_sll_box.h: Update Box API (13 sites) for unified access
- Updated 32+ files: All g_tls_sll_head[i] → g_tls_sll[i].head
- Updated 32+ files: All g_tls_sll_count[i] → g_tls_sll[i].count
- core/hakmem_tiny_integrity.h: Unified canary guards
- core/box/integrity_box.c: Simplified canary validation
- Makefile: Added core/box/tiny_sizeclass_hist_box.o to link

Build: ✅ PASS (10K ops sanity test)
Warnings: Only pre-existing LTO type mismatches (unrelated)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
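
For reference, a minimal sketch of the unified per-thread layout this commit describes; the field types, alignment mechanism, and storage class below are assumptions for illustration, and the authoritative definition lives in core/hakmem_tiny.h:

#include <stdint.h>

// Illustrative only: one 16-byte slot per tiny size class, so a class's
// freelist head and count always share a cache line instead of living in
// two parallel arrays.
typedef struct TinyTLSSLL {
    _Alignas(16) void* head;  // freelist head   (was g_tls_sll_head[i])
    uint32_t count;           // list length     (was g_tls_sll_count[i])
    uint32_t pad;             // pad to 16 bytes
} TinyTLSSLL;

// Replaces the two parallel arrays; accessed as g_tls_sll[cls].head and
// g_tls_sll[cls].count after the merge.
static _Thread_local TinyTLSSLL g_tls_sll[8];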
// ss_stats_box.c - SuperSlab Statistics Box Implementation

#include "ss_stats_box.h"
#include "../superslab/superslab_inline.h"

#include <inttypes.h>  // PRIu64 for printing the uint64_t counters
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

// ============================================================================
// Global Statistics State
// ============================================================================

static pthread_mutex_t g_superslab_lock = PTHREAD_MUTEX_INITIALIZER;

uint64_t g_superslabs_allocated = 0; // Non-static for debugging
uint64_t g_superslabs_freed = 0;     // Non-static for test access
uint64_t g_bytes_allocated = 0;      // Non-static for debugging

// Per-class counters (Tiny classes = 8)
uint64_t g_ss_alloc_by_class[8] = {0};
uint64_t g_ss_freed_by_class[8] = {0};

// Cache statistics
uint64_t g_superslabs_reused = 0;
uint64_t g_superslabs_cached = 0;

// Debug counters (free path instrumentation)
_Atomic uint64_t g_ss_active_dec_calls = 0;
_Atomic uint64_t g_hak_tiny_free_calls = 0;
_Atomic uint64_t g_ss_remote_push_calls = 0;
_Atomic uint64_t g_free_ss_enter = 0;         // hak_tiny_free_superslab() entries
_Atomic uint64_t g_free_local_box_calls = 0;  // same-thread freelist pushes
_Atomic uint64_t g_free_remote_box_calls = 0; // cross-thread remote pushes
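// Note: unlike the mutex-guarded totals above, these counters are _Atomic so
// the free path can bump them without taking g_superslab_lock, e.g.
// (illustrative only, not an actual call site in this file):
//   atomic_fetch_add_explicit(&g_free_ss_enter, 1, memory_order_relaxed);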

// ============================================================================
// Statistics Update Implementation
// ============================================================================

void ss_stats_os_alloc(uint8_t size_class, size_t ss_size) {
    pthread_mutex_lock(&g_superslab_lock);
    g_superslabs_allocated++;
    if (size_class < 8) {
        g_ss_alloc_by_class[size_class]++;
    }
    g_bytes_allocated += ss_size;
    pthread_mutex_unlock(&g_superslab_lock);
}

void ss_stats_cache_reuse(void) {
    pthread_mutex_lock(&g_superslab_lock);
    g_superslabs_reused++;
    pthread_mutex_unlock(&g_superslab_lock);
}

void ss_stats_cache_store(void) {
    pthread_mutex_lock(&g_superslab_lock);
    g_superslabs_cached++;
    pthread_mutex_unlock(&g_superslab_lock);
}

// ============================================================================
// Statistics Reporting Implementation
// ============================================================================

void superslab_print_stats(SuperSlab* ss) {
    if (!ss || ss->magic != SUPERSLAB_MAGIC) {
        printf("Invalid SuperSlab\n");
        return;
    }

    printf("=== SuperSlab Stats ===\n");
    printf("Address: %p\n", (void*)ss);
    // Phase 12: per-SS size_class removed; classes are per-slab via meta->class_idx.
    printf("Active slabs: %u / %d\n", ss->active_slabs, ss_slabs_capacity(ss));
    printf("Bitmap: 0x%08X\n", ss->slab_bitmap);
    printf("\nPer-slab details:\n");
    for (int i = 0; i < ss_slabs_capacity(ss); i++) {
        if (ss->slab_bitmap & (1u << i)) {
            TinySlabMeta* meta = &ss->slabs[i];
            printf(" Slab %2d: used=%u/%u freelist=%p class=%u owner_tid_low=%u\n",
                   i, meta->used, meta->capacity, (void*)meta->freelist,
                   (unsigned)meta->class_idx, (unsigned)meta->owner_tid_low);
        }
    }
    printf("\n");
}

void superslab_print_global_stats(void) {
    pthread_mutex_lock(&g_superslab_lock);
    printf("=== Global SuperSlab Stats ===\n");
    printf("SuperSlabs allocated: %" PRIu64 "\n", g_superslabs_allocated);
    printf("SuperSlabs freed: %" PRIu64 "\n", g_superslabs_freed);
    printf("SuperSlabs active: %" PRIu64 "\n", g_superslabs_allocated - g_superslabs_freed);
    printf("Total bytes allocated: %" PRIu64 " MB\n", g_bytes_allocated / (1024 * 1024));
    pthread_mutex_unlock(&g_superslab_lock);
}
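
For orientation, a minimal sketch of how a SuperSlab acquisition path might drive this box; the caller and the two helper declarations below are hypothetical placeholders for the real cache lookup and OS mapping, not functions from this codebase:

// Hypothetical call-site sketch (not part of ss_stats_box.c). Assumes
// ss_stats_box.h declares the ss_stats_* prototypes and the SuperSlab type.
#include "ss_stats_box.h"

SuperSlab* cache_pop_superslab(void);                      // hypothetical helper
SuperSlab* os_map_superslab(uint8_t cls, size_t ss_size);  // hypothetical helper

SuperSlab* example_acquire_superslab(uint8_t size_class, size_t ss_size) {
    SuperSlab* ss = cache_pop_superslab();
    if (ss) {
        ss_stats_cache_reuse();                  // count reuse of a cached SuperSlab
    } else {
        ss = os_map_superslab(size_class, ss_size);
        ss_stats_os_alloc(size_class, ss_size);  // count a fresh OS-backed SuperSlab
    }
    return ss;
}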