hakmem/core/box/ss_stats_box.c
// ss_stats_box.c - SuperSlab Statistics Box Implementation
#include "ss_stats_box.h"
#include "../superslab/superslab_inline.h"

#include <pthread.h>
#include <stdatomic.h>  // _Atomic counters, atomic_*_explicit, memory_order_relaxed
#include <stdbool.h>
#include <stdint.h>     // uint8_t, uint64_t
#include <stdio.h>
#include <stdlib.h>     // getenv()
// ============================================================================
// Global Statistics State
// ============================================================================
static pthread_mutex_t g_superslab_lock = PTHREAD_MUTEX_INITIALIZER;
uint64_t g_superslabs_allocated = 0; // Non-static for debugging
uint64_t g_superslabs_freed = 0; // Non-static for test access
uint64_t g_bytes_allocated = 0; // Non-static for debugging
// Per-class counters (Tiny classes = 8)
uint64_t g_ss_alloc_by_class[8] = {0};
uint64_t g_ss_freed_by_class[8] = {0};
// Cache statistics
uint64_t g_superslabs_reused = 0;
uint64_t g_superslabs_cached = 0;
// Debug counters (free path instrumentation)
_Atomic uint64_t g_ss_active_dec_calls = 0;
_Atomic uint64_t g_hak_tiny_free_calls = 0;
_Atomic uint64_t g_ss_remote_push_calls = 0;
_Atomic uint64_t g_free_ss_enter = 0; // hak_tiny_free_superslab() entries
_Atomic uint64_t g_free_local_box_calls = 0; // same-thread freelist pushes
_Atomic uint64_t g_free_remote_box_calls = 0; // cross-thread remote pushes
// Superslab/slab observability (Tiny-only; relaxed updates)
_Atomic uint64_t g_ss_live_by_class[8] = {0};
_Atomic uint64_t g_ss_empty_events[8] = {0};
_Atomic uint64_t g_slab_live_events[8] = {0};
// ============================================================================
// Statistics Update Implementation
// ============================================================================
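// Record a fresh SuperSlab allocation from the OS: bumps the global and
// per-class allocation counters and the running byte total.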
void ss_stats_os_alloc(uint8_t size_class, size_t ss_size) {
    pthread_mutex_lock(&g_superslab_lock);
    g_superslabs_allocated++;
    if (size_class < 8) {
        g_ss_alloc_by_class[size_class]++;
    }
    g_bytes_allocated += ss_size;
    pthread_mutex_unlock(&g_superslab_lock);
}
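// Record that an allocation request was satisfied from the SuperSlab cache
// instead of going back to the OS.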
void ss_stats_cache_reuse(void) {
    pthread_mutex_lock(&g_superslab_lock);
    g_superslabs_reused++;
    pthread_mutex_unlock(&g_superslab_lock);
}
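// Record that a freed SuperSlab was parked in the cache for later reuse.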
void ss_stats_cache_store(void) {
    pthread_mutex_lock(&g_superslab_lock);
    g_superslabs_cached++;
    pthread_mutex_unlock(&g_superslab_lock);
}
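// Track live SuperSlabs per Tiny class. Relaxed atomics are sufficient: these
// counters are observability-only and never synchronize other data.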
void ss_stats_on_ss_alloc_class(int class_idx) {
    if (class_idx >= 0 && class_idx < 8) {
        atomic_fetch_add_explicit(&g_ss_live_by_class[class_idx], 1, memory_order_relaxed);
    }
}
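// Decrement the live-SuperSlab counter for a class when a SuperSlab is
// released; clamped at zero so mismatched alloc/free hooks cannot wrap the
// counter (best effort: the load/sub pair is not a single atomic step).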
void ss_stats_on_ss_free_class(int class_idx) {
    if (class_idx >= 0 && class_idx < 8) {
        // Saturating-style decrement to avoid underflow from mismatched hooks
        uint64_t prev = atomic_load_explicit(&g_ss_live_by_class[class_idx], memory_order_relaxed);
        if (prev > 0) {
            atomic_fetch_sub_explicit(&g_ss_live_by_class[class_idx], 1, memory_order_relaxed);
        }
    }
}
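// Fold the result of a SuperSlab scan into the per-class event counters:
// `slab_live` is added to the slab-live event total, and a true `is_empty`
// bumps the empty-SuperSlab event count.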
void ss_stats_on_ss_scan(int class_idx, int slab_live, int is_empty) {
    if (class_idx < 0 || class_idx >= 8) {
        return;
    }
    if (slab_live > 0) {
        atomic_fetch_add_explicit(&g_slab_live_events[class_idx],
                                  (uint64_t)slab_live,
                                  memory_order_relaxed);
    }
    if (is_empty) {
        atomic_fetch_add_explicit(&g_ss_empty_events[class_idx], 1, memory_order_relaxed);
    }
}
// ============================================================================
// Statistics Reporting Implementation
// ============================================================================
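// Dump the state of a single SuperSlab: header fields plus one line per
// in-use slab. The magic check makes a stale pointer print "Invalid SuperSlab"
// instead of dereferencing further.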
void superslab_print_stats(SuperSlab* ss) {
    if (!ss || ss->magic != SUPERSLAB_MAGIC) {
        printf("Invalid SuperSlab\n");
        return;
    }
    printf("=== SuperSlab Stats ===\n");
    printf("Address: %p\n", (void*)ss);
    // Phase 12: per-SS size_class removed; classes are per-slab via meta->class_idx.
    printf("Active slabs: %u / %d\n", ss->active_slabs, ss_slabs_capacity(ss));
    printf("Bitmap: 0x%08X\n", ss->slab_bitmap);
    printf("\nPer-slab details:\n");
    for (int i = 0; i < ss_slabs_capacity(ss); i++) {
        if (ss->slab_bitmap & (1u << i)) {
            TinySlabMeta* meta = &ss->slabs[i];
            printf("  Slab %2d: used=%u/%u freelist=%p class=%u owner_tid_low=%u\n",
                   i, meta->used, meta->capacity, (void*)meta->freelist,
                   (unsigned)meta->class_idx, (unsigned)meta->owner_tid_low);
        }
    }
    printf("\n");
}
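// Print the mutex-protected global counters; "active" is the difference
// between lifetime allocations and frees.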
void superslab_print_global_stats(void) {
    pthread_mutex_lock(&g_superslab_lock);
    printf("=== Global SuperSlab Stats ===\n");
    printf("SuperSlabs allocated: %llu\n", (unsigned long long)g_superslabs_allocated);
    printf("SuperSlabs freed: %llu\n", (unsigned long long)g_superslabs_freed);
    printf("SuperSlabs active: %llu\n",
           (unsigned long long)(g_superslabs_allocated - g_superslabs_freed));
    printf("Total bytes allocated: %llu MB\n",
           (unsigned long long)(g_bytes_allocated / (1024 * 1024)));
    pthread_mutex_unlock(&g_superslab_lock);
}
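// Dump the per-class observability counters to stderr, but only when the
// HAKMEM_SS_STATS_DUMP environment variable is set to a non-empty value other
// than "0"; classes whose counters are all zero are skipped.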
void ss_stats_dump_if_requested(void) {
    const char* env = getenv("HAKMEM_SS_STATS_DUMP");
    if (!env || !*env || *env == '0') {
        return;
    }
    fprintf(stderr, "[SS_STATS] class live empty_events slab_live_events\n");
    for (int c = 0; c < 8; c++) {
        uint64_t live      = atomic_load_explicit(&g_ss_live_by_class[c], memory_order_relaxed);
        uint64_t empty     = atomic_load_explicit(&g_ss_empty_events[c], memory_order_relaxed);
        uint64_t slab_live = atomic_load_explicit(&g_slab_live_events[c], memory_order_relaxed);
        if (live || empty || slab_live) {
            fprintf(stderr, "  C%d: live=%llu empty=%llu slab_live=%llu\n",
                    c,
                    (unsigned long long)live,
                    (unsigned long long)empty,
                    (unsigned long long)slab_live);
        }
    }
}
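/*
 * Usage sketch (illustrative only; the allocator call sites below are
 * assumptions, not defined in this file):
 *
 *   ss_stats_os_alloc(class_idx, ss_size);   // after mapping a new SuperSlab
 *   ss_stats_cache_store();                  // when parking a freed SuperSlab in the cache
 *   ss_stats_cache_reuse();                  // when handing a cached SuperSlab back out
 *   ss_stats_on_ss_alloc_class(class_idx);   // SuperSlab becomes live for a class
 *   ss_stats_on_ss_free_class(class_idx);    // SuperSlab released for a class
 *
 *   HAKMEM_SS_STATS_DUMP=1 ./app             // ss_stats_dump_if_requested() then prints
 *                                            // the per-class table at its call site
 */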