// ---------------------------------------------------------------------------
// (Web-viewer metadata captured with the source — not part of the header.)
// File: hakmem/core/box/front_fastlane_stats_box.h — 110 lines, 5.4 KiB, C.
// ---------------------------------------------------------------------------

#ifndef HAK_FRONT_FASTLANE_STATS_BOX_H
#define HAK_FRONT_FASTLANE_STATS_BOX_H
// ============================================================================
// Phase 6: Front FastLane - Stats Box
// ============================================================================
//
// Purpose: Visibility into FastLane hit/fallback rates
//
// Counters (compile-out when HAKMEM_DEBUG_COUNTERS=0):
//
// Malloc:
// - malloc_total: Total try_malloc attempts
// - malloc_hit: Successful FastLane alloc
// - malloc_fallback_*: Fallback reasons (3-6 types)
//
// Free:
// - free_total: Total try_free attempts
// - free_hit: Successful FastLane free
// - free_fallback_*: Fallback reasons (3-6 types)
//
// Output (on exit, if HAKMEM_DEBUG_COUNTERS=1):
// [FRONT_FASTLANE] malloc_total=N hit=N fb_*=N ... free_total=N hit=N fb_*=N ...
//
// Box Theory:
// - L2: Stats layer (compile-out when counters disabled)
// - Zero overhead: No-op macros when HAKMEM_DEBUG_COUNTERS=0
//
// ============================================================================
#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#if HAKMEM_DEBUG_COUNTERS
// Global FastLane hit/fallback counters, one field per outcome.
// Thread-safe: every field is _Atomic and is only touched with relaxed
// atomic RMW/load operations (see FRONT_FASTLANE_STAT_INC and
// front_fastlane_stats_dump below) — counts are advisory statistics, so
// no ordering with the allocator's own synchronization is required.
typedef struct {
    // ---- Malloc path ----
    _Atomic uint64_t malloc_total;          // Total try_malloc calls
    _Atomic uint64_t malloc_hit;            // Successful FastLane alloc
    _Atomic uint64_t malloc_fallback_stub;  // Stub: not implemented yet (Patch 2)
    _Atomic uint64_t malloc_fallback_size;  // Size out of Tiny range
    _Atomic uint64_t malloc_fallback_class; // Class calculation failed
    _Atomic uint64_t malloc_fallback_alloc; // Allocation failed (refill needed)
    _Atomic uint64_t malloc_fallback_other; // Any other fallback reason
    // ---- Free path ----
    _Atomic uint64_t free_total;            // Total try_free calls
    _Atomic uint64_t free_hit;              // Successful FastLane free
    _Atomic uint64_t free_fallback_stub;    // Stub: not implemented yet (Patch 2)
    _Atomic uint64_t free_fallback_aligned; // Page-aligned pointer (not a Tiny block)
    _Atomic uint64_t free_fallback_header;  // Invalid header magic
    _Atomic uint64_t free_fallback_class;   // Class out of bounds
    _Atomic uint64_t free_fallback_failure; // Free failed (cold path needed)
    _Atomic uint64_t free_fallback_other;   // Any other fallback reason
} FrontFastLaneStats;
// Global stats instance, zero-initialized.
// NOTE(review): `static` at file scope in a header gives each translation
// unit that includes this header its OWN copy of the counters. Totals are
// only process-global if exactly one TU includes this header with
// HAKMEM_DEBUG_COUNTERS=1 — confirm that is the intended usage.
static FrontFastLaneStats g_front_fastlane_stats = {0};
// Increment one named counter field by 1.
// memory_order_relaxed is deliberate: counters are statistics only and
// need no synchronization with surrounding allocator state.
#define FRONT_FASTLANE_STAT_INC(field) \
    atomic_fetch_add_explicit(&g_front_fastlane_stats.field, 1, memory_order_relaxed)
// Dump accumulated FastLane stats to stderr in a single line.
// Call once on exit (e.g. from a wrapper destructor or the end of main).
// Prints nothing when no activity was recorded. All loads are relaxed:
// the counters are advisory and carry no ordering requirements.
//
// Fix: the previous version printed via "%lu" with (unsigned long) casts,
// which silently truncates uint64_t counters on targets where unsigned
// long is 32-bit (e.g. Windows LLP64, 32-bit Linux). PRIu64 from
// <inttypes.h> always matches uint64_t exactly.
static void front_fastlane_stats_dump(void) {
    // Local shorthand for a relaxed load of one counter field.
#define FFS_LOAD(field) \
    atomic_load_explicit(&g_front_fastlane_stats.field, memory_order_relaxed)
    uint64_t m_total = FFS_LOAD(malloc_total);
    uint64_t f_total = FFS_LOAD(free_total);
    if (m_total == 0 && f_total == 0) return; // No activity: stay silent
    // Malloc stats
    uint64_t m_hit      = FFS_LOAD(malloc_hit);
    uint64_t m_fb_stub  = FFS_LOAD(malloc_fallback_stub);
    uint64_t m_fb_size  = FFS_LOAD(malloc_fallback_size);
    uint64_t m_fb_class = FFS_LOAD(malloc_fallback_class);
    uint64_t m_fb_alloc = FFS_LOAD(malloc_fallback_alloc);
    uint64_t m_fb_other = FFS_LOAD(malloc_fallback_other);
    // Free stats
    uint64_t f_hit        = FFS_LOAD(free_hit);
    uint64_t f_fb_stub    = FFS_LOAD(free_fallback_stub);
    uint64_t f_fb_aligned = FFS_LOAD(free_fallback_aligned);
    uint64_t f_fb_header  = FFS_LOAD(free_fallback_header);
    uint64_t f_fb_class   = FFS_LOAD(free_fallback_class);
    uint64_t f_fb_failure = FFS_LOAD(free_fallback_failure);
    uint64_t f_fb_other   = FFS_LOAD(free_fallback_other);
#undef FFS_LOAD
    fprintf(stderr,
            "[FRONT_FASTLANE] malloc_total=%" PRIu64 " hit=%" PRIu64
            " fb_stub=%" PRIu64 " fb_size=%" PRIu64 " fb_class=%" PRIu64
            " fb_alloc=%" PRIu64 " fb_other=%" PRIu64 " | "
            "free_total=%" PRIu64 " hit=%" PRIu64 " fb_stub=%" PRIu64
            " fb_aligned=%" PRIu64 " fb_header=%" PRIu64 " fb_class=%" PRIu64
            " fb_failure=%" PRIu64 " fb_other=%" PRIu64 "\n",
            m_total, m_hit, m_fb_stub, m_fb_size, m_fb_class,
            m_fb_alloc, m_fb_other,
            f_total, f_hit, f_fb_stub, f_fb_aligned, f_fb_header,
            f_fb_class, f_fb_failure, f_fb_other);
}
#else // HAKMEM_DEBUG_COUNTERS == 0
// Counters disabled: both entry points compile away entirely, so the
// allocator's fast paths carry zero stats overhead in release builds.
// The do/while(0) form keeps the macro safe as a single statement
// (e.g. inside an un-braced if/else).
#define FRONT_FASTLANE_STAT_INC(field) do {} while(0)
static inline void front_fastlane_stats_dump(void) {}
#endif // HAKMEM_DEBUG_COUNTERS
#endif // HAK_FRONT_FASTLANE_STATS_BOX_H