// tiny_c3_inline_slots.h - Phase 77-1: C3 Inline Slots Fast-Path API
//
// Goal: Zero-overhead always-inline push/pop for C3 FIFO ring buffer
// Scope: C3 allocations (64-128B)
// Design: Fail-fast to unified_cache on full/empty
//
// Fast-Path Strategy:
// - Always-inline push/pop for zero-call-overhead
// - Modulo arithmetic inlined (tail/head)
// - Return NULL on empty, 0 on full (caller handles fallback)
// - No bounds checking (ring size fixed at compile time)
//
// Integration Points (usage sketches follow each function below):
// - Alloc: Call c3_inline_pop() in tiny_front_hot_box BEFORE unified_cache
// - Free: Call c3_inline_push() in tiny_legacy_fallback BEFORE unified_cache
//
// Rationale:
// - Same pattern as C4/C5/C6 inline slots (proven +7.05% cumulative)
// - Conservative cap (256) = 2KB/thread (Phase 77-0 recommendation)
// - Fail-fast design = no performance cliff if full/empty

#ifndef HAK_FRONT_TINY_C3_INLINE_SLOTS_H
#define HAK_FRONT_TINY_C3_INLINE_SLOTS_H

#include <stdint.h>
#include "../box/tiny_c3_inline_slots_tls_box.h"
#include "../box/tiny_c3_inline_slots_env_box.h"
#include "../box/tiny_inline_slots_fixed_mode_box.h"
#include "../box/tiny_inline_slots_overflow_stats_box.h"

// ============================================================================
// C3 Inline Slots: Fast-Path Push/Pop (Always-Inline)
// ============================================================================

// Get TLS pointer for C3 inline slots
// Inline for zero overhead
static inline TinyC3InlineSlots* c3_inline_tls(void) {
    extern __thread TinyC3InlineSlots g_tiny_c3_inline_slots;
    return &g_tiny_c3_inline_slots;
}
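
// Presumed TLS box layout (sketch only): the authoritative definition lives in
// tiny_c3_inline_slots_tls_box.h; the field types and ordering shown here are
// assumptions inferred from the push/pop code below.
//
//   typedef struct {
//       void*    slots[TINY_C3_INLINE_CAPACITY];  // ring storage (cap 256, ~2KB/thread)
//       uint32_t head;                            // dequeue index (c3_inline_pop)
//       uint32_t tail;                            // enqueue index (c3_inline_push)
//   } TinyC3InlineSlots;
//
// c3_inline_full()/c3_inline_empty() are the ring-state predicates provided by
// the box headers included above.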

// Push pointer to C3 inline ring
// Returns: 1 on success, 0 if full (caller must fall back to unified_cache)
__attribute__((always_inline))
static inline int c3_inline_push(TinyC3InlineSlots* slots, void* ptr) {
    tiny_inline_slots_count_push_total(3); // Phase 87: Telemetry (all attempts)
    // Check if ring is full
    if (__builtin_expect(c3_inline_full(slots), 0)) {
        tiny_inline_slots_count_push_full(3); // Phase 87: Telemetry (overflow)
        return 0; // Full, caller must use unified_cache
    }
    // Enqueue at tail
    slots->slots[slots->tail] = ptr;
    slots->tail = (slots->tail + 1) % TINY_C3_INLINE_CAPACITY;
    return 1; // Success
}
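
// Usage sketch (free path): a hypothetical call site in tiny_legacy_fallback.
// The unified_cache entry-point name is an assumption, not part of this API:
//
//   if (!c3_inline_push(c3_inline_tls(), ptr)) {
//       unified_cache_push(ptr);  // ring full -> fail fast to unified_cache (assumed name)
//   }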

// Pop pointer from C3 inline ring
// Returns: non-NULL on success, NULL if empty (caller must fall back to unified_cache)
__attribute__((always_inline))
static inline void* c3_inline_pop(TinyC3InlineSlots* slots) {
    tiny_inline_slots_count_pop_total(3); // Phase 87: Telemetry (all attempts)
    // Check if ring is empty
    if (__builtin_expect(c3_inline_empty(slots), 0)) {
        tiny_inline_slots_count_pop_empty(3); // Phase 87: Telemetry (underflow)
        return NULL; // Empty, caller must use unified_cache
    }
    // Dequeue from head
    void* ptr = slots->slots[slots->head];
    slots->head = (slots->head + 1) % TINY_C3_INLINE_CAPACITY;
    return ptr; // Success
}
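
// Usage sketch (alloc path): a hypothetical call site in tiny_front_hot_box.
// The unified_cache entry-point name is an assumption, not part of this API:
//
//   void* p = c3_inline_pop(c3_inline_tls());
//   if (p == NULL) {
//       p = unified_cache_pop();  // ring empty -> fail fast to unified_cache (assumed name)
//   }
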
#endif // HAK_FRONT_TINY_C3_INLINE_SLOTS_H