hakmem/core/box/front_fastlane_box.h

#ifndef HAK_FRONT_FASTLANE_BOX_H
#define HAK_FRONT_FASTLANE_BOX_H
// ============================================================================
// Phase 6: Front FastLane Box (Hot Inline / Try API)
// ============================================================================
//
// Purpose: Single-box entry point for malloc/free hot paths
// Collapses wrapper→gate→policy→route layers into one
//
// API (see the usage sketch below this header comment):
// void* front_fastlane_try_malloc(size_t size)
// - Returns non-NULL on success (handled by FastLane)
// - Returns NULL on failure (fallback to existing wrapper path)
//
// bool front_fastlane_try_free(void* ptr)
// - Returns true if handled (success)
// - Returns false if not handled (fallback to existing wrapper path)
//
// Box Theory:
// - L0: ENV gate (front_fastlane_env_box.h)
// - L1: This file (hot inline handlers)
// - L2: Stats (front_fastlane_stats_box.h, cold helpers in .c)
//
// Strategy:
// - Read existing "winning boxes" only once
// - Call existing hot handlers (malloc_tiny_fast_for_class, free_tiny_fast)
// - No duplicate checks (deduplicate existing wrapper logic)
// - Fail-fast: Any uncertainty → return not-handled
//
// Safety:
// - ENV-gated (default ON, opt-out)
// - Single fallback boundary (FastLane → ColdFallback)
// - Reversible (ENV toggle)
//
// ============================================================================
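// Usage sketch (illustrative only): how a wrapper would integrate the Try API.
// hak_malloc_wrapper_cold / hak_free_wrapper_cold are hypothetical placeholder
// names for the existing wrapper fallback paths; they are not declared here.
//
//   void* hak_malloc_entry(size_t size) {
//       void* p = front_fastlane_try_malloc(size);
//       if (p) return p;                          // handled by FastLane
//       return hak_malloc_wrapper_cold(size);     // fallback to wrapper path
//   }
//
//   void hak_free_entry(void* ptr) {
//       if (front_fastlane_try_free(ptr)) return; // handled by FastLane
//       hak_free_wrapper_cold(ptr);               // fallback to wrapper path
//   }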
#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>
#include "front_fastlane_env_box.h"
#include "front_fastlane_stats_box.h"
#include "../hakmem_tiny.h" // hak_tiny_size_to_class, tiny_get_max_size
#include "../front/malloc_tiny_fast.h" // malloc_tiny_fast_for_class
// FastLane is only safe after global init completes.
// Before init, wrappers must handle recursion guards + syscall init.
extern int g_initialized;
// ============================================================================
// Hot Inline: try_malloc
// ============================================================================
// Patch 4: Actual Tiny routing implementation
// Strategy: Read existing winning boxes only once, call existing hot handlers
// No duplicate checks (deduplicate existing wrapper logic)
static inline void* front_fastlane_try_malloc(size_t size) {
    FRONT_FASTLANE_STAT_INC(malloc_total);
    // Fail-fast: do not enter FastLane before init completes.
    if (__builtin_expect(!g_initialized, 0)) {
        FRONT_FASTLANE_STAT_INC(malloc_fallback_other);
        return NULL;
    }
    // Fast path: Size check (Tiny range only)
    // Use cached max size (typically 256 or 1024)
    size_t max_size = tiny_get_max_size();
    if (__builtin_expect(size > max_size, 0)) {
        FRONT_FASTLANE_STAT_INC(malloc_fallback_size);
        return NULL; // Not Tiny → fallback
    }
    // Class calculation (single LUT lookup, no branches)
    int class_idx = hak_tiny_size_to_class(size);
    if (__builtin_expect(class_idx < 0 || class_idx >= 8, 0)) {
        FRONT_FASTLANE_STAT_INC(malloc_fallback_class);
        return NULL; // Invalid class → fallback
    }
    // Class mask check (gradual rollout support)
    uint8_t mask = front_fastlane_class_mask();
    if (__builtin_expect(((mask >> class_idx) & 1) == 0, 0)) {
        FRONT_FASTLANE_STAT_INC(malloc_fallback_other);
        return NULL; // Class not enabled → fallback
    }
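    // Example (illustrative): a class mask of 0x0F routes only classes 0-3
    // through FastLane; 0xFF enables all eight Tiny classes.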
    // Call existing hot handler (no duplication)
    // This is the winning path from E5-4 / Phase 4 E2
    void* ptr = malloc_tiny_fast_for_class(size, class_idx);
    if (__builtin_expect(ptr != NULL, 1)) {
        FRONT_FASTLANE_STAT_INC(malloc_hit);
        return ptr; // Success
    }
    // Allocation failed (refill needed, TLS exhausted, etc.)
    FRONT_FASTLANE_STAT_INC(malloc_fallback_alloc);
    return NULL; // Fallback to cold path
}
// ============================================================================
// Hot Inline: try_free
// ============================================================================
// Phase 6-2: Free DeDup optimization
// Strategy:
// - When dedup=1 and class_mask=0xFF: Direct call to free_tiny_fast() (no duplicate header check)
// - Otherwise: Existing header validation path (backward compatible)
static inline bool front_fastlane_try_free(void* ptr) {
    FRONT_FASTLANE_STAT_INC(free_total);
    // Fail-fast: do not enter FastLane before init completes.
    if (__builtin_expect(!g_initialized, 0)) {
        FRONT_FASTLANE_STAT_INC(free_fallback_other);
        return false;
    }
#if HAKMEM_TINY_HEADER_CLASSIDX
    // Phase 6-2: DeDup path (eliminate duplicate header validation)
    // Conditions:
    //   1. Free DeDup enabled (ENV=1)
    //   2. All classes enabled (mask=0xFF, no gradual rollout)
    if (__builtin_expect(front_fastlane_free_dedup_enabled() && front_fastlane_class_mask() == 0xFF, 1)) {
        // Direct call to free_tiny_fast() (handles all validation internally)
        // free_tiny_fast() is static inline in malloc_tiny_fast.h, no extern needed
        int result = free_tiny_fast(ptr);
        if (__builtin_expect(result, 1)) {
            FRONT_FASTLANE_STAT_INC(free_hit);
            return true; // Handled
        }
        // Not handled → fallback
        FRONT_FASTLANE_STAT_INC(free_fallback_failure);
        return false;
    }
    // Traditional path (backward compatible, for class mask filtering or dedup=0)
    // Page boundary guard: ptr must not be page-aligned
    // (Accessing ptr-1 when ptr is page-aligned could segfault)
    uintptr_t off = (uintptr_t)ptr & 0xFFFu;
    if (__builtin_expect(off == 0, 0)) {
        FRONT_FASTLANE_STAT_INC(free_fallback_aligned);
        return false; // Page-aligned → fallback (unsafe to read header)
    }
    // Fast header validation (1 load, 1 compare)
    uint8_t header = *((uint8_t*)ptr - 1);
    uint8_t magic = header & 0xF0u;
    if (__builtin_expect(magic != 0xA0u, 0)) {
        // Not Tiny header (could be Mid/Pool/Large or external allocation)
        if (magic != 0) {
            FRONT_FASTLANE_STAT_INC(free_fallback_header);
        }
        return false; // Not Tiny → fallback
    }
    // Extract class index from header (lower 4 bits)
    int class_idx = (int)(header & 0x0Fu);
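    // Example: a header byte of 0xA3 carries magic 0xA0 (Tiny) and class_idx 3.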
    if (__builtin_expect(class_idx >= 8, 0)) {
        FRONT_FASTLANE_STAT_INC(free_fallback_class);
        return false; // Invalid class → fallback
    }
    // Class mask check (gradual rollout support)
    uint8_t mask = front_fastlane_class_mask();
    if (__builtin_expect(((mask >> class_idx) & 1) == 0, 0)) {
        FRONT_FASTLANE_STAT_INC(free_fallback_other);
        return false; // Class not enabled → fallback
    }
    // Call existing hot handler (no duplication)
    // This is the winning path from E5-1 (free_tiny_fast returns 1 on success)
    // free_tiny_fast() is static inline in malloc_tiny_fast.h, no extern needed
    if (__builtin_expect(free_tiny_fast(ptr), 1)) {
        FRONT_FASTLANE_STAT_INC(free_hit);
        return true; // Success
    }
    // Free failed (cold path needed - refill, full TLS, etc.)
    FRONT_FASTLANE_STAT_INC(free_fallback_failure);
    return false; // Fallback to cold path
#else
    // No header support → always fallback
    FRONT_FASTLANE_STAT_INC(free_fallback_other);
    return false;
#endif
}
#endif // HAK_FRONT_FASTLANE_BOX_H