#ifndef HAKMEM_TINY_LEGACY_FALLBACK_BOX_H
#define HAKMEM_TINY_LEGACY_FALLBACK_BOX_H

#include <stdint.h>   // uint32_t
#include <stdbool.h>  // bool

#include "../front/tiny_unified_cache.h"
#include "../front/tiny_first_page_cache.h"       // Phase 3 C2: First page inline cache
#include "../hakmem.h"
#include "tiny_front_v3_env_box.h"
#include "free_path_stats_box.h"
#include "tiny_front_hot_box.h"
#include "tiny_metadata_cache_env_box.h"          // Phase 3 C2: Metadata cache ENV gate
#include "hakmem_env_snapshot_box.h"              // Phase 4 E1: ENV snapshot consolidation
#include "tiny_unified_cache_fastapi_env_box.h"   // Phase 74-3: FASTAPI ENV gate
#include "tiny_c6_inline_slots_env_box.h"         // Phase 75-1: C6 inline slots ENV gate
#include "../front/tiny_c6_inline_slots.h"        // Phase 75-1: C6 inline slots API

// Purpose: Encapsulate legacy free logic (shared by multiple paths)
// Called by: malloc_tiny_fast.h (free path) + tiny_c6_ultra_free_box.c (C6 fallback)
//
// Contract:
// - base: BASE pointer (already extracted via ptr - 1)
// - class_idx: size class (0-7)
// - Returns: void (always succeeds or falls back to tiny_hot_free_fast)
//
__attribute__((always_inline)) static inline void
tiny_legacy_fallback_free_base_with_env(void* base, uint32_t class_idx, const HakmemEnvSnapshot* env) {
    // Phase 75-1: C6 Inline Slots early-exit (ENV gated)
    // Try C6 inline slots FIRST (before unified cache) for class 6
    if (class_idx == 6 && tiny_c6_inline_slots_enabled()) {
        if (c6_inline_push(c6_inline_tls(), base)) {
            // Success: pushed to C6 inline slots
            FREE_PATH_STAT_INC(legacy_fallback);
            if (__builtin_expect(free_path_stats_enabled(), 0)) {
                g_free_path_stats.legacy_by_class[class_idx]++;
            }
            return;
        }
        // FULL → fall through to unified cache
    }

    const TinyFrontV3Snapshot* front_snap = env
        ? (env->tiny_front_v3_enabled ? tiny_front_v3_snapshot_get() : NULL)
        : (__builtin_expect(tiny_front_v3_enabled(), 0) ? tiny_front_v3_snapshot_get() : NULL);
    const bool metadata_cache_on = env ? env->tiny_metadata_cache_eff : tiny_metadata_cache_enabled();

    // Phase 3 C2 Patch 2: First page cache hint (optional fast-path)
    // Check if pointer is in cached page (avoids metadata lookup in future optimizations)
    if (__builtin_expect(metadata_cache_on, 0)) {
        // Note: This is a hint-only check. Even if it hits, we still use the standard path.
        // The cache will be populated during refill operations for future use.
        // Currently this just validates the cache state; actual optimization TBD.
        if (tiny_first_page_cache_hit(class_idx, base, 4096)) {
            // Future: could optimize metadata access here
        }
    }

    // Legacy fallback - Unified Cache push
    if (!front_snap || front_snap->unified_cache_on) {
        // Phase 74-3 (P0): FASTAPI path (ENV-gated)
        if (tiny_uc_fastapi_enabled()) {
            // Preconditions guaranteed:
            // - unified_cache_on == true (checked above)
            // - TLS init guaranteed by front_gate_unified_enabled() in malloc_tiny_fast.h
            // - Stats compiled-out in FAST builds
            if (unified_cache_push_fast(class_idx, HAK_BASE_FROM_RAW(base))) {
                FREE_PATH_STAT_INC(legacy_fallback);
                // Per-class breakdown (Phase 4-1)
                if (__builtin_expect(free_path_stats_enabled(), 0)) {
                    if (class_idx < 8) {
                        g_free_path_stats.legacy_by_class[class_idx]++;
                    }
                }
                return;
            }
            // FULL → fallback to slow path (rare)
        }

        // Original path (FASTAPI=0 or fallback)
        if (unified_cache_push(class_idx, HAK_BASE_FROM_RAW(base))) {
            FREE_PATH_STAT_INC(legacy_fallback);
            // Per-class breakdown (Phase 4-1)
            if (__builtin_expect(free_path_stats_enabled(), 0)) {
                if (class_idx < 8) {
                    g_free_path_stats.legacy_by_class[class_idx]++;
                }
            }
            return;
        }
    }

    // Final fallback
    tiny_hot_free_fast(class_idx, base);
}

__attribute__((always_inline)) static inline void
tiny_legacy_fallback_free_base(void* base, uint32_t class_idx) {
    const HakmemEnvSnapshot* env = hakmem_env_snapshot_enabled() ? hakmem_env_snapshot() : NULL;
    tiny_legacy_fallback_free_base_with_env(base, class_idx, env);
}

#endif // HAKMEM_TINY_LEGACY_FALLBACK_BOX_H
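/*
 * Usage sketch (illustrative only; kept as a comment so it does not affect the
 * build). The surrounding free path and the variable names below are
 * assumptions, not part of this box; only the two entry points defined above
 * are real. A caller that has already recovered the BASE pointer and size
 * class per the contract would delegate like this:
 *
 *   void* base = (void*)((uint8_t*)ptr - 1);          // BASE extraction per the contract above
 *   tiny_legacy_fallback_free_base(base, class_idx);  // ENV snapshot resolved internally
 *
 * A caller that already holds a HakmemEnvSnapshot (e.g. a hot free path that
 * fetched it once up front) can skip the per-call snapshot lookup:
 *
 *   const HakmemEnvSnapshot* env = hakmem_env_snapshot_enabled() ? hakmem_env_snapshot() : NULL;
 *   tiny_legacy_fallback_free_base_with_env(base, class_idx, env);
 */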