diff --git a/core/box/slab_recycling_box.h b/core/box/slab_recycling_box.h
index 9ed626d9..634e0e13 100644
--- a/core/box/slab_recycling_box.h
+++ b/core/box/slab_recycling_box.h
@@ -35,6 +35,7 @@
 #include "../hakmem_tiny_superslab.h"
 #include "../hakmem_shared_pool.h"  // shared_pool_release_slab()
 #include "ss_hot_cold_box.h"        // ss_mark_slab_empty()
+#include "ss_release_guard_box.h"   // ss_release_guard_slab_can_recycle()
 
 // Forward declarations
 struct SuperSlab;
@@ -66,8 +67,7 @@ void slab_recycle_print_stats(void);
 // Check if slab is EMPTY and recyclable
 // Returns: 1 if EMPTY (used == 0, capacity > 0), 0 otherwise
 static inline int slab_is_empty(struct TinySlabMeta* meta) {
-    if (!meta) return 0;
-    return (meta->used == 0 && meta->capacity > 0);
+    return ss_release_guard_slab_can_recycle(NULL, 0, meta) ? 1 : 0;
 }
 
 // Note: ss_mark_slab_empty() and shared_pool_release_slab() are provided by:
@@ -100,7 +100,7 @@ static inline int slab_is_empty(struct TinySlabMeta* meta) {
             if (s_trace) { \
                 fprintf(stderr, "[SLAB_RECYCLE] SKIP: meta=NULL ss=%p\n", (void*)(ss)); \
             } \
-        } else if (!slab_is_empty(meta)) { \
+        } else if (!ss_release_guard_slab_can_recycle((ss), (slab_idx), (meta))) { \
             if ((meta)->capacity == 0) { \
                 g_slab_recycle_stats.recycle_skip_no_cap++; \
             } else { \
@@ -133,7 +133,7 @@ static inline int slab_is_empty(struct TinySlabMeta* meta) {
 // Release build: Direct calls (no tracing overhead)
 #define SLAB_TRY_RECYCLE(ss, slab_idx, meta) \
     do { \
-        if ((ss) && (meta) && slab_is_empty(meta)) { \
+        if ((ss) && (meta) && ss_release_guard_slab_can_recycle((ss), (slab_idx), (meta))) { \
             ss_mark_slab_empty((ss), (slab_idx)); \
             shared_pool_release_slab((ss), (slab_idx)); \
         } \
diff --git a/core/box/ss_release_guard_box.h b/core/box/ss_release_guard_box.h
new file mode 100644
index 00000000..652dc170
--- /dev/null
+++ b/core/box/ss_release_guard_box.h
@@ -0,0 +1,68 @@
+// ss_release_guard_box.h - Box: SuperSlab Release Guard
+// Purpose:
+//   Centralize the "can we release/recycle this slab / SuperSlab?" logic
+//   behind a single Box contract, to avoid scattered lifetime checks.
+//
+// Box Theory:
+//   - Single Responsibility:
+//       Decide whether a given (ss, slab_idx) is safe to recycle or free.
+//   - Clear Boundary:
+//       All callers go through this Box before calling shared_pool_release_slab()
+//       or superslab_free() directly.
+//   - Fail-Fast Friendly:
+//       Guard returns a boolean; callers decide whether to abort/log/drop.
+//       This Box itself is free of logging by default (optionally gated in debug).
+//   - Reversible / A/B:
+//       Existing ad-hoc checks stay in place; this Box is additive and can
+//       gradually replace them under build flags / environment switches.
+//
+// Invariants (intended, not all enforced here yet):
+//   - Slab recycle (marking the slot EMPTY) requires:
+//       meta->used == 0
+//       meta->capacity > 0
+//   - SuperSlab munmap / cache release requires:
+//       ss->total_active_blocks == 0
+//       superslab_ref_get(ss) == 0   (no TLS pins / remote pins)
+//
+// NOTE:
+//   For now this box mirrors existing logic in hakmem_shared_pool_release.c,
+//   without changing behaviour. It provides a single place to extend guards.
+
+#ifndef HAKMEM_SS_RELEASE_GUARD_BOX_H
+#define HAKMEM_SS_RELEASE_GUARD_BOX_H
+
+#include "../hakmem_tiny_superslab_internal.h"
+
+// Per-slab guard: "is it safe to mark this slab EMPTY and recycle?"
+// - Checks TinySlabMeta invariants only (used/capacity) for now.
+// - Does NOT inspect refcounts or remote queues (that is owned by higher boxes).
+static inline bool ss_release_guard_slab_can_recycle(SuperSlab* ss,
+                                                     int slab_idx,
+                                                     TinySlabMeta* meta)
+{
+    (void)ss;
+    if (!meta) return false;
+
+    // Mirror slab_is_empty() from slab_recycling_box.h
+    if (meta->used != 0) return false;
+    if (meta->capacity == 0) return false;
+    return true;
+}
+
+// Per-SuperSlab guard: "is it safe to actually free (munmap/cache-release) this SuperSlab?"
+// - Mirrors existing final check in shared_pool_release_slab():
+//     active_blocks == 0 && refcount == 0
+static inline bool ss_release_guard_superslab_can_free(SuperSlab* ss)
+{
+    if (!ss) return false;
+
+    uint32_t active_blocks = atomic_load_explicit(&ss->total_active_blocks,
+                                                  memory_order_acquire);
+    uint32_t refs = superslab_ref_get(ss);
+
+    if (active_blocks != 0) return false;
+    if (refs != 0) return false;
+    return true;
+}
+
+#endif // HAKMEM_SS_RELEASE_GUARD_BOX_H
diff --git a/core/hakmem_shared_pool_release.c b/core/hakmem_shared_pool_release.c
index 41d91bba..36f783ba 100644
--- a/core/hakmem_shared_pool_release.c
+++ b/core/hakmem_shared_pool_release.c
@@ -4,6 +4,7 @@
 #include "box/ss_hot_cold_box.h"
 #include "hakmem_env_cache.h"            // Priority-2: ENV cache
 #include "superslab/superslab_inline.h"  // superslab_ref_get guard for TLS pins
+#include "box/ss_release_guard_box.h"    // Box: SuperSlab Release Guard
 #include
 #include
 
@@ -202,12 +203,10 @@ shared_pool_release_slab(SuperSlab* ss, int slab_idx)
     // 1. Try LRU cache (hak_ss_lru_push) - lazy deallocation
     // 2. Or munmap if LRU is full - eager deallocation
 
-    // BUGFIX: Double check total_active_blocks. Legacy Backend might have
+    // BUGFIX: Double check total_active_blocks and refcount. Legacy Backend might have
     // allocated from ANOTHER slab in this SS just before we removed it.
     // If so, we must NOT free the SS.
-    uint32_t active_blocks = atomic_load(&ss->total_active_blocks);
-    uint32_t ss_refs = superslab_ref_get(ss);
-    if (active_blocks == 0 && ss_refs == 0) {
+    if (ss_release_guard_superslab_can_free(ss)) {
         extern void superslab_free(SuperSlab* ss);
         superslab_free(ss);
     } else {
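
For reviewers, a minimal usage sketch (not part of the patch itself): a hypothetical free-path hook showing how callers are expected to go through the Box rather than re-checking used/capacity or refcounts by hand. The function name, signature, and include path below are assumptions for illustration only.

// Illustrative sketch only -- hypothetical caller, not part of this diff.
// Assumes "box/slab_recycling_box.h" is reachable on the include path and
// transitively provides the SuperSlab and TinySlabMeta types.
#include "box/slab_recycling_box.h"   // SLAB_TRY_RECYCLE()

static void example_on_block_freed(SuperSlab* ss, int slab_idx, TinySlabMeta* meta)
{
    // SLAB_TRY_RECYCLE() now consults ss_release_guard_slab_can_recycle() first;
    // if the slab is EMPTY it is marked via ss_mark_slab_empty() and handed to
    // shared_pool_release_slab(), which in turn applies
    // ss_release_guard_superslab_can_free() before freeing or caching the SuperSlab.
    SLAB_TRY_RECYCLE(ss, slab_idx, meta);
}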