// hakmem/core/hakmem_shared_pool_release.c

#include "hakmem_shared_pool_internal.h"
#include "hakmem_debug_master.h"
#include "box/ss_slab_meta_box.h"
#include "box/ss_hot_cold_box.h"
#include "hakmem_env_cache.h" // Priority-2: ENV cache
#include "superslab/superslab_inline.h" // superslab_ref_get guard for TLS pins
#include <stdlib.h>
#include <stdio.h>
#include <stdatomic.h>
void
shared_pool_release_slab(SuperSlab* ss, int slab_idx)
{
// Phase 12: SP-SLOT Box - Slot-based Release
//
// Flow:
// 1. Validate inputs and check meta->used == 0
// 2. Find SharedSSMeta for this SuperSlab
// 3. Mark slot ACTIVE → EMPTY
// 4. Push to per-class free list (enables same-class reuse)
// 5. If all slots EMPTY → superslab_free() → LRU cache
if (!ss) {
return;
}
if (slab_idx < 0 || slab_idx >= SLABS_PER_SUPERSLAB_MAX) {
return;
}
// Phase 9-2 FIX: Promote Legacy SuperSlabs to Shared Pool on first recycle
// If we are recycling a slot from a Legacy SS, we must remove it from the
// Legacy list (g_superslab_heads) to prevent Legacy Backend from allocating
// from it simultaneously (Double Allocation Race).
// This effectively transfers ownership to Shared Pool.
extern void remove_superslab_from_legacy_head(SuperSlab* ss);
remove_superslab_from_legacy_head(ss);
// BUGFIX: Re-check used count after removal. Legacy Backend might have
// allocated from this slab while we were waiting for the lock in remove().
TinySlabMeta* slab_meta = &ss->slabs[slab_idx];
if (atomic_load_explicit(&slab_meta->used, memory_order_acquire) != 0) {
// Legacy Backend stole this slab. It's now an orphan (removed from list).
// We abort recycling. It will be recycled when Legacy frees it later.
return;
}
// Debug logging
#if !HAKMEM_BUILD_RELEASE
// Priority-2: Use cached ENV
int dbg = HAK_ENV_SS_FREE_DEBUG();
#else
static const int dbg = 0;
#endif
// P0 instrumentation: count lock acquisitions
lock_stats_init();
    if (g_lock_stats_enabled == 1) {
        // Count this acquisition only; do not modify the enable flag itself,
        // otherwise every later `== 1` check in this function would fail.
        atomic_fetch_add(&g_lock_release_slab_count, 1);
    }
pthread_mutex_lock(&g_shared_pool.alloc_lock);
// TinySlabMeta* slab_meta = &ss->slabs[slab_idx]; // Already declared above
if (slab_meta->used != 0) {
// Not actually empty (double check under lock)
if (g_lock_stats_enabled == 1) {
atomic_fetch_add(&g_lock_release_count, 1);
}
pthread_mutex_unlock(&g_shared_pool.alloc_lock);
return;
}
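    // Snapshot the class index now: the slot's class_idx / class_map entries
    // are reset to 255 (UNASSIGNED) further down, but the class is still
    // needed for per-class accounting and the lock-free freelist push.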
uint8_t class_idx = slab_meta->class_idx;
#if !HAKMEM_BUILD_RELEASE
if (dbg == 1) {
fprintf(stderr, "[SP_SLOT_RELEASE] ss=%p slab_idx=%d class=%d used=0 (marking EMPTY)\n",
(void*)ss, slab_idx, class_idx);
}
#endif
// Find SharedSSMeta for this SuperSlab
SharedSSMeta* sp_meta = NULL;
uint32_t count = atomic_load_explicit(&g_shared_pool.ss_meta_count, memory_order_relaxed);
for (uint32_t i = 0; i < count; i++) {
// RACE FIX: Load pointer atomically
SuperSlab* meta_ss = atomic_load_explicit(&g_shared_pool.ss_metadata[i].ss, memory_order_relaxed);
if (meta_ss == ss) {
sp_meta = &g_shared_pool.ss_metadata[i];
break;
}
}
if (!sp_meta) {
// SuperSlab not in SP-SLOT system yet - create metadata
sp_meta = sp_meta_find_or_create(ss);
if (!sp_meta) {
pthread_mutex_unlock(&g_shared_pool.alloc_lock);
return; // Failed to create metadata
}
}
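    // From here on sp_meta is valid and alloc_lock is still held, so the
    // slot-state transition below is serialized against concurrent releases.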
// Mark slot as EMPTY (ACTIVE → EMPTY)
uint32_t slab_bit = (1u << slab_idx);
SlotState slot_state = atomic_load_explicit(
&sp_meta->slots[slab_idx].state,
memory_order_acquire);
if (slot_state != SLOT_ACTIVE && (ss->slab_bitmap & slab_bit)) {
// Legacy path import: rebuild slot states from SuperSlab bitmap/class_map
sp_meta_sync_slots_from_ss(sp_meta, ss);
slot_state = atomic_load_explicit(
&sp_meta->slots[slab_idx].state,
memory_order_acquire);
}
if (slot_state != SLOT_ACTIVE || sp_slot_mark_empty(sp_meta, slab_idx) != 0) {
if (g_lock_stats_enabled == 1) {
atomic_fetch_add(&g_lock_release_count, 1);
}
pthread_mutex_unlock(&g_shared_pool.alloc_lock);
return; // Slot wasn't ACTIVE
}
// Update SuperSlab metadata
    if (ss->slab_bitmap & slab_bit) {   // reuse the slot bit computed above
        ss->slab_bitmap &= ~slab_bit;
slab_meta->class_idx = 255; // UNASSIGNED
// P1.1: Mark class_map as UNASSIGNED when releasing slab
ss->class_map[slab_idx] = 255;
if (ss->active_slabs > 0) {
ss->active_slabs--;
if (ss->active_slabs == 0 && g_shared_pool.active_count > 0) {
g_shared_pool.active_count--;
}
}
if (class_idx < TINY_NUM_CLASSES_SS &&
g_shared_pool.class_active_slots[class_idx] > 0) {
g_shared_pool.class_active_slots[class_idx]--;
}
}
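    // At this point the SuperSlab bitmap, class_map, and the global/per-class
    // active counters all reflect the released slot.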
// P0-4: Push to lock-free per-class free list (enables reuse by same class)
// Note: push BEFORE releasing mutex (slot state already updated under lock)
if (class_idx < TINY_NUM_CLASSES_SS) {
sp_freelist_push_lockfree(class_idx, sp_meta, slab_idx);
#if !HAKMEM_BUILD_RELEASE
if (dbg == 1) {
fprintf(stderr, "[SP_SLOT_FREELIST_LOCKFREE] class=%d pushed slot (ss=%p slab=%d) active_slots=%u/%u\n",
class_idx, (void*)ss, slab_idx,
sp_meta->active_slots, sp_meta->total_slots);
}
#endif
}
// Check if SuperSlab is now completely empty (all slots EMPTY or UNUSED)
if (sp_meta->active_slots == 0) {
#if !HAKMEM_BUILD_RELEASE
if (dbg == 1) {
fprintf(stderr, "[SP_SLOT_COMPLETELY_EMPTY] ss=%p active_slots=0 (calling superslab_free)\n",
(void*)ss);
}
#endif
if (g_lock_stats_enabled == 1) {
atomic_fetch_add(&g_lock_release_count, 1);
}
// RACE FIX: Set meta->ss to NULL BEFORE unlocking mutex
// This prevents Stage 2 from accessing freed SuperSlab
atomic_store_explicit(&sp_meta->ss, NULL, memory_order_release);
pthread_mutex_unlock(&g_shared_pool.alloc_lock);
// Remove from legacy backend list (moved to top of function)
// extern void remove_superslab_from_legacy_head(SuperSlab* ss);
// remove_superslab_from_legacy_head(ss);
// Free SuperSlab:
// 1. Try LRU cache (hak_ss_lru_push) - lazy deallocation
// 2. Or munmap if LRU is full - eager deallocation
// BUGFIX: Double check total_active_blocks. Legacy Backend might have
// allocated from ANOTHER slab in this SS just before we removed it.
// If so, we must NOT free the SS.
uint32_t active_blocks = atomic_load(&ss->total_active_blocks);
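    // Also honor refcount pins: holders such as the TLS SLL bump the per-SS
    // refcount (see superslab_ref_get) while they keep raw pointers into this
    // SuperSlab, so a non-zero value means the memory must not be freed yet.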
uint32_t ss_refs = superslab_ref_get(ss);
if (active_blocks == 0 && ss_refs == 0) {
extern void superslab_free(SuperSlab* ss);
superslab_free(ss);
} else {
#if !HAKMEM_BUILD_RELEASE
if (dbg == 1) {
fprintf(stderr,
"[SP_SLOT_RELEASE] SKIP free ss=%p: total_active_blocks=%u refcount=%u\n",
(void*)ss,
(unsigned)active_blocks,
(unsigned)ss_refs);
}
#endif
}
return;
}
if (g_lock_stats_enabled == 1) {
atomic_fetch_add(&g_lock_release_count, 1);
}
pthread_mutex_unlock(&g_shared_pool.alloc_lock);
}
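
/*
 * Usage sketch (illustrative only; the caller shown here is hypothetical):
 * a freeing path that has just drained the last block of a slab would
 * typically do something like
 *
 *     if (atomic_load_explicit(&ss->slabs[idx].used, memory_order_acquire) == 0)
 *         shared_pool_release_slab(ss, idx);   // may free the whole SuperSlab
 *
 * The function is race-tolerant: it re-checks `used` under alloc_lock and
 * silently aborts the release if the slab was re-populated in the meantime.
 */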