diff --git a/core/box/tls_sll_drain_box.h b/core/box/tls_sll_drain_box.h
index 1548dd3a..b3c62a02 100644
--- a/core/box/tls_sll_drain_box.h
+++ b/core/box/tls_sll_drain_box.h
@@ -233,8 +233,9 @@ static inline uint32_t tiny_tls_sll_try_drain(int class_idx) {
   // Check if interval reached
   uint32_t interval = tls_sll_drain_get_interval();
   if (__builtin_expect(g_tls_sll_drain_counter[class_idx] >= interval, 0)) {
-    // Trigger drain (drain ~32 blocks for now, tune later)
-    uint32_t drained = tiny_tls_sll_drain(class_idx, 32);
+    // Trigger drain (drain ALL blocks to enable empty detection)
+    // batch_size=0 means drain all available blocks
+    uint32_t drained = tiny_tls_sll_drain(class_idx, 0);
 
     // Reset counter
     g_tls_sll_drain_counter[class_idx] = 0;
diff --git a/core/hakmem_shared_pool.c b/core/hakmem_shared_pool.c
index e3e162e4..a72f1f09 100644
--- a/core/hakmem_shared_pool.c
+++ b/core/hakmem_shared_pool.c
@@ -264,8 +264,8 @@ shared_pool_release_slab(SuperSlab* ss, int slab_idx)
   }
 
   if (dbg == 1) {
-    fprintf(stderr, "[SS_SLAB_EMPTY] ss=%p slab_idx=%d class=%d used=0 (releasing to pool)\n",
-            (void*)ss, slab_idx, meta->class_idx);
+    fprintf(stderr, "[SS_SLAB_EMPTY] ss=%p slab_idx=%d class=%d used=0 active_slabs_before=%u (releasing to pool)\n",
+            (void*)ss, slab_idx, meta->class_idx, ss->active_slabs);
   }
 
   uint32_t bit = (1u << slab_idx);
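
Note (not part of the patch): the first hunk switches the periodic drain to batch_size=0, relying on the convention that 0 means "drain everything", so the caller can observe a fully empty TLS SLL and release the backing slab. The sketch below illustrates that convention only; it is not the hakmem implementation. It assumes a hypothetical per-class TLS singly linked list and a placeholder shared_pool_push() consumer; apart from tiny_tls_sll_drain and class_idx, all names are illustrative.

#include <stdint.h>

/* Hypothetical node and per-class TLS free-list head (names are illustrative). */
typedef struct TinyBlock { struct TinyBlock* next; } TinyBlock;
#define TINY_NUM_CLASSES 64                          /* assumed class count */
static __thread TinyBlock* g_tls_sll_head[TINY_NUM_CLASSES];

/* Placeholder: return one block to the shared pool (real code would do more). */
static void shared_pool_push(int class_idx, TinyBlock* blk) { (void)class_idx; (void)blk; }

/* Sketch of the batch_size convention used in the patch:
 * batch_size == 0 => no upper bound, drain the entire list so the
 * caller can detect an empty TLS SLL afterwards. */
static inline uint32_t tiny_tls_sll_drain(int class_idx, uint32_t batch_size) {
    uint32_t drained = 0;
    TinyBlock* cur = g_tls_sll_head[class_idx];

    while (cur && (batch_size == 0 || drained < batch_size)) {
        TinyBlock* next = cur->next;
        shared_pool_push(class_idx, cur);
        cur = next;
        drained++;
    }

    g_tls_sll_head[class_idx] = cur;   /* NULL after a full drain -> empty detection */
    return drained;
}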