Fix C7 warm/TLS Release path and unify debug instrumentation
This commit is contained in:
@ -10,11 +10,145 @@
|
||||
#include "hakmem_policy.h"
|
||||
#include "hakmem_env_cache.h" // Priority-2: ENV cache
|
||||
#include "front/tiny_warm_pool.h" // Warm Pool: Prefill during registry scans
|
||||
#include "box/ss_slab_reset_box.h" // Box: Reset slab metadata on reuse (C7 guard)
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <stdatomic.h>
|
||||
|
||||
/*
 * Log the metadata state of one tiny slab for C7 (class 7) diagnostics.
 *
 * Rate-limited to at most 8 messages per process via an atomic counter, so
 * the instrumentation is cheap enough to leave in hot paths.  The message
 * prefix distinguishes release ("REL") from debug ("DBG") builds; the rest
 * of the format is identical, so it is emitted from one shared fprintf
 * instead of two duplicated build-specific branches.
 *
 * tag      - caller-supplied context label (e.g. "ACQUIRE", "ACQUIRE_META").
 * ss       - owning SuperSlab; NULL is tolerated (function is a no-op).
 * slab_idx - index into ss->slabs; assumed in range -- caller must validate.
 */
static inline void c7_log_meta_state(const char* tag, SuperSlab* ss, int slab_idx) {
    if (!ss) return;
#if HAKMEM_BUILD_RELEASE
    static const char* const build_kind = "REL";
#else
    static const char* const build_kind = "DBG";
#endif
    /* Only one build flavor is ever compiled in, so one counter suffices. */
    static _Atomic uint32_t c7_meta_logs = 0;
    uint32_t n = atomic_fetch_add_explicit(&c7_meta_logs, 1, memory_order_relaxed);
    if (n < 8) {
        TinySlabMeta* m = &ss->slabs[slab_idx];
        fprintf(stderr,
                "[%s_C7_%s] ss=%p slab=%d cls=%u used=%u cap=%u carved=%u freelist=%p\n",
                build_kind,
                tag,
                (void*)ss,
                slab_idx,
                (unsigned)m->class_idx,
                (unsigned)m->used,
                (unsigned)m->capacity,
                (unsigned)m->carved,
                m->freelist);
    }
}
|
||||
|
||||
/*
 * A slab's metadata is "pristine" when it carries no residual allocation
 * state: nothing in use, nothing carved, and an empty freelist.
 * A NULL meta pointer is never pristine.
 */
static inline int c7_meta_is_pristine(TinySlabMeta* m) {
    if (m == NULL) {
        return 0;
    }
    return (m->used == 0) && (m->carved == 0) && (m->freelist == NULL);
}
|
||||
|
||||
static inline void c7_log_skip_nonempty_acquire(SuperSlab* ss,
|
||||
int slab_idx,
|
||||
TinySlabMeta* m,
|
||||
const char* tag) {
|
||||
if (!(ss && m)) return;
|
||||
#if HAKMEM_BUILD_RELEASE
|
||||
static _Atomic uint32_t rel_c7_skip_logs = 0;
|
||||
uint32_t n = atomic_fetch_add_explicit(&rel_c7_skip_logs, 1, memory_order_relaxed);
|
||||
if (n < 4) {
|
||||
fprintf(stderr,
|
||||
"[REL_C7_%s] ss=%p slab=%d cls=%u used=%u cap=%u carved=%u freelist=%p\n",
|
||||
tag,
|
||||
(void*)ss,
|
||||
slab_idx,
|
||||
(unsigned)m->class_idx,
|
||||
(unsigned)m->used,
|
||||
(unsigned)m->capacity,
|
||||
(unsigned)m->carved,
|
||||
m->freelist);
|
||||
}
|
||||
#else
|
||||
static _Atomic uint32_t dbg_c7_skip_logs = 0;
|
||||
uint32_t n = atomic_fetch_add_explicit(&dbg_c7_skip_logs, 1, memory_order_relaxed);
|
||||
if (n < 4) {
|
||||
fprintf(stderr,
|
||||
"[DBG_C7_%s] ss=%p slab=%d cls=%u used=%u cap=%u carved=%u freelist=%p\n",
|
||||
tag,
|
||||
(void*)ss,
|
||||
slab_idx,
|
||||
(unsigned)m->class_idx,
|
||||
(unsigned)m->used,
|
||||
(unsigned)m->capacity,
|
||||
(unsigned)m->carved,
|
||||
m->freelist);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
 * C7 guard: validate and reset a slab slot's metadata before handing it to
 * a class-7 allocation path.
 *
 * Returns 0 when the slot is safe to use (and has been reset for
 * class_idx), -1 when the caller must skip the slot: either the slot is
 * already owned by a different class, or it still carries residual state
 * from a previous use.  For any class other than 7 this is a no-op that
 * returns 0.
 *
 * Fix: the release-build class-mismatch log previously omitted the
 * freelist pointer that the debug log reported; both build flavors now
 * emit the same unified format.
 */
static inline int c7_reset_and_log_if_needed(SuperSlab* ss,
                                             int slab_idx,
                                             int class_idx) {
    if (class_idx != 7) {
        return 0;  /* Guard applies to class 7 only. */
    }

    TinySlabMeta* m = &ss->slabs[slab_idx];
    c7_log_meta_state("ACQUIRE_META", ss, slab_idx);

    /* 255 is the "unassigned" sentinel; any other foreign class means the
     * slot is still owned by a different size class and must not be
     * repurposed here. */
    if (m->class_idx != 255 && m->class_idx != (uint8_t)class_idx) {
#if HAKMEM_BUILD_RELEASE
        static const char* const build_kind = "REL";
#else
        static const char* const build_kind = "DBG";
#endif
        static _Atomic uint32_t c7_class_mismatch_logs = 0;
        uint32_t n = atomic_fetch_add_explicit(&c7_class_mismatch_logs, 1, memory_order_relaxed);
        if (n < 4) {
            fprintf(stderr,
                    "[%s_C7_CLASS_MISMATCH] ss=%p slab=%d want=%d have=%u used=%u cap=%u carved=%u freelist=%p\n",
                    build_kind,
                    (void*)ss,
                    slab_idx,
                    class_idx,
                    (unsigned)m->class_idx,
                    (unsigned)m->used,
                    (unsigned)m->capacity,
                    (unsigned)m->carved,
                    m->freelist);
        }
        return -1;
    }

    if (!c7_meta_is_pristine(m)) {
        c7_log_skip_nonempty_acquire(ss, slab_idx, m, "SKIP_NONEMPTY_ACQUIRE");
        return -1;
    }

    /* Slot is clean and unowned (or already ours): stamp it for class_idx. */
    ss_slab_reset_meta_for_tiny(ss, slab_idx, class_idx);
    c7_log_meta_state("ACQUIRE", ss, slab_idx);
    return 0;
}
|
||||
|
||||
// ============================================================================
|
||||
// Performance Measurement: Shared Pool Lock Contention (ENV-gated)
|
||||
// ============================================================================
|
||||
@ -147,7 +281,12 @@ sp_acquire_from_empty_scan(int class_idx, SuperSlab** ss_out, int* slab_idx_out,
|
||||
fprintf(stderr, "[STAGE0.5_STATS] hits=%lu attempts=%lu rate=%.1f%% (scan_limit=%d warm_pool=%d)\n",
|
||||
hits, attempts, (double)hits * 100.0 / attempts, scan_limit, tiny_warm_pool_count(class_idx));
|
||||
}
|
||||
return 0;
|
||||
if (c7_reset_and_log_if_needed(primary_result, primary_slab_idx, class_idx) == 0) {
|
||||
return 0;
|
||||
}
|
||||
primary_result = NULL;
|
||||
*ss_out = NULL;
|
||||
*slab_idx_out = -1;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
@ -216,6 +355,15 @@ stage1_retry_after_tension_drain:
|
||||
if (ss_guard) {
|
||||
tiny_tls_slab_reuse_guard(ss_guard);
|
||||
|
||||
if (class_idx == 7) {
|
||||
TinySlabMeta* meta = &ss_guard->slabs[reuse_slot_idx];
|
||||
if (!c7_meta_is_pristine(meta)) {
|
||||
c7_log_skip_nonempty_acquire(ss_guard, reuse_slot_idx, meta, "SKIP_NONEMPTY_ACQUIRE");
|
||||
sp_freelist_push_lockfree(class_idx, reuse_meta, reuse_slot_idx);
|
||||
goto stage2_fallback;
|
||||
}
|
||||
}
|
||||
|
||||
// P-Tier: Skip DRAINING tier SuperSlabs
|
||||
if (!ss_tier_is_hot(ss_guard)) {
|
||||
// DRAINING SuperSlab - skip this slot and fall through to Stage 2
|
||||
@ -270,6 +418,15 @@ stage1_retry_after_tension_drain:
|
||||
|
||||
*ss_out = ss;
|
||||
*slab_idx_out = reuse_slot_idx;
|
||||
if (c7_reset_and_log_if_needed(ss, reuse_slot_idx, class_idx) != 0) {
|
||||
*ss_out = NULL;
|
||||
*slab_idx_out = -1;
|
||||
if (g_lock_stats_enabled == 1) {
|
||||
atomic_fetch_add(&g_lock_release_count, 1);
|
||||
}
|
||||
pthread_mutex_unlock(&g_shared_pool.alloc_lock);
|
||||
goto stage2_fallback;
|
||||
}
|
||||
|
||||
if (g_lock_stats_enabled == 1) {
|
||||
atomic_fetch_add(&g_lock_release_count, 1);
|
||||
@ -338,6 +495,19 @@ stage2_fallback:
|
||||
1, memory_order_relaxed);
|
||||
}
|
||||
|
||||
if (class_idx == 7) {
|
||||
TinySlabMeta* meta = &ss->slabs[claimed_idx];
|
||||
if (!c7_meta_is_pristine(meta)) {
|
||||
c7_log_skip_nonempty_acquire(ss, claimed_idx, meta, "SKIP_NONEMPTY_ACQUIRE");
|
||||
sp_slot_mark_empty(hint_meta, claimed_idx);
|
||||
if (g_lock_stats_enabled == 1) {
|
||||
atomic_fetch_add(&g_lock_release_count, 1);
|
||||
}
|
||||
pthread_mutex_unlock(&g_shared_pool.alloc_lock);
|
||||
goto stage2_scan;
|
||||
}
|
||||
}
|
||||
|
||||
// Update SuperSlab metadata under mutex
|
||||
ss->slab_bitmap |= (1u << claimed_idx);
|
||||
ss_slab_meta_class_idx_set(ss, claimed_idx, (uint8_t)class_idx);
|
||||
@ -353,6 +523,15 @@ stage2_fallback:
|
||||
// Hint is still good, no need to update
|
||||
*ss_out = ss;
|
||||
*slab_idx_out = claimed_idx;
|
||||
if (c7_reset_and_log_if_needed(ss, claimed_idx, class_idx) != 0) {
|
||||
*ss_out = NULL;
|
||||
*slab_idx_out = -1;
|
||||
if (g_lock_stats_enabled == 1) {
|
||||
atomic_fetch_add(&g_lock_release_count, 1);
|
||||
}
|
||||
pthread_mutex_unlock(&g_shared_pool.alloc_lock);
|
||||
goto stage2_scan;
|
||||
}
|
||||
sp_fix_geometry_if_needed(ss, claimed_idx, class_idx);
|
||||
|
||||
if (g_lock_stats_enabled == 1) {
|
||||
@ -432,6 +611,19 @@ stage2_scan:
|
||||
1, memory_order_relaxed);
|
||||
}
|
||||
|
||||
if (class_idx == 7) {
|
||||
TinySlabMeta* meta_slab = &ss->slabs[claimed_idx];
|
||||
if (!c7_meta_is_pristine(meta_slab)) {
|
||||
c7_log_skip_nonempty_acquire(ss, claimed_idx, meta_slab, "SKIP_NONEMPTY_ACQUIRE");
|
||||
sp_slot_mark_empty(meta, claimed_idx);
|
||||
if (g_lock_stats_enabled == 1) {
|
||||
atomic_fetch_add(&g_lock_release_count, 1);
|
||||
}
|
||||
pthread_mutex_unlock(&g_shared_pool.alloc_lock);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// Update SuperSlab metadata under mutex
|
||||
ss->slab_bitmap |= (1u << claimed_idx);
|
||||
ss_slab_meta_class_idx_set(ss, claimed_idx, (uint8_t)class_idx);
|
||||
@ -449,6 +641,15 @@ stage2_scan:
|
||||
|
||||
*ss_out = ss;
|
||||
*slab_idx_out = claimed_idx;
|
||||
if (c7_reset_and_log_if_needed(ss, claimed_idx, class_idx) != 0) {
|
||||
*ss_out = NULL;
|
||||
*slab_idx_out = -1;
|
||||
if (g_lock_stats_enabled == 1) {
|
||||
atomic_fetch_add(&g_lock_release_count, 1);
|
||||
}
|
||||
pthread_mutex_unlock(&g_shared_pool.alloc_lock);
|
||||
continue;
|
||||
}
|
||||
sp_fix_geometry_if_needed(ss, claimed_idx, class_idx);
|
||||
|
||||
if (g_lock_stats_enabled == 1) {
|
||||
@ -623,6 +824,15 @@ stage2_scan:
|
||||
|
||||
*ss_out = new_ss;
|
||||
*slab_idx_out = first_slot;
|
||||
if (c7_reset_and_log_if_needed(new_ss, first_slot, class_idx) != 0) {
|
||||
*ss_out = NULL;
|
||||
*slab_idx_out = -1;
|
||||
if (g_lock_stats_enabled == 1) {
|
||||
atomic_fetch_add(&g_lock_release_count, 1);
|
||||
}
|
||||
pthread_mutex_unlock(&g_shared_pool.alloc_lock);
|
||||
return -1;
|
||||
}
|
||||
sp_fix_geometry_if_needed(new_ss, first_slot, class_idx);
|
||||
|
||||
if (g_lock_stats_enabled == 1) {
|
||||
|
||||
Reference in New Issue
Block a user