P1.3: Add meta->active for TLS SLL tracking
Add active field to TinySlabMeta to track blocks currently held by users
(not in TLS SLL or freelist caches). This enables accurate empty slab
detection that accounts for TLS SLL cached blocks.

Changes:
- superslab_types.h: Add _Atomic uint16_t active field
- ss_allocation_box.c, hakmem_tiny_superslab.c: Initialize active=0
- tiny_free_fast_v2.inc.h: Decrement active on TLS SLL push
- tiny_alloc_fast.inc.h: Add tiny_active_track_alloc() helper, increment
  active on TLS SLL pop (all code paths)
- ss_hot_cold_box.h: ss_is_slab_empty() uses active when enabled

All tracking is ENV-gated: HAKMEM_TINY_ACTIVE_TRACK=1 to enable.
Default is off for zero performance impact.

Invariant: active = used - tls_cached (active <= used)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@ -37,6 +37,27 @@
|
||||
#include <stdio.h>
|
||||
#include <stdatomic.h>
|
||||
|
||||
// P1.3: Helper to increment meta->active when allocating from TLS SLL
|
||||
// ENV gate: HAKMEM_TINY_ACTIVE_TRACK=1 to enable (default: 0 for performance)
|
||||
static inline void tiny_active_track_alloc(void* base) {
|
||||
static __thread int g_active_track = -1;
|
||||
if (__builtin_expect(g_active_track == -1, 0)) {
|
||||
const char* e = getenv("HAKMEM_TINY_ACTIVE_TRACK");
|
||||
g_active_track = (e && *e && *e != '0') ? 1 : 0;
|
||||
}
|
||||
if (__builtin_expect(g_active_track, 0)) {
|
||||
extern SuperSlab* ss_fast_lookup(void* ptr);
|
||||
SuperSlab* ss = ss_fast_lookup(base);
|
||||
if (ss && ss->magic == SUPERSLAB_MAGIC) {
|
||||
int slab_idx = slab_index_for(ss, base);
|
||||
if (slab_idx >= 0 && slab_idx < ss_slabs_capacity(ss)) {
|
||||
TinySlabMeta* meta = &ss->slabs[slab_idx];
|
||||
atomic_fetch_add_explicit(&meta->active, 1, memory_order_relaxed);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Diag counter: size>=1024 allocations routed to Tiny (env: HAKMEM_TINY_ALLOC_1024_METRIC)
|
||||
extern _Atomic uint64_t g_tiny_alloc_ge1024[];
|
||||
static inline void tiny_diag_track_size_ge1024_fast(size_t req_size, int class_idx) {
|
||||
@ -364,6 +385,8 @@ static inline void* tiny_alloc_fast_pop(int class_idx) {
|
||||
// Front Gate: SLL hit (SLIM fast path - 3 instructions)
|
||||
extern unsigned long long g_front_sll_hit[];
|
||||
g_front_sll_hit[class_idx]++;
|
||||
// P1.3: Track active when allocating from TLS SLL
|
||||
tiny_active_track_alloc(base);
|
||||
return base;
|
||||
}
|
||||
}
|
||||
@ -436,6 +459,9 @@ static inline void* tiny_alloc_fast_pop(int class_idx) {
|
||||
extern unsigned long long g_front_sll_hit[];
|
||||
g_front_sll_hit[class_idx]++;
|
||||
|
||||
// P1.3: Track active when allocating from TLS SLL
|
||||
tiny_active_track_alloc(base);
|
||||
|
||||
#if HAKMEM_DEBUG_COUNTERS
|
||||
// Track TLS freelist hits (compile-time gated, zero runtime cost when disabled)
|
||||
g_free_via_tls_sll[class_idx]++;
|
||||
@ -786,7 +812,13 @@ static inline void* tiny_alloc_fast(size_t size) {
|
||||
#endif
|
||||
} else {
|
||||
void* base = NULL;
|
||||
if (tls_sll_pop(class_idx, &base)) ptr = base; else ptr = NULL;
|
||||
if (tls_sll_pop(class_idx, &base)) {
|
||||
// P1.3: Track active when allocating from TLS SLL
|
||||
tiny_active_track_alloc(base);
|
||||
ptr = base;
|
||||
} else {
|
||||
ptr = NULL;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
ptr = NULL; // SLL disabled OR Front-Direct active → bypass SLL
|
||||
@ -826,7 +858,13 @@ static inline void* tiny_alloc_fast(size_t size) {
|
||||
#endif
|
||||
} else {
|
||||
void* base2 = NULL;
|
||||
if (tls_sll_pop(class_idx, &base2)) ptr = base2; else ptr = NULL;
|
||||
if (tls_sll_pop(class_idx, &base2)) {
|
||||
// P1.3: Track active when allocating from TLS SLL
|
||||
tiny_active_track_alloc(base2);
|
||||
ptr = base2;
|
||||
} else {
|
||||
ptr = NULL;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
ptr = NULL; // SLL disabled OR Front-Direct active → bypass SLL
|
||||
|
||||
Reference in New Issue
Block a user