hakmem/core/slab_handle.h
Commit 52386401b3 by Moe Charm (CI): Debug Counters Implementation - Clean History
Major Features:
- Debug counter infrastructure for Refill Stage tracking
- Free Pipeline counters (ss_local, ss_remote, tls_sll)
- Diagnostic counters for early return analysis
- Unified larson.sh benchmark runner with profiles
- Phase 6-3 regression analysis documentation

Bug Fixes:
- Fix SuperSlab disabled by default (HAKMEM_TINY_USE_SUPERSLAB)
- Fix profile variable naming consistency
- Add .gitignore patterns for large files

Performance:
- Phase 6-3: 4.79 M ops/s (has OOM risk)
- With SuperSlab: 3.13 M ops/s (+19% improvement)

This is a clean repository without large log files.

🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-05 12:31:14 +09:00

// slab_handle.h - SlabHandle Box (Ownership + Remote Drain + Metadata Access)
// Purpose: Encapsulate slab ownership acquisition, remote drain, and metadata access
// Invariant: valid==1 ⇔ owner_tid==self && safe to drain/modify
#pragma once

#include <stdint.h>
#include <stdatomic.h>
#include <stdio.h>    // fprintf() in debug/verbose paths
#include <stdlib.h>   // getenv() for the HAKMEM_TINY_FREELIST_MASK opt-in
#include <signal.h>

#include "hakmem_tiny_superslab.h"
#include "tiny_debug_ring.h"
#include "tiny_remote.h"

extern int g_debug_remote_guard;
extern int g_tiny_safe_free_strict;

// Box: SlabHandle
// Capsule: Ownership, remote drain, metadata access
// Invariant: Operations only succeed when valid==1 (owned by current thread)
typedef struct SlabHandle {
    SuperSlab*    ss;        // SuperSlab pointer
    TinySlabMeta* meta;      // Cached metadata pointer
    uint8_t       slab_idx;  // Slab index within SuperSlab
    uint32_t      owner_tid; // Owner thread ID (cached)
    uint8_t       valid;     // 1=owned, 0=invalid/unowned
    uint8_t       _pad[3];   // Padding
} SlabHandle;

// Core operations
// Try to acquire ownership of a slab
// Returns valid handle on success, invalid handle on failure
// MUST be called before any drain/modify operations
static inline SlabHandle slab_try_acquire(SuperSlab* ss, int idx, uint32_t tid) {
    SlabHandle h = {0};
    if (!ss || ss->magic != SUPERSLAB_MAGIC) {
        return h; // Invalid SuperSlab
    }
    int cap = ss_slabs_capacity(ss);
    if (idx < 0 || idx >= cap) {
        return h; // Invalid index
    }
    TinySlabMeta* m = &ss->slabs[idx];

    // Try to acquire ownership (Box 3: Ownership)
    if (!ss_owner_try_acquire(m, tid)) {
        return h; // Failed to acquire
    }

    // Success - build valid handle
    h.ss = ss;
    h.meta = m;
    h.slab_idx = (uint8_t)idx;
    h.owner_tid = tid;

    if (__builtin_expect(g_debug_remote_guard, 0)) {
        uint32_t cur = __atomic_load_n(&m->owner_tid, __ATOMIC_RELAXED);
        if (cur != tid || cur == 0) {
            tiny_debug_ring_record(TINY_RING_EVENT_REMOTE_INVALID,
                                   (uint16_t)ss->size_class,
                                   m,
                                   ((uintptr_t)cur << 32) | (uintptr_t)tid);
            if (g_tiny_safe_free_strict) {
                raise(SIGUSR2);
            }
            h.valid = 0;
            return h;
        }
        uintptr_t aux = ((uintptr_t)h.slab_idx << 32) | (uintptr_t)tid;
        tiny_debug_ring_record(TINY_RING_EVENT_OWNER_ACQUIRE,
                               (uint16_t)ss->size_class,
                               m,
                               aux);
    }

    h.valid = 1;
    return h;
}

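// Usage sketch (illustrative only, not part of the API): the typical lifecycle is
// acquire → drain → allocate/free → release. How the caller obtains ss, idx, and
// its thread id is an assumption here; my_thread_id() is a hypothetical helper,
// not something this header provides.
//
//   uint32_t tid = my_thread_id();                  // hypothetical TID source
//   SlabHandle h = slab_try_acquire(ss, idx, tid);
//   if (!slab_is_valid(&h)) {
//       // another thread owns this slab; fall back to a different slab
//   } else {
//       slab_drain_remote_full(&h);                 // fold pending remote frees in
//       void* blk = slab_freelist_pop(&h);          // NULL if no free blocks remain
//       /* ... use or hand out blk ... */
//       slab_release(&h);                           // drop ownership when done
//   }
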
// Forward declaration for internal unsafe drain function
extern void _ss_remote_drain_to_freelist_unsafe(SuperSlab* ss, int slab_idx, TinySlabMeta* meta);

static inline int slab_remote_pending(const SlabHandle* h) {
    if (!h || !h->valid) return 0;
    uintptr_t head = atomic_load_explicit(&h->ss->remote_heads[h->slab_idx], memory_order_acquire);
    return head != 0;
}

// Drain remote queue to freelist (Box 2: Remote Queue boundary)
// Requires: h->valid == 1 (ownership verified)
// Effect: Merges remote queue into freelist, resets remote_counts
static inline void slab_drain_remote(SlabHandle* h) {
    if (!h || !h->valid) {
#ifdef HAKMEM_DEBUG_VERBOSE
        fprintf(stderr, "[SLAB_HANDLE] drain_remote: invalid handle\n");
#endif
        return; // Invalid handle - no-op
    }
    if (__builtin_expect(g_debug_remote_guard, 0)) {
        uint32_t cur_owner = __atomic_load_n(&h->meta->owner_tid, __ATOMIC_RELAXED);
        if (cur_owner != h->owner_tid || cur_owner == 0) {
            uintptr_t aux = ((uintptr_t)cur_owner << 32) | (uintptr_t)h->owner_tid;
            tiny_debug_ring_record(TINY_RING_EVENT_REMOTE_INVALID,
                                   (uint16_t)h->ss->size_class,
                                   h->meta,
                                   aux);
            if (g_tiny_safe_free_strict) {
                raise(SIGUSR2);
                return;
            }
        }
    }
    // Ownership is guaranteed by valid==1
    // Safe to call internal unsafe version (ownership already verified)
    _ss_remote_drain_to_freelist_unsafe(h->ss, h->slab_idx, h->meta);
}

// Drain repeatedly (bounded at 32 attempts) until the remote queue is observed empty
static inline void slab_drain_remote_full(SlabHandle* h) {
    if (!h || !h->valid) return;
    for (int attempt = 0; attempt < 32; attempt++) {
        if (!slab_remote_pending(h)) break;
        slab_drain_remote(h);
    }
    if (__builtin_expect(g_debug_remote_guard, 0)) {
        uintptr_t head = atomic_load_explicit(&h->ss->remote_heads[h->slab_idx], memory_order_relaxed);
        if (head != 0) {
            tiny_remote_watch_note("drain_pending",
                                   h->ss,
                                   h->slab_idx,
                                   (void*)head,
                                   0xA242u,
                                   h->owner_tid,
                                   0);
        }
    }
}

// Get metadata pointer (read-only access)
// Returns: Cached metadata pointer if valid, NULL otherwise
static inline TinySlabMeta* slab_meta(SlabHandle* h) {
    return (h && h->valid) ? h->meta : NULL;
}

// Release ownership (optional - for explicit cleanup)
// Effect: Resets owner_tid to 0, invalidates handle
static inline void slab_release(SlabHandle* h) {
    if (!h || !h->valid) {
        return; // Already invalid
    }
    if (__builtin_expect(g_debug_remote_guard, 0)) {
        uint32_t cur_owner = __atomic_load_n(&h->meta->owner_tid, __ATOMIC_RELAXED);
        uintptr_t aux = ((uintptr_t)h->slab_idx << 32) | (uintptr_t)cur_owner;
        tiny_debug_ring_record(TINY_RING_EVENT_OWNER_RELEASE,
                               (uint16_t)(h->ss ? h->ss->size_class : 0u),
                               h->meta,
                               aux);
        if (cur_owner != h->owner_tid || cur_owner == 0) {
            tiny_debug_ring_record(TINY_RING_EVENT_REMOTE_INVALID,
                                   (uint16_t)(h->ss ? h->ss->size_class : 0u),
                                   h->meta,
                                   ((uintptr_t)cur_owner << 32) | (uintptr_t)h->owner_tid);
            if (g_tiny_safe_free_strict) {
                raise(SIGUSR2);
            }
        }
    }
    // Release ownership (Box 3: Ownership)
    __atomic_store_n(&h->meta->owner_tid, 0u, __ATOMIC_RELEASE);
    h->valid = 0;
    h->owner_tid = 0;
}

// Check if handle is valid (owned and safe to use)
static inline int slab_is_valid(SlabHandle* h) {
    return (h && h->valid) ? 1 : 0;
}

// Get freelist pointer (convenience accessor)
static inline void* slab_freelist(SlabHandle* h) {
    if (!h || !h->valid) return NULL;
    return h->meta->freelist;
}

// Get used count (convenience accessor)
static inline uint16_t slab_used(SlabHandle* h) {
    if (!h || !h->valid) return 0;
    return h->meta->used;
}

// Get capacity (convenience accessor)
static inline uint16_t slab_capacity(SlabHandle* h) {
    if (!h || !h->valid) return 0;
    return h->meta->capacity;
}

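// Usage sketch (illustrative): the accessors above let an owning caller inspect
// occupancy, e.g. to decide whether a slab is completely free and could be
// retired or recycled. The retirement policy itself is an assumption, not part
// of this Box.
//
//   if (slab_is_valid(&h) && slab_used(&h) == 0) {
//       // no live allocations remain in this slab; caller may retire/recycle it
//   }
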
// ========== FreeList Box Operations ==========
// Box Invariant: All freelist operations REQUIRE valid==1 (ownership guaranteed)
// Push to freelist (called during free)
// Returns: 1 on success, 0 on failure (no ownership)
static inline int slab_freelist_push(SlabHandle* h, void* ptr) {
    if (!h || !h->valid) {
#ifdef HAKMEM_DEBUG_VERBOSE
        fprintf(stderr, "[SLAB_HANDLE] freelist_push: invalid handle (no ownership)\n");
#endif
        return 0; // Box: No ownership → FAIL
    }
    if (__builtin_expect(g_debug_remote_guard, 0)) {
        uintptr_t pval = (uintptr_t)ptr;
        uintptr_t fval = (uintptr_t)h->meta->freelist;
        if ((pval & (sizeof(void*) - 1)) != 0 || (fval && (fval & (sizeof(void*) - 1)) != 0)) {
            fprintf(stderr,
                    "[SLAB_HANDLE] FREELIST_ALIGN cls=%u slab=%u ptr=%p freelist=%p owner=%u used=%u\n",
                    h->ss ? h->ss->size_class : 0u,
                    (unsigned)h->slab_idx,
                    ptr,
                    h->meta->freelist,
                    h->meta->owner_tid,
                    (unsigned)h->meta->used);
        }
    }
    // Ownership guaranteed by valid==1 → safe to modify freelist
    void* prev = h->meta->freelist; // old head; reused below for empty→non-empty detection
    *(void**)ptr = prev;
    h->meta->freelist = ptr;
    // Optional freelist mask update (opt-in via env HAKMEM_TINY_FREELIST_MASK)
    do {
        static int g_mask_en = -1;
        if (__builtin_expect(g_mask_en == -1, 0)) {
            const char* e = getenv("HAKMEM_TINY_FREELIST_MASK");
            g_mask_en = (e && *e && *e != '0') ? 1 : 0;
        }
        if (__builtin_expect(g_mask_en, 0) && prev == NULL && h->ss) {
            uint32_t bit = (1u << h->slab_idx);
            atomic_fetch_or_explicit(&h->ss->freelist_mask, bit, memory_order_release);
        }
    } while (0);
    if (h->meta->used > 0) h->meta->used--;
    // Phase 6-2.2: Update nonempty_mask if transition empty→non-empty
    if (prev == NULL) {
        h->ss->nonempty_mask |= (1u << h->slab_idx);
    }
    tiny_remote_watch_note("freelist_push", h->ss, h->slab_idx, ptr, 0xA236u, h->owner_tid, 0);
    tiny_remote_track_on_local_free(h->ss, h->slab_idx, ptr, "freelist_push", h->owner_tid);
    return 1;
}

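// Usage sketch (illustrative): local-free fast path. A block may only be pushed
// onto the freelist while holding ownership; otherwise it must go through the
// remote queue. remote_queue_push() below is a hypothetical placeholder for the
// cross-thread path provided elsewhere (tiny_remote.h), not an API defined here.
//
//   SlabHandle h = slab_try_acquire(ss, idx, tid);
//   if (slab_is_valid(&h)) {
//       slab_freelist_push(&h, ptr);       // owner path: direct freelist push
//       slab_release(&h);
//   } else {
//       remote_queue_push(ss, idx, ptr);   // hypothetical cross-thread free path
//   }
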
// Pop from freelist (called during alloc)
// Returns: pointer on success, NULL on failure/empty
static inline void* slab_freelist_pop(SlabHandle* h) {
    if (!h || !h->valid) {
#ifdef HAKMEM_DEBUG_VERBOSE
        fprintf(stderr, "[SLAB_HANDLE] freelist_pop: invalid handle (no ownership)\n");
#endif
        return NULL; // Box: No ownership → FAIL
    }
    void* ptr = h->meta->freelist;
    if (ptr) {
        void* next = *(void**)ptr;
        h->meta->freelist = next;
        h->meta->used++;
        // Optional freelist mask clear when freelist becomes empty
        do {
            static int g_mask_en2 = -1;
            if (__builtin_expect(g_mask_en2 == -1, 0)) {
                const char* e = getenv("HAKMEM_TINY_FREELIST_MASK");
                g_mask_en2 = (e && *e && *e != '0') ? 1 : 0;
            }
            if (__builtin_expect(g_mask_en2, 0) && next == NULL && h->ss) {
                uint32_t bit = (1u << h->slab_idx);
                atomic_fetch_and_explicit(&h->ss->freelist_mask, ~bit, memory_order_release);
            }
        } while (0);
        // Phase 6-2.2: Update nonempty_mask if transition non-empty→empty
        if (h->meta->freelist == NULL) {
            h->ss->nonempty_mask &= ~(1u << h->slab_idx);
        }
        tiny_remote_watch_note("freelist_pop", h->ss, h->slab_idx, ptr, 0xA237u, h->owner_tid, 0);
        tiny_remote_assert_not_remote(h->ss, h->slab_idx, ptr, "freelist_pop_ret", h->owner_tid);
        tiny_remote_track_on_alloc(h->ss, h->slab_idx, ptr, "freelist_pop", h->owner_tid);
    }
    return ptr;
}

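// Usage sketch (illustrative): allocation path that retries once after draining
// pending remote frees. Uses only functions defined in this header; the caller
// is assumed to already hold a valid, owned handle h.
//
//   void* blk = slab_freelist_pop(&h);
//   if (!blk && slab_remote_pending(&h)) {
//       slab_drain_remote(&h);             // merge remote frees into the freelist
//       blk = slab_freelist_pop(&h);       // retry after the drain
//   }
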
// Check if freelist is non-empty (read-only; still requires a valid, owned handle)
static inline int slab_has_freelist(SlabHandle* h) {
    if (!h || !h->valid) return 0;
    return (h->meta->freelist != NULL);
}

// ========== Box 4 Boundary: Adopt/Bind Safe Guard ==========
// Box Invariant: bind must guarantee remote_head==0
// TOCTOU race prevention: another thread may remote-push between the drain and the bind
// Check if slab is safe to bind (TOCTOU-safe check)
// Returns: 1 if safe (freelist exists AND remote_head==0), 0 otherwise
// Usage: Must be called immediately before bind to prevent TOCTOU race
//
// Example (correct usage - TOCTOU-safe):
//   SlabHandle h = slab_try_acquire(ss, idx, tid);
//   if (slab_is_valid(&h)) {
//       slab_drain_remote_full(&h);
//       if (slab_is_safe_to_bind(&h)) {      // ← re-check immediately before bind
//           tiny_tls_bind_slab(tls, h.ss, h.slab_idx);
//           return h.ss;
//       }
//       slab_release(&h);
//   }
//
// Example (incorrect - TOCTOU race):
//   slab_drain_remote_full(&h);
//   if (!slab_remote_pending(&h)) {          // ← check
//       // ★ another thread can still remote-push at this point!
//       tiny_tls_bind_slab(...);             // ← remote frees may be pending at bind time
//   }
static inline int slab_is_safe_to_bind(SlabHandle* h) {
    if (!h || !h->valid) return 0;
    if (!slab_freelist(h)) return 0;
    // Box 4 Boundary: final re-check immediately before bind (TOCTOU prevention)
    return !slab_remote_pending(h);
}