#ifndef SUPERSLAB_INLINE_H
#define SUPERSLAB_INLINE_H

// Standard headers used directly by the inline helpers below
// (superslab_types.h may already pull these in; included here defensively).
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include "superslab_types.h"
#include "../tiny_box_geometry.h" // Box 3 geometry helpers (stride/base/capacity)

// Forward declaration for unsafe remote drain used by refill/handle paths
// Implemented in hakmem_tiny_superslab.c
void _ss_remote_drain_to_freelist_unsafe(SuperSlab* ss, int slab_idx, TinySlabMeta* meta);

// Optional debug counter (defined in hakmem_tiny_superslab.c)
extern _Atomic uint64_t g_ss_active_dec_calls;

// Return maximum number of slabs for this SuperSlab based on lg_size.
static inline int ss_slabs_capacity(SuperSlab* ss)
{
    if (!ss) return 0;
    size_t ss_size = (size_t)1 << ss->lg_size;
    return (int)(ss_size / SLAB_SIZE);
}
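
// Example (sketch, not compiled; numbers are hypothetical): if SLAB_SIZE were
// 64 KiB and ss->lg_size were 21 (a 2 MiB SuperSlab), this would report
// 2 MiB / 64 KiB == 32 slabs. The real constants come from superslab_types.h.
#if 0
static void example_capacity(SuperSlab* ss)
{
    int cap = ss_slabs_capacity(ss);  // e.g. 32 under the hypothetical sizes above
    (void)cap;
}
#endif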

// Compute slab base pointer for given (ss, slab_idx).
// Box 5 wrapper: delegate to Box 3 canonical geometry helper.
static inline uint8_t* tiny_slab_base_for(SuperSlab* ss, int slab_idx)
{
    if (!ss || slab_idx < 0) {
        return NULL;
    }
    return tiny_slab_base_for_geometry(ss, slab_idx);
}

// Compute slab index for a pointer inside ss.
// Box 5 wrapper: inverse of Box 3 geometry (tiny_slab_base_for_geometry).
// Layout (data regions):
//  - Slab 0: [ss + SUPERSLAB_SLAB0_DATA_OFFSET, ss + SLAB_SIZE)
//  - Slab 1: [ss + 1*SLAB_SIZE, ss + 2*SLAB_SIZE)
//  - Slab k: [ss + k*SLAB_SIZE, ss + (k+1)*SLAB_SIZE)
static inline int slab_index_for(SuperSlab* ss, void* ptr)
{
    if (!ss || !ptr) {
        return -1;
    }

    uintptr_t base = (uintptr_t)ss;
    uintptr_t p = (uintptr_t)ptr;
    size_t ss_size = (size_t)1 << ss->lg_size;

    // Outside overall SuperSlab range
    if (p < base + SUPERSLAB_SLAB0_DATA_OFFSET || p >= base + ss_size) {
        return -1;
    }

    // Slab 0: from first data byte up to the end of first slab
    if (p < base + SLAB_SIZE) {
        return 0;
    }

    // Slabs 1+ use simple SLAB_SIZE spacing from SuperSlab base
    size_t rel = p - base;
    int idx = (int)(rel / SLAB_SIZE);
    if (idx < 0 || idx >= SLABS_PER_SUPERSLAB_MAX) {
        return -1;
    }
    return idx;
}
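
// Usage sketch (not compiled; illustrative only): tiny_slab_base_for and
// slab_index_for are intended to be inverses for any in-range pointer, so a
// pointer into slab k's data region maps back to index k.
#if 0
static void example_geometry_roundtrip(SuperSlab* ss, int k)
{
    uint8_t* base = tiny_slab_base_for(ss, k);  // start of slab k's data region
    if (base) {
        int idx = slab_index_for(ss, (void*)base);
        (void)idx;  // expected: idx == k
    }
}
#endif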

// Simple ref helpers used by lifecycle paths.
static inline uint32_t superslab_ref_get(SuperSlab* ss)
{
    return ss ? atomic_load_explicit(&ss->refcount, memory_order_acquire) : 0;
}

static inline void superslab_ref_inc(SuperSlab* ss)
{
    if (ss) {
        atomic_fetch_add_explicit(&ss->refcount, 1, memory_order_acq_rel);
    }
}

static inline void superslab_ref_dec(SuperSlab* ss)
{
    if (ss) {
        uint32_t prev = atomic_fetch_sub_explicit(&ss->refcount, 1, memory_order_acq_rel);
        (void)prev; // caller decides when to free; we just provide the primitive
    }
}
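
// Usage sketch (not compiled; illustrative only): the ref helpers are bare
// primitives. A lifecycle path typically takes a reference before working on a
// SuperSlab and drops it afterwards; deciding when the SuperSlab can actually be
// freed is left to the caller (e.g. by checking superslab_ref_get() under the
// lifecycle protocol implemented in hakmem_tiny_superslab.c).
#if 0
static void example_ref_borrow(SuperSlab* ss)
{
    superslab_ref_inc(ss);      // pin the SuperSlab while we work on it
    /* ... touch slabs / metadata ... */
    superslab_ref_dec(ss);      // unpin; freeing policy stays with the caller
    uint32_t now = superslab_ref_get(ss);
    (void)now;                  // a lifecycle path might act when this reaches 0
}
#endif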

// Ownership helpers (Box 3).
// owner_tid_low holds a single byte derived from the thread id (0 means unowned);
// acquire/release CAS that byte as a lightweight ownership tag.
static inline int ss_owner_try_acquire(TinySlabMeta* m, uint32_t tid)
{
    if (!m) return 0;
    uint8_t want = (uint8_t)((tid >> 8) & 0xFFu);
    uint8_t expected = 0;
    return __atomic_compare_exchange_n(&m->owner_tid_low, &expected, want,
                                       false, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
}

static inline void ss_owner_release(TinySlabMeta* m, uint32_t tid)
{
    if (!m) return;
    uint8_t expected = (uint8_t)((tid >> 8) & 0xFFu);
    (void)__atomic_compare_exchange_n(&m->owner_tid_low, &expected, 0u,
                                      false, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
}

static inline int ss_owner_is_mine(TinySlabMeta* m, uint32_t tid)
{
    if (!m) return 0;
    uint8_t cur = __atomic_load_n(&m->owner_tid_low, __ATOMIC_RELAXED);
    return cur == (uint8_t)((tid >> 8) & 0xFFu);
}
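
// Usage sketch (not compiled; illustrative only): a typical caller tries to become
// the slab's owner before doing owner-only work, and otherwise falls back to the
// remote path (ss_remote_push below). The "local freelist push" in the fast path is
// schematic, not a helper defined by this header.
#if 0
static void example_owner_guarded_free(SuperSlab* ss, int slab_idx,
                                       TinySlabMeta* m, uint32_t my_tid, void* blk)
{
    if (ss_owner_try_acquire(m, my_tid)) {
        /* owner-only fast path: e.g. push blk onto the slab's local freelist */
        ss_owner_release(m, my_tid);
    } else {
        // Someone else owns the slab: defer the block via the per-slab MPSC stack.
        (void)ss_remote_push(ss, slab_idx, blk);
    }
}
#endif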

// Active block accounting (saturating dec by 1)
static inline void ss_active_dec_one(SuperSlab* ss)
{
    if (!ss) return;
    atomic_fetch_add_explicit(&g_ss_active_dec_calls, 1, memory_order_relaxed);
    uint32_t cur = atomic_load_explicit(&ss->total_active_blocks, memory_order_relaxed);
    while (cur != 0) {
        if (atomic_compare_exchange_weak_explicit(&ss->total_active_blocks,
                                                  &cur,
                                                  cur - 1u,
                                                  memory_order_acq_rel,
                                                  memory_order_relaxed)) {
            return;
        }
        // cur updated by failed CAS; loop
    }
}
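
// Why a CAS loop instead of a plain fetch_sub: an extra decrement on an
// already-zero counter must not wrap total_active_blocks to UINT32_MAX.
// Sketch below (not compiled; illustrative only).
#if 0
static void example_saturating_dec(SuperSlab* ss)
{
    // A plain fetch_sub on zero would wrap: 0u - 1u == UINT32_MAX.
    // ss_active_dec_one() instead leaves the counter at 0:
    ss_active_dec_one(ss);  // no-op once total_active_blocks has reached 0
}
#endif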

// Remote push helper (Box 2):
//  - Enqueue node onto the per-slab MPSC stack
//  - Returns 1 on the empty->nonempty transition, 0 otherwise, -1 on invalid arguments
//  - Also decrements ss->total_active_blocks once (the free is accounted here)
static inline int ss_remote_push(SuperSlab* ss, int slab_idx, void* node)
{
    if (!ss || slab_idx < 0 || slab_idx >= SLABS_PER_SUPERSLAB_MAX || !node) {
        return -1;
    }

    _Atomic uintptr_t* head = &ss->remote_heads[slab_idx];
    uintptr_t old_head;
    uintptr_t new_head;
    int transitioned = 0;

    do {
        old_head = atomic_load_explicit(head, memory_order_acquire);
        // The next pointer is normally handled via tiny_next_ptr_box / tiny_nextptr;
        // here we simply push onto a singly linked list (the upper layer decodes it).
        *(uintptr_t*)node = old_head;
        new_head = (uintptr_t)node;
    } while (!atomic_compare_exchange_weak_explicit(
                 head, &old_head, new_head,
                 memory_order_release, memory_order_relaxed));

    transitioned = (old_head == 0) ? 1 : 0;
    atomic_fetch_add_explicit(&ss->remote_counts[slab_idx], 1, memory_order_acq_rel);

    // account active block removal once per free
    ss_active_dec_one(ss);
    return transitioned;
}
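
// Usage sketch (not compiled; illustrative only): ss_remote_push is the producer
// side of the per-slab MPSC stack; the owning thread later drains it back into the
// slab freelist via _ss_remote_drain_to_freelist_unsafe (declared above, implemented
// in hakmem_tiny_superslab.c). The "unsafe" drain assumes the caller already holds
// slab ownership; how meta is obtained here is schematic.
#if 0
// Remote (non-owner) thread frees a block:
static void example_remote_free(SuperSlab* ss, int slab_idx, void* blk)
{
    int first = ss_remote_push(ss, slab_idx, blk);
    (void)first;  // 1 on empty->nonempty: a hint that the owner has pending remote frees
}

// Owner thread, e.g. on a refill miss, reclaims remotely freed blocks:
static void example_owner_drain(SuperSlab* ss, int slab_idx, TinySlabMeta* meta)
{
    // Caller must own the slab (see ss_owner_try_acquire) before the unsafe drain.
    _ss_remote_drain_to_freelist_unsafe(ss, slab_idx, meta);
}
#endif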

#endif // SUPERSLAB_INLINE_H