// tiny_ready.h - Ready List box (per-class, slab-entry hints)
// Purpose: O(1)-ish adopt candidate discovery to bypass deep scans in refill.
// Design: Lock-free ring of encoded slab entries (ss+slab_idx). Best-effort hints.
// Boundary:
// - Producer: push at the publish boundary (ss_partial_publish), on first remote arrival, and on first-free (prev==NULL)
// - Consumer: pop at the refill boundary (start of tiny_refill_try_fast) -> acquire owner -> bind
// Runtime ENV toggle removed: Ready ring is always enabled (fixed behavior)

#pragma once

#include <stdatomic.h>
#include <stdint.h>
#include "hakmem_tiny.h"

#ifndef TINY_READY_RING
#define TINY_READY_RING 128
#endif
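
// Note: keeping TINY_READY_RING a power of two lets the compiler lower the
// (start + k) % TINY_READY_RING in tiny_ready_push() below to a cheap bitmask.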

// Per-class ring buffer of encoded slab entries
static _Atomic(uintptr_t) g_ready_ring[TINY_NUM_CLASSES][TINY_READY_RING];
static _Atomic(uint32_t) g_ready_rr[TINY_NUM_CLASSES];
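
// Size sketch (illustrative; TINY_NUM_CLASSES comes from hakmem_tiny.h):
// with a hypothetical 8 classes on a 64-bit target, the hint state is
// 8 * (128 * 8 B + 4 B) ~= 8 KiB, small enough to live in static storage.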

static inline int tiny_ready_enabled(void) { return 1; }

// Scan width: fixed to TINY_READY_RING (ENV toggle removed)
static inline int tiny_ready_width(void) { return TINY_READY_RING; }

// Encode helpers are defined in the main TU; forward declarations here
static inline uintptr_t slab_entry_make(SuperSlab* ss, int slab_idx);
static inline SuperSlab* slab_entry_ss(uintptr_t ent);
static inline int slab_entry_idx(uintptr_t ent);
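
// Illustrative sketch only: the real definitions live in the main TU and may
// differ. Assuming SuperSlab is at least 64-byte aligned, the slab index can
// ride in the pointer's low bits:
//
//   static inline uintptr_t slab_entry_make(SuperSlab* ss, int slab_idx) {
//       return (uintptr_t)ss | ((uintptr_t)slab_idx & 0x3Fu);
//   }
//   static inline SuperSlab* slab_entry_ss(uintptr_t ent) {
//       return (SuperSlab*)(ent & ~(uintptr_t)0x3F);
//   }
//   static inline int slab_entry_idx(uintptr_t ent) {
//       return (int)(ent & 0x3F);
//   }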

// Push: best-effort, tries a few slots, drops on contention (hint-only)
static inline void tiny_ready_push(int class_idx, SuperSlab* ss, int slab_idx) {
    if (!tiny_ready_enabled()) return;
    if (__builtin_expect(class_idx < 0 || class_idx >= TINY_NUM_CLASSES, 0)) return;
    if (__builtin_expect(ss == NULL || slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss), 0)) return;

    uintptr_t ent = slab_entry_make(ss, slab_idx);
    uint32_t start = atomic_fetch_add_explicit(&g_ready_rr[class_idx], 1u, memory_order_relaxed);
    // Try up to 4 slots to reduce collisions
    for (int k = 0; k < 4; k++) {
        uint32_t idx = (start + (uint32_t)k) % (uint32_t)TINY_READY_RING;
        uintptr_t expected = 0;
        if (atomic_compare_exchange_weak_explicit(&g_ready_ring[class_idx][idx], &expected, ent,
                                                  memory_order_release, memory_order_relaxed)) {
            return;
        }
    }
    // Drop if all tried slots were busy (hint ring, loss is acceptable)
}
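
// Producer-side usage (sketch; ss_partial_publish is the publish boundary
// named in the header comment, assumed to know the class/slab it just made
// visible):
//
//   // inside ss_partial_publish(ss, slab_idx, class_idx):
//   tiny_ready_push(class_idx, ss, slab_idx);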

// Pop any entry; scans ring once (only on refill miss, not on hot path)
static inline uintptr_t tiny_ready_pop(int class_idx) {
    if (!tiny_ready_enabled()) return (uintptr_t)0;
    if (__builtin_expect(class_idx < 0 || class_idx >= TINY_NUM_CLASSES, 0)) return (uintptr_t)0;
    int scan = tiny_ready_width();
    for (int i = 0; i < scan; i++) {
        uintptr_t ent = atomic_exchange_explicit(&g_ready_ring[class_idx][i], (uintptr_t)0, memory_order_acq_rel);
        if (ent) return ent;
    }
    return (uintptr_t)0;
}
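
// Consumer-side usage (sketch; tiny_refill_try_fast is the refill boundary
// named in the header comment). Hints may be stale, so verify ownership
// before binding and fall back to the deep scan on failure:
//
//   uintptr_t ent = tiny_ready_pop(class_idx);
//   if (ent) {
//       SuperSlab* ss  = slab_entry_ss(ent);
//       int        idx = slab_entry_idx(ent);
//       // try to acquire/verify ownership of (ss, idx); bind on success,
//       // otherwise continue with the normal refill path
//   }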