#include "hakmem_tiny_remote_target.h"
|
||
|
|
#include "hakmem_tiny.h" // For TinySlab definition
|
||
|
|
|
||
|
|
// Global variables
int g_bg_remote_enable = 0;   // HAKMEM_TINY_BG_REMOTE=1
_Atomic uintptr_t g_remote_target_head[TINY_NUM_CLASSES];
_Atomic uint32_t  g_remote_target_len[TINY_NUM_CLASSES];
int g_bg_remote_batch = 32;   // HAKMEM_TINY_BG_REMOTE_BATCH

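/*
 * Illustrative sketch (not part of the original file, hence compiled out):
 * the environment variables named in the comments above could be read at
 * startup roughly like this. The helper name and call site are assumptions;
 * requires <stdlib.h> for getenv/atoi.
 */
#if 0
static void remote_target_init_from_env(void) {
    const char* e = getenv("HAKMEM_TINY_BG_REMOTE");
    if (e && e[0] == '1') g_bg_remote_enable = 1;

    const char* b = getenv("HAKMEM_TINY_BG_REMOTE_BATCH");
    if (b) {
        int v = atoi(b);
        if (v > 0) g_bg_remote_batch = v;
    }
}
#endif
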
void remote_target_enqueue(int class_idx, TinySlab* slab) {
    // Best-effort: mark as enqueued once to avoid duplicate pushes
    unsigned was = atomic_exchange_explicit(&slab->remote_queued, 1u, memory_order_acq_rel);
    if (was) return;

    // Link into per-class Treiber stack
    uintptr_t old_head;
    do {
        old_head = atomic_load_explicit(&g_remote_target_head[class_idx], memory_order_acquire);
        slab->remote_q_next = (TinySlab*)old_head;
    } while (!atomic_compare_exchange_weak_explicit(&g_remote_target_head[class_idx], &old_head,
                                                    (uintptr_t)slab,
                                                    memory_order_release, memory_order_relaxed));
    atomic_fetch_add_explicit(&g_remote_target_len[class_idx], 1u, memory_order_relaxed);
}

TinySlab* remote_target_pop(int class_idx) {
    uintptr_t head;
    TinySlab* node;
    do {
        head = atomic_load_explicit(&g_remote_target_head[class_idx], memory_order_acquire);
        if (head == 0) return NULL;
        node = (TinySlab*)head;
    } while (!atomic_compare_exchange_weak_explicit(&g_remote_target_head[class_idx], &head,
                                                    (uintptr_t)node->remote_q_next,
                                                    memory_order_acq_rel, memory_order_relaxed));
    // Mark dequeued so it can be re-enqueued later if needed
    atomic_store_explicit(&node->remote_queued, 0u, memory_order_release);
    atomic_fetch_sub_explicit(&g_remote_target_len[class_idx], 1u, memory_order_relaxed);
    node->remote_q_next = NULL;
    return node;
}
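
/*
 * Illustrative sketch (not part of the original file, hence compiled out):
 * one way a background worker could drain the per-class stacks, honoring the
 * g_bg_remote_enable flag and the g_bg_remote_batch limit. The draining
 * policy and the per-slab processing step are assumptions; the real consumer
 * lives elsewhere.
 */
#if 0
static void remote_target_drain_once(void) {
    if (!g_bg_remote_enable) return;
    for (int c = 0; c < TINY_NUM_CLASSES; c++) {
        for (int i = 0; i < g_bg_remote_batch; i++) {
            TinySlab* slab = remote_target_pop(c);
            if (!slab) break;  // this class's stack is empty
            /* ...flush the slab's remote free list here (project-specific)... */
        }
    }
}
#endif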