#include "pool_tls_registry.h"

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>     // uint64_t, uintptr_t
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>  // pid_t

// One registry entry per registered chunk: [base, end) is owned by thread tid
// and tagged with its size class. The next pointer is atomic so that lookups
// can walk the chain without taking the bucket lock.
typedef struct RegEntry {
    void* base;
    void* end;
    pid_t tid;
    int class_idx;
    _Atomic(struct RegEntry*) next;   // Atomic for lock-free reads
} RegEntry;

#define REG_BUCKETS 1024

static _Atomic(RegEntry*) g_buckets[REG_BUCKETS];   // Atomic bucket heads for lock-free reads
static pthread_mutex_t    g_locks[REG_BUCKETS];     // Only for registration/unregistration
static pthread_once_t     g_init_once = PTHREAD_ONCE_INIT;
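
/*
 * Design note: registration and unregistration serialize per bucket on
 * g_locks[] and publish changes with release stores; lookups never lock and
 * traverse the chains with acquire loads, so a reader observes either a
 * fully initialized entry or none at all.
 */
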
static void reg_init(void){
    for (int i = 0; i < REG_BUCKETS; i++) {
        pthread_mutex_init(&g_locks[i], NULL);
        atomic_store_explicit(&g_buckets[i], NULL, memory_order_relaxed);
    }
}

// MurmurHash3 fmix64-style finalizer; the low bits select the bucket, so
// REG_BUCKETS must stay a power of two.
static inline uint64_t hash_ptr(void* p){
    uintptr_t x = (uintptr_t)p;
    x ^= x >> 33; x *= 0xff51afd7ed558ccdULL;
    x ^= x >> 33; x *= 0xc4ceb9fe1a85ec53ULL;
    x ^= x >> 33;
    return x;
}

void pool_reg_register(void* base, size_t size, pid_t tid, int class_idx){
    pthread_once(&g_init_once, reg_init);
    void* end = (void*)((char*)base + size);
    uint64_t h = hash_ptr(base) & (REG_BUCKETS - 1);

    pthread_mutex_lock(&g_locks[h]);
    RegEntry* e = (RegEntry*)malloc(sizeof(RegEntry));
    if (!e) {                       // Out of memory: skip the registration rather than crash
        pthread_mutex_unlock(&g_locks[h]);
        return;
    }
    e->base = base; e->end = end; e->tid = tid; e->class_idx = class_idx;

    // Publish the fully initialized entry at the head of the chain; the
    // release store pairs with the acquire load in pool_reg_lookup().
    RegEntry* old_head = atomic_load_explicit(&g_buckets[h], memory_order_relaxed);
    atomic_store_explicit(&e->next, old_head, memory_order_relaxed);
    atomic_store_explicit(&g_buckets[h], e, memory_order_release);
    pthread_mutex_unlock(&g_locks[h]);
}

void pool_reg_unregister(void* base, size_t size, pid_t tid){
    (void)size;                     // The removal key is (base, tid); size is unused here
    pthread_once(&g_init_once, reg_init);
    uint64_t h = hash_ptr(base) & (REG_BUCKETS - 1);

    pthread_mutex_lock(&g_locks[h]);
    // Need to carefully update the atomic pointers so that concurrent
    // lock-free readers always see a consistent chain.
    _Atomic(RegEntry*)* pp = &g_buckets[h];
    RegEntry* e = atomic_load_explicit(pp, memory_order_relaxed);
    RegEntry* prev = NULL;

    while (e){
        if (e->base == base && e->tid == tid){
            RegEntry* next = atomic_load_explicit(&e->next, memory_order_relaxed);
            if (prev == NULL) {
                atomic_store_explicit(&g_buckets[h], next, memory_order_release);
            } else {
                atomic_store_explicit(&prev->next, next, memory_order_release);
            }
            // NOTE: freeing immediately assumes no lock-free reader can still
            // be traversing this entry; otherwise deferred reclamation would
            // be needed.
            free(e);
            break;
        }
        prev = e;
        e = atomic_load_explicit(&e->next, memory_order_relaxed);
    }
    pthread_mutex_unlock(&g_locks[h]);
}

int pool_reg_lookup(void* ptr, pid_t* tid_out, int* class_idx_out){
    pthread_once(&g_init_once, reg_init);
    uint64_t h = hash_ptr(ptr) & (REG_BUCKETS - 1);

    // Lock-free lookup: no mutex needed for reads.
    RegEntry* e = atomic_load_explicit(&g_buckets[h], memory_order_acquire);
    while (e) {
        // Load entry fields (they're stable after registration).
        void* base = e->base;
        void* end = e->end;

        if (ptr >= base && ptr < end){
            if (tid_out) *tid_out = e->tid;
            if (class_idx_out) *class_idx_out = e->class_idx;
            return 1;
        }
        e = atomic_load_explicit(&e->next, memory_order_acquire);
    }
    return 0;
}
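
/*
 * Usage sketch (illustrative only; chunk, CHUNK_SIZE, and my_tid are
 * placeholder names, not part of this file):
 *
 *   pool_reg_register(chunk, CHUNK_SIZE, my_tid, class_idx);  // after carving a thread-local chunk
 *
 *   pid_t owner; int cls;
 *   if (pool_reg_lookup(ptr, &owner, &cls)) {
 *       // ptr lies inside a registered [base, end) range: route the free
 *       // to the owning thread / size class.
 *   }
 *
 *   pool_reg_unregister(chunk, CHUNK_SIZE, my_tid);           // before releasing the chunk
 */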