Files
hakmem/core/pool_tls_registry.c
Moe Charm (CI) 1010a961fb Tiny: fix header/stride mismatch and harden refill paths
- Root cause: header-based class indexing (HEADER_CLASSIDX=1) wrote a 1-byte
  header during allocation, but linear carve/refill and initial slab capacity
  still used bare class block sizes. This mismatch could overrun slab usable
  space and corrupt freelists, causing reproducible SEGV at ~100k iters.

Changes
- Superslab: compute capacity with effective stride (block_size + header for
  classes 0..6; class7 remains headerless) in superslab_init_slab(). Add a
  debug-only bound check in superslab_alloc_from_slab() to fail fast if carve
  would exceed usable bytes.
- Refill (non-P0 and P0): use header-aware stride for all linear carving and
  TLS window bump operations. Ensure alignment/validation in tiny_refill_opt.h
  also uses stride, not raw class size.
- Drain: keep existing defense-in-depth for remote sentinel and sanitize nodes
  before splicing into freelist (already present).

Notes
- This unifies the memory layout across alloc/linear-carve/refill with a single
  stride definition and keeps class7 (1024B) headerless as designed.
- Debug builds add fail-fast checks; release builds remain lean.

Next
- Re-run Tiny benches (256/1024B) in debug to confirm stability, then in
  release. If any remaining crash persists, bisect with HAKMEM_TINY_P0_BATCH_REFILL=0
  to isolate the P0 batch carve, and continue reducing branch misses as planned.
2025-11-09 18:55:50 +09:00

69 lines
2.0 KiB
C

#include "pool_tls_registry.h"
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
/* One registered memory range [base, end), owned by thread `tid`.
 * Entries are chained into a per-bucket singly linked list (see g_buckets).
 * NOTE(review): pid_t and the fixed-width types are presumably pulled in via
 * pool_tls_registry.h — confirm, since <stdint.h>/<sys/types.h> are not
 * included here directly. */
typedef struct RegEntry {
void* base;            /* inclusive start of the registered range */
void* end;             /* exclusive end: base + size at registration time */
pid_t tid;             /* owning thread id recorded by the caller */
int class_idx;         /* allocator size-class index for this range */
struct RegEntry* next; /* next entry in the same hash bucket chain */
} RegEntry;
/* Number of hash buckets; must stay a power of two because bucket selection
 * masks with (REG_BUCKETS-1). */
#define REG_BUCKETS 1024
static RegEntry* g_buckets[REG_BUCKETS];          /* per-bucket chain heads */
static pthread_mutex_t g_locks[REG_BUCKETS];      /* one lock per bucket */
static pthread_once_t g_init_once = PTHREAD_ONCE_INIT; /* guards reg_init */
/* One-time initializer (run via pthread_once): set up every bucket mutex
 * with default attributes. */
static void reg_init(void){
    for (size_t bucket = 0; bucket < REG_BUCKETS; bucket++) {
        pthread_mutex_init(&g_locks[bucket], NULL);
    }
}
/* Mix a pointer into a well-distributed 64-bit value so bucket selection
 * uses all pointer bits.  The constants are the MurmurHash3 fmix64
 * finalizer.  Arithmetic is deliberately done in uintptr_t width, matching
 * the original layout on every platform. */
static inline uint64_t hash_ptr(void* p){
    uintptr_t x = (uintptr_t)p;
    x ^= x >> 33;
    x *= 0xff51afd7ed558ccdULL;
    x ^= x >> 33;
    x *= 0xc4ceb9fe1a85ec53ULL;
    x ^= x >> 33;
    return x;
}
/* Record the range [base, base+size) as owned by thread `tid` with size
 * class `class_idx`.  Thread-safe: the bucket is chosen by hashing `base`
 * and the entry is pushed onto the chain head under that bucket's lock.
 * On allocation failure the range is silently not registered — callers have
 * no error channel, and a missed registration only makes a later lookup
 * return "not found". */
void pool_reg_register(void* base, size_t size, pid_t tid, int class_idx){
pthread_once(&g_init_once, reg_init);
/* Allocate and fill the entry before taking the lock: keeps the critical
 * section minimal and lets us bail out cleanly on OOM.  The original code
 * dereferenced an unchecked malloc() result (NULL deref under OOM). */
RegEntry* e = malloc(sizeof *e);
if (!e) return;
e->base = base;
e->end = (void*)((char*)base + size);
e->tid = tid;
e->class_idx = class_idx;
uint64_t h = hash_ptr(base) & (REG_BUCKETS-1);
pthread_mutex_lock(&g_locks[h]);
e->next = g_buckets[h];
g_buckets[h] = e;
pthread_mutex_unlock(&g_locks[h]);
}
void pool_reg_unregister(void* base, size_t size, pid_t tid){
pthread_once(&g_init_once, reg_init);
uint64_t h = hash_ptr(base) & (REG_BUCKETS-1);
pthread_mutex_lock(&g_locks[h]);
RegEntry** pp = &g_buckets[h];
while (*pp){
RegEntry* e = *pp;
if (e->base == base && e->tid == tid){
*pp = e->next; free(e); break;
}
pp = &e->next;
}
pthread_mutex_unlock(&g_locks[h]);
}
/* Look up the registered range containing `ptr`.  On a hit, writes the
 * owning tid / class index through the optional out-params and returns 1;
 * returns 0 when no range in ptr's bucket contains it.
 *
 * NOTE(review): registration hashes `base`, but this lookup hashes `ptr`.
 * The range test below (ptr >= base && ptr < end) clearly anticipates
 * interior pointers, yet an interior pointer generally hashes to a
 * DIFFERENT bucket than its base, so such lookups will miss.  This is only
 * correct if callers always pass the exact registered base, or if bases are
 * aligned/sized such that ptr and base collapse to the same hash input —
 * neither is visible from this file; confirm against the callers. */
int pool_reg_lookup(void* ptr, pid_t* tid_out, int* class_idx_out){
pthread_once(&g_init_once, reg_init);
uint64_t h = hash_ptr(ptr) & (REG_BUCKETS-1);
pthread_mutex_lock(&g_locks[h]);
for (RegEntry* e = g_buckets[h]; e; e=e->next){
/* Half-open containment test: base inclusive, end exclusive. */
if (ptr >= e->base && ptr < e->end){
if (tid_out) *tid_out = e->tid;
if (class_idx_out) *class_idx_out = e->class_idx;
pthread_mutex_unlock(&g_locks[h]);
return 1;
}
}
pthread_mutex_unlock(&g_locks[h]);
return 0;
}