Files
hakmem/core/pool_tls_remote.c
Moe Charm (CI) 1010a961fb Tiny: fix header/stride mismatch and harden refill paths
- Root cause: header-based class indexing (HEADER_CLASSIDX=1) wrote a 1-byte
  header during allocation, but linear carve/refill and initial slab capacity
  still used bare class block sizes. This mismatch could overrun slab usable
  space and corrupt freelists, causing reproducible SEGV at ~100k iters.

Changes
- Superslab: compute capacity with effective stride (block_size + header for
  classes 0..6; class7 remains headerless) in superslab_init_slab(). Add a
  debug-only bound check in superslab_alloc_from_slab() to fail fast if carve
  would exceed usable bytes.
- Refill (non-P0 and P0): use header-aware stride for all linear carving and
  TLS window bump operations. Ensure alignment/validation in tiny_refill_opt.h
  also uses stride, not raw class size.
- Drain: keep existing defense-in-depth for remote sentinel and sanitize nodes
  before splicing into freelist (already present).

Notes
- This unifies the memory layout across alloc/linear-carve/refill with a single
  stride definition and keeps class7 (1024B) headerless as designed.
- Debug builds add fail-fast checks; release builds remain lean.

Next
- Re-run Tiny benches (256/1024B) in debug to confirm stability, then in
  release. If any crash persists, bisect with HAKMEM_TINY_P0_BATCH_REFILL=0
  to isolate the P0 batch carve, and continue reducing branch misses as planned.
2025-11-09 18:55:50 +09:00

73 lines
2.1 KiB
C

#include "pool_tls_remote.h"
#include <pthread.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>
#define REMOTE_BUCKETS 256 /* power of two so hb() can hash with a simple mask */
/* Per-owner-thread record of remotely-freed blocks, chained inside one hash bucket.
 * Each queued block's first word is reused as the intrusive freelist link. */
typedef struct RemoteRec {
int tid; /* owner thread id (kernel tid from SYS_gettid) */
void* head[7]; /* singly-linked freelist head per tiny class 0..6 (class 7 is not remote-queued) */
int count[7]; /* number of nodes currently queued per class */
struct RemoteRec* next; /* next record colliding in the same bucket */
} RemoteRec;
static RemoteRec* g_buckets[REMOTE_BUCKETS]; /* hash table: owner tid -> RemoteRec chain */
static pthread_mutex_t g_locks[REMOTE_BUCKETS]; /* one lock per bucket; taken for both push and drain */
static pthread_once_t g_once = PTHREAD_ONCE_INIT; /* guards one-time mutex initialization in rq_init() */
/* One-time initializer: give every bucket mutex default attributes. */
static void rq_init(void){
    int i = 0;
    while (i < REMOTE_BUCKETS) {
        pthread_mutex_init(&g_locks[i], NULL);
        i++;
    }
}
/* Map an owner tid to its bucket index. REMOTE_BUCKETS is a power of two,
 * so the modulo compiles to the same mask the original wrote by hand. */
static inline unsigned hb(int tid){
    return (unsigned)tid % REMOTE_BUCKETS;
}
/* Push one freed block onto the remote queue of its owning thread.
 *
 * class_idx: tiny size class 0..6 (other values are rejected).
 * ptr:       block to return; its first word is overwritten with the freelist link.
 * owner_tid: kernel tid of the thread whose slab owns the block.
 *
 * Returns 1 on success, 0 on invalid arguments or record-allocation failure.
 * Thread-safe: the owner's hash bucket is locked for the whole splice. */
int pool_remote_push(int class_idx, void* ptr, int owner_tid){
    if (class_idx < 0 || class_idx > 6 || ptr == NULL) return 0;
    pthread_once(&g_once, rq_init);
    unsigned b = hb(owner_tid);
    pthread_mutex_lock(&g_locks[b]);
    RemoteRec* r = g_buckets[b];
    while (r && r->tid != owner_tid) r = r->next;
    if (!r){
        r = (RemoteRec*)calloc(1, sizeof(RemoteRec));
        if (!r){
            /* OOM: report failure to the caller instead of dereferencing NULL
             * (the original crashed here while still holding the bucket lock). */
            pthread_mutex_unlock(&g_locks[b]);
            return 0;
        }
        r->tid = owner_tid; r->next = g_buckets[b]; g_buckets[b] = r;
    }
    /* Intrusive push: block's first word becomes the link to the old head. */
    *(void**)ptr = r->head[class_idx];
    r->head[class_idx] = ptr;
    r->count[class_idx]++;
    pthread_mutex_unlock(&g_locks[b]);
    return 1;
}
// Drain up to a small batch for this thread and class
int pool_remote_pop_chain(int class_idx, int max_take, void** out_chain){
if (class_idx < 0 || class_idx > 6 || out_chain==NULL) return 0;
pthread_once(&g_once, rq_init);
int mytid = (int)syscall(SYS_gettid);
unsigned b = hb(mytid);
pthread_mutex_lock(&g_locks[b]);
RemoteRec* r = g_buckets[b];
while (r && r->tid != mytid) r = r->next;
int drained = 0;
if (r){
// Pop up to max_take nodes and return chain
void* head = r->head[class_idx];
int batch = 0; if (max_take <= 0) max_take = 32;
void* chain = NULL; void* tail = NULL;
while (head && batch < max_take){
void* nxt = *(void**)head;
if (!chain){ chain = head; tail = head; }
else { *(void**)tail = head; tail = head; }
head = nxt; batch++;
}
r->head[class_idx] = head;
r->count[class_idx] -= batch;
drained = batch;
*out_chain = chain;
}
pthread_mutex_unlock(&g_locks[b]);
return drained;
}