Files
hakmem/core/pool_refill_legacy.c.bak
Moe Charm (CI) 1010a961fb Tiny: fix header/stride mismatch and harden refill paths
- Root cause: header-based class indexing (HEADER_CLASSIDX=1) wrote a 1-byte
  header during allocation, but linear carve/refill and initial slab capacity
  still used bare class block sizes. This mismatch could overrun slab usable
  space and corrupt freelists, causing reproducible SEGV at ~100k iters.
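A minimal sketch of the header-aware stride, assuming a hypothetical tiny_effective_stride() helper; the names and constants below are illustrative, not the actual hakmem definitions:

#include <stddef.h>

#define TINY_HEADER_SIZE 1   /* 1-byte class header, per the root cause above   */
#define TINY_NUM_CLASSES 8   /* classes 0..7; class 7 (1024B) stays headerless  */

/* Effective stride = distance between consecutive carved blocks. */
static inline size_t tiny_effective_stride(size_t block_size, int class_idx) {
    /* Classes 0..6 carry a 1-byte header in front of the user block;
     * class 7 is headerless, so its stride equals the raw block size. */
    return (class_idx < TINY_NUM_CLASSES - 1) ? block_size + TINY_HEADER_SIZE
                                              : block_size;
}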

Changes
- Superslab: compute capacity with the effective stride (block_size + header for
  classes 0..6; class7 remains headerless) in superslab_init_slab(). Add a
  debug-only bound check in superslab_alloc_from_slab() to fail fast if a carve
  would exceed the usable bytes (see the sketch after this list).
- Refill (non-P0 and P0): use header-aware stride for all linear carving and
  TLS window bump operations. Ensure alignment/validation in tiny_refill_opt.h
  also uses stride, not raw class size.
- Drain: keep existing defense-in-depth for remote sentinel and sanitize nodes
  before splicing into freelist (already present).
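A minimal sketch of the stride-aware carve with the debug-only fail-fast check described above; the slab fields (base, usable_bytes, carve_offset) are assumed names, not the actual superslab layout:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
    uint8_t *base;          /* start of the slab's usable region (assumed)    */
    size_t   usable_bytes;  /* bytes available for carving (assumed)          */
    size_t   carve_offset;  /* next carve position within the usable region   */
} tiny_slab_sketch;

static void* tiny_carve_one(tiny_slab_sketch* s, size_t stride) {
    /* Debug builds fail fast if this carve would run past the usable region. */
    assert(s->carve_offset + stride <= s->usable_bytes);
    if (s->carve_offset + stride > s->usable_bytes) {
        return NULL;               /* release builds: refuse rather than overrun */
    }
    void* block = s->base + s->carve_offset;
    s->carve_offset += stride;     /* bump by the effective stride, not the raw class size */
    return block;
}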

Notes
- This unifies the memory layout across alloc/linear-carve/refill with a single
  stride definition and keeps class7 (1024B) headerless as designed.
- Debug builds add fail-fast checks; release builds remain lean.
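As a concrete illustration (the numbers are assumed, not taken from the hakmem sources): with a 64 KiB usable region and the 256B class, a capacity computed from the raw block size gives 65536 / 256 = 256 blocks, while the header-aware stride of 257B fits only 65536 / 257 = 255; carving 256 header-prefixed blocks would run 256 bytes past the usable region, which is exactly the kind of overrun and freelist corruption described above.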

Next
- Re-run the Tiny benches (256/1024B) in debug to confirm stability, then in
  release. If a crash still persists, bisect with HAKMEM_TINY_P0_BATCH_REFILL=0
  to isolate the P0 batch carve, and continue reducing branch misses as planned.
2025-11-09 18:55:50 +09:00

105 lines
2.7 KiB
C

#include "pool_refill.h"
#include "pool_tls.h"
#include <sys/mman.h>
#include <stdint.h>
#include <errno.h>
// Get refill count from Box 1
extern int pool_get_refill_count(int class_idx);
// Refill and return first block
void* pool_refill_and_alloc(int class_idx) {
int count = pool_get_refill_count(class_idx);
if (count <= 0) return NULL;
// Batch allocate from existing Pool backend
void* chain = backend_batch_carve(class_idx, count);
if (!chain) return NULL; // OOM
// Pop first block for return
void* ret = chain;
chain = *(void**)chain;
count--;
#if POOL_USE_HEADERS
// Write header for the block we're returning
*((uint8_t*)ret - POOL_HEADER_SIZE) = POOL_MAGIC | class_idx;
#endif
// Install rest in TLS (if any)
if (count > 0 && chain) {
pool_install_chain(class_idx, chain, count);
}
return ret;
}
// Backend batch carve - Phase 1: Direct mmap allocation
void* backend_batch_carve(int class_idx, int count) {
    if (class_idx < 0 || class_idx >= POOL_SIZE_CLASSES || count <= 0) {
        return NULL;
    }

    // Get the class size
    size_t block_size = POOL_CLASS_SIZES[class_idx];

    // For Phase 1: Allocate a single large chunk via mmap
    // and carve it into blocks
#if POOL_USE_HEADERS
    size_t total_block_size = block_size + POOL_HEADER_SIZE;
#else
    size_t total_block_size = block_size;
#endif

    // Allocate enough for all requested blocks
    size_t total_size = total_block_size * count;

    // Round up to page size
    size_t page_size = 4096;
    total_size = (total_size + page_size - 1) & ~(page_size - 1);

    // Allocate memory via mmap
    void* chunk = mmap(NULL, total_size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (chunk == MAP_FAILED) {
        return NULL;
    }

    // Carve into blocks and chain them
    void* head = NULL;
    void* tail = NULL;
    char* ptr = (char*)chunk;
    for (int i = 0; i < count; i++) {
#if POOL_USE_HEADERS
        // Skip header space - user data starts after header
        void* user_ptr = ptr + POOL_HEADER_SIZE;
#else
        void* user_ptr = ptr;
#endif
        // Chain the blocks
        if (!head) {
            head = user_ptr;
            tail = user_ptr;
        } else {
            *(void**)tail = user_ptr;
            tail = user_ptr;
        }
        // Move to next block
        ptr += total_block_size;
        // Stop if we'd go past the allocated chunk
        if ((ptr + total_block_size) > ((char*)chunk + total_size)) {
            break;
        }
    }

    // Terminate chain
    if (tail) {
        *(void**)tail = NULL;
    }
    return head;
}
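A minimal usage sketch of the refill entry point above; tls_freelist_pop() and the surrounding front-end are hypothetical, only pool_refill_and_alloc() comes from this file:

/* Hypothetical front-end showing where pool_refill_and_alloc() fits. */
extern void* tls_freelist_pop(int class_idx);   /* assumed TLS fast-path helper */

void* pool_alloc_sketch(int class_idx) {
    void* p = tls_freelist_pop(class_idx);
    if (p) {
        return p;                  /* common case: block already cached in TLS */
    }
    /* Slow path: batch-carve from the backend, keep the surplus in TLS,
     * and hand the first block back to the caller. */
    return pool_refill_and_alloc(class_idx);
}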