Root cause: header-based class indexing (HEADER_CLASSIDX=1) wrote a 1-byte header during allocation, but linear carve/refill and the initial slab capacity still used bare class block sizes. This mismatch could overrun a slab's usable space and corrupt freelists, causing a reproducible SEGV at ~100k iterations.

Changes
- Superslab: compute capacity with the effective stride (block_size + header for classes 0..6; class7 remains headerless) in superslab_init_slab(). Add a debug-only bound check in superslab_alloc_from_slab() to fail fast if a carve would exceed the slab's usable bytes.
- Refill (non-P0 and P0): use the header-aware stride for all linear carving and TLS window bump operations. Ensure alignment/validation in tiny_refill_opt.h also uses the stride, not the raw class size. A sketch of the stride idea follows this list.
- Drain: keep the existing defense-in-depth for the remote sentinel and sanitize nodes before splicing them into the freelist (already present).

Notes
- This unifies the memory layout across alloc/linear-carve/refill under a single stride definition and keeps class7 (1024B) headerless as designed.
- Debug builds add fail-fast checks; release builds stay lean.

Next
- Re-run the Tiny benches (256/1024B) in debug to confirm stability, then in release. If a crash persists, bisect with HAKMEM_TINY_P0_BATCH_REFILL=0 to isolate the P0 batch carve, and continue reducing branch misses as planned.
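For illustration, a minimal sketch of the unified stride and the debug-only carve bound check described above. The helper names (tiny_class_stride, carve_bound_check) and the block-size table are hypothetical stand-ins, not the project's actual code; only the 1-byte header for classes 0..6 and the headerless class7 come from the change itself.

#include <assert.h>
#include <stddef.h>

// Hypothetical per-class block sizes; class 7 (1024B) is headerless by design.
static const size_t k_class_block_size[8] = {16, 32, 64, 128, 256, 384, 512, 1024};

// Effective stride: block size plus the 1-byte class-index header
// (HEADER_CLASSIDX=1) for classes 0..6; class 7 stays headerless.
static inline size_t tiny_class_stride(int cls) {
    return k_class_block_size[cls] + (cls == 7 ? 0 : 1);
}

// Debug-only fail-fast check before linearly carving n_blocks at carve_off:
// the carve, measured in strides (not bare block sizes), must fit inside
// the slab's usable bytes. assert() compiles away under NDEBUG, matching
// "debug builds fail fast, release builds stay lean".
static inline void carve_bound_check(size_t carve_off, size_t n_blocks,
                                     int cls, size_t usable_bytes) {
    (void)carve_off; (void)n_blocks; (void)cls; (void)usable_bytes;
    assert(carve_off + n_blocks * tiny_class_stride(cls) <= usable_bytes);
}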
// mmap_trace_patch.h - Lightweight mmap instrumentation

#ifndef MMAP_TRACE_PATCH_H
#define MMAP_TRACE_PATCH_H

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

// Global counters for mmap sources
static _Atomic uint64_t g_mmap_count_superslab = 0;
static _Atomic uint64_t g_mmap_count_mid = 0;
static _Atomic uint64_t g_mmap_count_l25 = 0;
static _Atomic uint64_t g_mmap_count_ace = 0;
static _Atomic uint64_t g_mmap_count_final = 0; // Final fallback in hak_alloc_api
static _Atomic uint64_t g_mmap_count_other = 0;

// Helper macros for instrumentation
#define MMAP_TRACE_SUPERSLAB() atomic_fetch_add(&g_mmap_count_superslab, 1)
#define MMAP_TRACE_MID()       atomic_fetch_add(&g_mmap_count_mid, 1)
#define MMAP_TRACE_L25()       atomic_fetch_add(&g_mmap_count_l25, 1)
#define MMAP_TRACE_ACE()       atomic_fetch_add(&g_mmap_count_ace, 1)
#define MMAP_TRACE_FINAL()     atomic_fetch_add(&g_mmap_count_final, 1)
#define MMAP_TRACE_OTHER()     atomic_fetch_add(&g_mmap_count_other, 1)

// Print summary
static inline void mmap_trace_print_summary(void) {
    uint64_t ss    = atomic_load(&g_mmap_count_superslab);
    uint64_t mid   = atomic_load(&g_mmap_count_mid);
    uint64_t l25   = atomic_load(&g_mmap_count_l25);
    uint64_t ace   = atomic_load(&g_mmap_count_ace);
    uint64_t final = atomic_load(&g_mmap_count_final);
    uint64_t other = atomic_load(&g_mmap_count_other);
    uint64_t total = ss + mid + l25 + ace + final + other;
    if (total == 0) return; // Nothing traced; avoid division by zero below

    fprintf(stderr, "\n=== MMAP Source Breakdown ===\n");
    fprintf(stderr, "SuperSlab:      %6" PRIu64 " (%.1f%%)\n", ss, ss * 100.0 / total);
    fprintf(stderr, "Mid allocator:  %6" PRIu64 " (%.1f%%)\n", mid, mid * 100.0 / total);
    fprintf(stderr, "L25 pool:       %6" PRIu64 " (%.1f%%)\n", l25, l25 * 100.0 / total);
    fprintf(stderr, "ACE:            %6" PRIu64 " (%.1f%%)\n", ace, ace * 100.0 / total);
    fprintf(stderr, "Final fallback: %6" PRIu64 " (%.1f%%) ← Header overflow!\n", final, final * 100.0 / total);
    fprintf(stderr, "Other:          %6" PRIu64 " (%.1f%%)\n", other, other * 100.0 / total);
    fprintf(stderr, "----------------------------\n");
    fprintf(stderr, "TOTAL:          %6" PRIu64 "\n", total);
}

#endif // MMAP_TRACE_PATCH_H
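One way to wire the header up, as a hedged usage sketch: the superslab_mmap_region wrapper and the main() harness are hypothetical; only the MMAP_TRACE_* macros and mmap_trace_print_summary() come from the header above.

// usage_sketch.c - hypothetical wiring, not the allocator's real call sites
#include <stddef.h>
#include <stdlib.h>
#include <sys/mman.h>
#include "mmap_trace_patch.h"

// Hypothetical wrapper: count every SuperSlab-backed mapping before issuing it.
static void *superslab_mmap_region(size_t len) {
    MMAP_TRACE_SUPERSLAB();
    return mmap(NULL, len, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

int main(void) {
    atexit(mmap_trace_print_summary);   // Dump the source breakdown at exit
    void *p = superslab_mmap_region(1u << 20);
    if (p != MAP_FAILED) munmap(p, 1u << 20);
    return 0;
}

Because the counters are _Atomic and bumped with atomic_fetch_add, the macros are safe to drop into multi-threaded allocation paths without extra locking.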