diff --git a/core/front/tiny_unified_cache.c b/core/front/tiny_unified_cache.c
index 348f4869..dbf55293 100644
--- a/core/front/tiny_unified_cache.c
+++ b/core/front/tiny_unified_cache.c
@@ -4,15 +4,15 @@
 #include "../tiny_tls.h"                    // Phase 23-E: TinyTLSSlab, TinySlabMeta
 #include "../tiny_box_geometry.h"           // Phase 23-E: tiny_stride_for_class, tiny_slab_base_for_geometry
 #include "../box/tiny_next_ptr_box.h"       // Phase 23-E: tiny_next_read (freelist traversal)
-#include "../hakmem_tiny_superslab.h"       // Phase 23-E: SuperSlab
-#include "../superslab/superslab_inline.h"  // Phase 23-E: ss_active_add
+#include "../hakmem_tiny_superslab.h"       // Phase 23-E: SuperSlab, superslab_refill()
+#include "../superslab/superslab_inline.h"  // Phase 23-E: ss_active_add, slab_index_for, ss_slabs_capacity
+#include "../hakmem_super_registry.h"       // For hak_super_lookup (pointer→SuperSlab)
 #include "../box/pagefault_telemetry_box.h" // Phase 24: Box PageFaultTelemetry (Tiny page touch stats)
 #include <stdio.h>
 #include <stdlib.h>
 
 // Phase 23-E: Forward declarations
 extern __thread TinyTLSSlab g_tls_slabs[TINY_NUM_CLASSES]; // From hakmem_tiny_superslab.c
-extern int superslab_refill(int class_idx); // From hakmem_tiny_superslab.c
 
 // ============================================================================
 // TLS Variables (defined here, extern in header)
@@ -131,6 +131,136 @@ void unified_cache_print_stats(void) {
 // Phase 23-E: Direct SuperSlab Carve (TLS SLL Bypass)
 // ============================================================================
 
+// Fail-fast helper: verify that a candidate BASE pointer belongs to a valid
+// Tiny slab within a SuperSlab. This is intentionally defensive and only
+// compiled in debug builds to avoid hot-path overhead in release.
+static inline int unified_refill_validate_base(int class_idx,
+                                               TinyTLSSlab* tls,
+                                               TinySlabMeta* meta,
+                                               void* base,
+                                               const char* stage)
+{
+#if HAKMEM_BUILD_RELEASE
+    (void)class_idx; (void)tls; (void)meta; (void)base; (void)stage;
+    return 1;
+#else
+    if (!base) {
+        fprintf(stderr,
+                "[UNIFIED_REFILL_CORRUPT] stage=%s cls=%d base=NULL tls_ss=%p meta=%p\n",
+                stage ? stage : "unified_refill",
+                class_idx,
+                (void*)(tls ? tls->ss : NULL),
+                (void*)meta);
+        abort();
+    }
+
+    SuperSlab* tls_ss = tls ? tls->ss : NULL;
+    if (!tls_ss || tls_ss->magic != SUPERSLAB_MAGIC) {
+        fprintf(stderr,
+                "[UNIFIED_REFILL_CORRUPT] stage=%s cls=%d base=%p tls_ss=%p meta=%p (invalid TLS ss)\n",
+                stage ? stage : "unified_refill",
+                class_idx,
+                base,
+                (void*)tls_ss,
+                (void*)meta);
+        abort();
+    }
+
+    // Cross-check registry lookup for additional safety.
+    SuperSlab* ss_lookup = hak_super_lookup(base);
+    if (!ss_lookup || ss_lookup->magic != SUPERSLAB_MAGIC) {
+        fprintf(stderr,
+                "[UNIFIED_REFILL_CORRUPT] stage=%s cls=%d base=%p tls_ss=%p lookup_ss=%p meta=%p\n",
+                stage ? stage : "unified_refill",
+                class_idx,
+                base,
+                (void*)tls_ss,
+                (void*)ss_lookup,
+                (void*)meta);
+        abort();
+    }
+    if (ss_lookup != tls_ss) {
+        fprintf(stderr,
+                "[UNIFIED_REFILL_CORRUPT] stage=%s cls=%d base=%p tls_ss=%p lookup_ss=%p (mismatch)\n",
+                stage ? stage : "unified_refill",
+                class_idx,
+                base,
+                (void*)tls_ss,
+                (void*)ss_lookup);
+        abort();
+    }
+
+    int slab_idx = tls ? (int)tls->slab_idx : -1;
+    int cap = ss_slabs_capacity(tls_ss);
+    if (slab_idx < 0 || slab_idx >= cap) {
+        fprintf(stderr,
+                "[UNIFIED_REFILL_CORRUPT] stage=%s cls=%d base=%p tls_ss=%p slab_idx=%d cap=%d meta_cap=%u meta_used=%u meta_carved=%u\n",
+                stage ? stage : "unified_refill",
+                class_idx,
+                base,
+                (void*)tls_ss,
+                slab_idx,
+                cap,
+                meta ? meta->capacity : 0u,
+                meta ? (unsigned)meta->used : 0u,
+                meta ? (unsigned)meta->carved : 0u);
+        abort();
+    }
+
+    // Ensure meta matches TLS view for this slab.
+    TinySlabMeta* expected_meta = &tls_ss->slabs[slab_idx];
+    if (meta && meta != expected_meta) {
+        fprintf(stderr,
+                "[UNIFIED_REFILL_CORRUPT] stage=%s cls=%d base=%p tls_ss=%p slab_idx=%d meta=%p expected_meta=%p\n",
+                stage ? stage : "unified_refill",
+                class_idx,
+                base,
+                (void*)tls_ss,
+                slab_idx,
+                (void*)meta,
+                (void*)expected_meta);
+        abort();
+    }
+
+    uint8_t* slab_base = tiny_slab_base_for_geometry(tls_ss, slab_idx);
+    size_t stride = tiny_stride_for_class(class_idx);
+    size_t usable = tiny_usable_bytes_for_slab(slab_idx);
+    uint8_t* slab_end = slab_base + usable;
+
+    if ((uint8_t*)base < slab_base || (uint8_t*)base >= slab_end) {
+        fprintf(stderr,
+                "[UNIFIED_REFILL_CORRUPT] stage=%s cls=%d base=%p range=[%p,%p) stride=%zu meta_cap=%u meta_used=%u meta_carved=%u\n",
+                stage ? stage : "unified_refill",
+                class_idx,
+                base,
+                (void*)slab_base,
+                (void*)slab_end,
+                stride,
+                meta ? meta->capacity : 0u,
+                meta ? (unsigned)meta->used : 0u,
+                meta ? (unsigned)meta->carved : 0u);
+        abort();
+    }
+
+    ptrdiff_t offset = (uint8_t*)base - slab_base;
+    if (offset % (ptrdiff_t)stride != 0) {
+        fprintf(stderr,
+                "[UNIFIED_REFILL_CORRUPT] stage=%s cls=%d base=%p offset=%td stride=%zu (misaligned) meta_cap=%u meta_used=%u meta_carved=%u\n",
+                stage ? stage : "unified_refill",
+                class_idx,
+                base,
+                offset,
+                stride,
+                meta ? meta->capacity : 0u,
+                meta ? (unsigned)meta->used : 0u,
+                meta ? (unsigned)meta->carved : 0u);
+        abort();
+    }
+
+    return 1;
+#endif
+}
+
 // Batch refill from SuperSlab (called on cache miss)
 // Returns: BASE pointer (first block), or NULL if failed
 // Design: Direct carve from SuperSlab to array (no TLS SLL intermediate layer)
@@ -171,6 +301,9 @@ void* unified_cache_refill(int class_idx) {
         void* p = m->freelist;
         m->freelist = tiny_next_read(class_idx, p);
 
+        unified_refill_validate_base(class_idx, tls, m, p,
+                                     "unified_refill_freelist");
+
         // PageFaultTelemetry: record page touch for this BASE
         pagefault_telemetry_touch(class_idx, p);
 
@@ -186,6 +319,9 @@ void* unified_cache_refill(int class_idx) {
         // Linear carve (fresh block, no freelist link)
         void* p = (void*)(base + ((size_t)m->carved * bs));
 
+        unified_refill_validate_base(class_idx, tls, m, p,
+                                     "unified_refill_carve");
+
         // PageFaultTelemetry: record page touch for this BASE
         pagefault_telemetry_touch(class_idx, p);
 
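Reviewer note (sketch, not part of the patch): the invariant that unified_refill_validate_base enforces on the hot paths above boils down to a range check plus a stride-alignment check against the slab base. The standalone program below shows just that check in isolation; base_is_valid, USABLE, and STRIDE are illustrative stand-ins, not HAKMEM symbols.

/* Standalone sketch: range + stride-alignment validation of a candidate BASE. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

static int base_is_valid(const uint8_t* slab_base, size_t usable,
                         size_t stride, const uint8_t* candidate)
{
    if (candidate < slab_base || candidate >= slab_base + usable) {
        return 0;                                  /* outside the slab's data range */
    }
    ptrdiff_t offset = candidate - slab_base;
    return (offset % (ptrdiff_t)stride) == 0;      /* must sit on a stride boundary */
}

int main(void)
{
    enum { USABLE = 4096, STRIDE = 64 };           /* toy geometry */
    uint8_t* slab = malloc(USABLE);
    if (!slab) return 1;

    printf("%d\n", base_is_valid(slab, USABLE, STRIDE, slab + 128));    /* 1: in range, aligned */
    printf("%d\n", base_is_valid(slab, USABLE, STRIDE, slab + 100));    /* 0: misaligned */
    printf("%d\n", base_is_valid(slab, USABLE, STRIDE, slab + USABLE)); /* 0: past the end */

    free(slab);
    return 0;
}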
diff --git a/core/hakmem_tiny_superslab.h b/core/hakmem_tiny_superslab.h
index 4465bfd8..40be9617 100644
--- a/core/hakmem_tiny_superslab.h
+++ b/core/hakmem_tiny_superslab.h
@@ -118,6 +118,11 @@ int superslab_find_free_slab(SuperSlab* ss);
 // Free a SuperSlab (unregister and return to pool or munmap)
 void superslab_free(SuperSlab* ss);
 
+// Refill TLS slab for given tiny class from shared SuperSlab pool.
+// Returns: SuperSlab* on success (also updates g_tls_slabs[class_idx]),
+//          NULL on failure (no change to TLS state).
+SuperSlab* superslab_refill(int class_idx);
+
 // Statistics
 void superslab_print_stats(SuperSlab* ss);
 
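Reviewer note (sketch, not part of the patch): the declaration above replaces the old int status return of superslab_refill with the attached SuperSlab* (or NULL on failure). The self-contained mock below uses stand-in types (the dummy SuperSlab struct and superslab_refill_mock are assumptions, not project code) purely to illustrate how a caller tests and uses the new return value.

/* Mock of the pointer-returning refill style: success yields the SuperSlab. */
#include <stdio.h>

typedef struct SuperSlab { int id; } SuperSlab;    /* stand-in type */

static SuperSlab g_pool_slab = { 42 };

static SuperSlab* superslab_refill_mock(int class_idx)
{
    return (class_idx >= 0) ? &g_pool_slab : NULL; /* NULL => refill failed */
}

int main(void)
{
    SuperSlab* ss = superslab_refill_mock(3);
    if (!ss) {
        puts("refill failed, TLS state unchanged");
        return 1;
    }
    printf("refill ok, attached SuperSlab id=%d\n", ss->id);
    return 0;
}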
diff --git a/core/superslab/superslab_inline.h b/core/superslab/superslab_inline.h
index 9a46eebb..ad3305a8 100644
--- a/core/superslab/superslab_inline.h
+++ b/core/superslab/superslab_inline.h
@@ -2,6 +2,7 @@
 #define SUPERSLAB_INLINE_H
 
 #include "superslab_types.h"
+#include "../tiny_box_geometry.h" // Box 3 geometry helpers (stride/base/capacity)
 
 // Forward declaration for unsafe remote drain used by refill/handle paths
 // Implemented in hakmem_tiny_superslab.c
@@ -19,20 +20,13 @@ static inline int ss_slabs_capacity(SuperSlab* ss) {
 }
 
 // Compute slab base pointer for given (ss, slab_idx).
+// Box 5 wrapper: delegate to Box 3 canonical geometry helper.
 static inline uint8_t* tiny_slab_base_for(SuperSlab* ss, int slab_idx)
 {
-    if (!ss || slab_idx < 0) return NULL;
-
-    if (slab_idx == 0) {
-        return (uint8_t*)ss + SUPERSLAB_SLAB0_DATA_OFFSET;
-    }
-
-    size_t off = SUPERSLAB_SLAB0_DATA_OFFSET + (size_t)slab_idx * SLAB_SIZE;
-    size_t ss_size = (size_t)1 << ss->lg_size;
-    if (off >= ss_size) {
+    if (!ss || slab_idx < 0) {
         return NULL;
     }
-    return (uint8_t*)ss + off;
+    return tiny_slab_base_for_geometry(ss, slab_idx);
 }
 
 // Compute slab index for a pointer inside ss.
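Reviewer note (sketch, not part of the patch): the change above reduces tiny_slab_base_for to a thin wrapper that keeps only the argument guard and delegates the offset math to the canonical Box 3 helper, so the geometry formula lives in exactly one place. The toy program below illustrates that single-source-of-truth pattern; toy_slab_base_canonical, TOY_HEADER_BYTES, and TOY_SLAB_BYTES are made-up stand-ins for the real geometry constants.

/* Toy version of the wrapper-delegation pattern. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_HEADER_BYTES 256u   /* stand-in for the slab-0 data offset */
#define TOY_SLAB_BYTES   1024u  /* stand-in for the per-slab size */

/* Canonical geometry helper: the only place the offset formula appears. */
static inline uint8_t* toy_slab_base_canonical(uint8_t* region, int slab_idx)
{
    return region + TOY_HEADER_BYTES + (size_t)slab_idx * TOY_SLAB_BYTES;
}

/* Thin wrapper: validates arguments, then delegates. */
static inline uint8_t* toy_slab_base_for(uint8_t* region, int slab_idx)
{
    if (!region || slab_idx < 0) {
        return NULL;
    }
    return toy_slab_base_canonical(region, slab_idx);
}

int main(void)
{
    static uint8_t region[TOY_HEADER_BYTES + 4 * TOY_SLAB_BYTES];
    uint8_t* b2 = toy_slab_base_for(region, 2);
    printf("slab 2 offset = %td\n", b2 - region);                    /* 256 + 2*1024 = 2304 */
    printf("bad index -> %p\n", (void*)toy_slab_base_for(region, -1)); /* NULL */
    return 0;
}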