diff --git a/Makefile b/Makefile index ce1a0dc4..b7b379e5 100644 --- a/Makefile +++ b/Makefile @@ -167,12 +167,12 @@ LDFLAGS += $(EXTRA_LDFLAGS) # Targets TARGET = test_hakmem -OBJS_BASE = hakmem.o hakmem_config.o hakmem_tiny_config.o hakmem_ucb1.o hakmem_bigcache.o hakmem_pool.o hakmem_l25_pool.o hakmem_site_rules.o hakmem_tiny.o hakmem_tiny_superslab.o tiny_sticky.o tiny_remote.o tiny_publish.o tiny_debug_ring.o hakmem_tiny_magazine.o hakmem_tiny_stats.o hakmem_tiny_sfc.o hakmem_tiny_query.o hakmem_tiny_rss.o hakmem_tiny_registry.o hakmem_tiny_remote_target.o hakmem_tiny_bg_spill.o tiny_adaptive_sizing.o hakmem_mid_mt.o hakmem_super_registry.o hakmem_elo.o hakmem_batch.o hakmem_p2.o hakmem_sizeclass_dist.o hakmem_evo.o hakmem_debug.o hakmem_sys.o hakmem_whale.o hakmem_policy.o hakmem_ace.o hakmem_ace_stats.o hakmem_prof.o hakmem_learner.o hakmem_size_hist.o hakmem_learn_log.o hakmem_syscall.o hakmem_ace_metrics.o hakmem_ace_ucb1.o hakmem_ace_controller.o tiny_fastcache.o core/box/free_local_box.o core/box/free_remote_box.o core/box/free_publish_box.o core/box/mailbox_box.o core/box/front_gate_box.o core/box/front_gate_classifier.o core/link_stubs.o test_hakmem.o +OBJS_BASE = hakmem.o hakmem_config.o hakmem_tiny_config.o hakmem_ucb1.o hakmem_bigcache.o hakmem_pool.o hakmem_l25_pool.o hakmem_site_rules.o hakmem_tiny.o hakmem_tiny_superslab.o tiny_sticky.o tiny_remote.o tiny_publish.o tiny_debug_ring.o hakmem_tiny_magazine.o hakmem_tiny_stats.o hakmem_tiny_sfc.o hakmem_tiny_query.o hakmem_tiny_rss.o hakmem_tiny_registry.o hakmem_tiny_remote_target.o hakmem_tiny_bg_spill.o tiny_adaptive_sizing.o hakmem_mid_mt.o hakmem_super_registry.o hakmem_elo.o hakmem_batch.o hakmem_p2.o hakmem_sizeclass_dist.o hakmem_evo.o hakmem_debug.o hakmem_sys.o hakmem_whale.o hakmem_policy.o hakmem_ace.o hakmem_ace_stats.o hakmem_prof.o hakmem_learner.o hakmem_size_hist.o hakmem_learn_log.o hakmem_syscall.o hakmem_ace_metrics.o hakmem_ace_ucb1.o hakmem_ace_controller.o tiny_fastcache.o core/box/superslab_expansion_box.o core/box/integrity_box.o core/box/free_local_box.o core/box/free_remote_box.o core/box/free_publish_box.o core/box/mailbox_box.o core/box/front_gate_box.o core/box/front_gate_classifier.o core/link_stubs.o test_hakmem.o OBJS = $(OBJS_BASE) # Shared library SHARED_LIB = libhakmem.so -SHARED_OBJS = hakmem_shared.o hakmem_config_shared.o hakmem_tiny_config_shared.o hakmem_ucb1_shared.o hakmem_bigcache_shared.o hakmem_pool_shared.o hakmem_l25_pool_shared.o hakmem_site_rules_shared.o hakmem_tiny_shared.o hakmem_tiny_superslab_shared.o core/box/mailbox_box_shared.o core/box/front_gate_box_shared.o core/box/free_local_box_shared.o core/box/free_remote_box_shared.o core/box/free_publish_box_shared.o tiny_sticky_shared.o tiny_remote_shared.o tiny_publish_shared.o tiny_debug_ring_shared.o hakmem_tiny_magazine_shared.o hakmem_tiny_stats_shared.o hakmem_tiny_sfc_shared.o hakmem_tiny_query_shared.o hakmem_tiny_rss_shared.o hakmem_tiny_registry_shared.o hakmem_tiny_remote_target_shared.o hakmem_tiny_bg_spill_shared.o tiny_adaptive_sizing_shared.o hakmem_mid_mt_shared.o hakmem_super_registry_shared.o hakmem_elo_shared.o hakmem_batch_shared.o hakmem_p2_shared.o hakmem_sizeclass_dist_shared.o hakmem_evo_shared.o hakmem_debug_shared.o hakmem_sys_shared.o hakmem_whale_shared.o hakmem_policy_shared.o hakmem_ace_shared.o hakmem_ace_stats_shared.o hakmem_ace_controller_shared.o hakmem_ace_metrics_shared.o hakmem_ace_ucb1_shared.o hakmem_prof_shared.o hakmem_learner_shared.o hakmem_size_hist_shared.o hakmem_learn_log_shared.o hakmem_syscall_shared.o tiny_fastcache_shared.o +SHARED_OBJS = hakmem_shared.o hakmem_config_shared.o hakmem_tiny_config_shared.o hakmem_ucb1_shared.o hakmem_bigcache_shared.o hakmem_pool_shared.o hakmem_l25_pool_shared.o hakmem_site_rules_shared.o hakmem_tiny_shared.o hakmem_tiny_superslab_shared.o core/box/superslab_expansion_box_shared.o core/box/integrity_box_shared.o core/box/mailbox_box_shared.o core/box/front_gate_box_shared.o core/box/free_local_box_shared.o core/box/free_remote_box_shared.o core/box/free_publish_box_shared.o tiny_sticky_shared.o tiny_remote_shared.o tiny_publish_shared.o tiny_debug_ring_shared.o hakmem_tiny_magazine_shared.o hakmem_tiny_stats_shared.o hakmem_tiny_sfc_shared.o hakmem_tiny_query_shared.o hakmem_tiny_rss_shared.o hakmem_tiny_registry_shared.o hakmem_tiny_remote_target_shared.o hakmem_tiny_bg_spill_shared.o tiny_adaptive_sizing_shared.o hakmem_mid_mt_shared.o hakmem_super_registry_shared.o hakmem_elo_shared.o hakmem_batch_shared.o hakmem_p2_shared.o hakmem_sizeclass_dist_shared.o hakmem_evo_shared.o hakmem_debug_shared.o hakmem_sys_shared.o hakmem_whale_shared.o hakmem_policy_shared.o hakmem_ace_shared.o hakmem_ace_stats_shared.o hakmem_ace_controller_shared.o hakmem_ace_metrics_shared.o hakmem_ace_ucb1_shared.o hakmem_prof_shared.o hakmem_learner_shared.o hakmem_size_hist_shared.o hakmem_learn_log_shared.o hakmem_syscall_shared.o tiny_fastcache_shared.o # Pool TLS Phase 1 (enable with POOL_TLS_PHASE1=1) ifeq ($(POOL_TLS_PHASE1),1) @@ -191,7 +191,7 @@ endif # Benchmark targets BENCH_HAKMEM = bench_allocators_hakmem BENCH_SYSTEM = bench_allocators_system -BENCH_HAKMEM_OBJS_BASE = hakmem.o hakmem_config.o hakmem_tiny_config.o hakmem_ucb1.o hakmem_bigcache.o hakmem_pool.o hakmem_l25_pool.o hakmem_site_rules.o hakmem_tiny.o hakmem_tiny_superslab.o tiny_sticky.o tiny_remote.o tiny_publish.o tiny_debug_ring.o hakmem_tiny_magazine.o hakmem_tiny_stats.o hakmem_tiny_sfc.o hakmem_tiny_query.o hakmem_tiny_rss.o hakmem_tiny_registry.o hakmem_tiny_remote_target.o hakmem_tiny_bg_spill.o tiny_adaptive_sizing.o hakmem_mid_mt.o hakmem_super_registry.o hakmem_elo.o hakmem_batch.o hakmem_p2.o hakmem_sizeclass_dist.o hakmem_evo.o hakmem_debug.o hakmem_sys.o hakmem_whale.o hakmem_policy.o hakmem_ace.o hakmem_ace_stats.o hakmem_prof.o hakmem_learner.o hakmem_size_hist.o hakmem_learn_log.o hakmem_syscall.o hakmem_ace_metrics.o hakmem_ace_ucb1.o hakmem_ace_controller.o tiny_fastcache.o core/box/free_local_box.o core/box/free_remote_box.o core/box/free_publish_box.o core/box/mailbox_box.o core/box/front_gate_box.o core/box/front_gate_classifier.o core/link_stubs.o bench_allocators_hakmem.o +BENCH_HAKMEM_OBJS_BASE = hakmem.o hakmem_config.o hakmem_tiny_config.o hakmem_ucb1.o hakmem_bigcache.o hakmem_pool.o hakmem_l25_pool.o hakmem_site_rules.o hakmem_tiny.o hakmem_tiny_superslab.o tiny_sticky.o tiny_remote.o tiny_publish.o tiny_debug_ring.o hakmem_tiny_magazine.o hakmem_tiny_stats.o hakmem_tiny_sfc.o hakmem_tiny_query.o hakmem_tiny_rss.o hakmem_tiny_registry.o hakmem_tiny_remote_target.o hakmem_tiny_bg_spill.o tiny_adaptive_sizing.o hakmem_mid_mt.o hakmem_super_registry.o hakmem_elo.o hakmem_batch.o hakmem_p2.o hakmem_sizeclass_dist.o hakmem_evo.o hakmem_debug.o hakmem_sys.o hakmem_whale.o hakmem_policy.o hakmem_ace.o hakmem_ace_stats.o hakmem_prof.o hakmem_learner.o hakmem_size_hist.o hakmem_learn_log.o hakmem_syscall.o hakmem_ace_metrics.o hakmem_ace_ucb1.o hakmem_ace_controller.o tiny_fastcache.o core/box/superslab_expansion_box.o core/box/integrity_box.o core/box/free_local_box.o core/box/free_remote_box.o core/box/free_publish_box.o core/box/mailbox_box.o core/box/front_gate_box.o core/box/front_gate_classifier.o core/link_stubs.o bench_allocators_hakmem.o BENCH_HAKMEM_OBJS = $(BENCH_HAKMEM_OBJS_BASE) ifeq ($(POOL_TLS_PHASE1),1) BENCH_HAKMEM_OBJS += pool_tls.o pool_refill.o pool_tls_arena.o pool_tls_registry.o pool_tls_remote.o @@ -368,7 +368,7 @@ test-box-refactor: box-refactor ./larson_hakmem 10 8 128 1024 1 12345 4 # Phase 4: Tiny Pool benchmarks (properly linked with hakmem) -TINY_BENCH_OBJS_BASE = hakmem.o hakmem_config.o hakmem_tiny_config.o hakmem_ucb1.o hakmem_bigcache.o hakmem_pool.o hakmem_l25_pool.o hakmem_site_rules.o hakmem_tiny.o hakmem_tiny_superslab.o core/box/mailbox_box.o core/box/front_gate_box.o core/box/front_gate_classifier.o core/box/free_local_box.o core/box/free_remote_box.o core/box/free_publish_box.o tiny_sticky.o tiny_remote.o tiny_publish.o tiny_debug_ring.o hakmem_tiny_magazine.o hakmem_tiny_stats.o hakmem_tiny_sfc.o hakmem_tiny_query.o hakmem_tiny_rss.o hakmem_tiny_registry.o hakmem_tiny_remote_target.o hakmem_tiny_bg_spill.o tiny_adaptive_sizing.o hakmem_mid_mt.o hakmem_super_registry.o hakmem_elo.o hakmem_batch.o hakmem_p2.o hakmem_sizeclass_dist.o hakmem_evo.o hakmem_debug.o hakmem_sys.o hakmem_whale.o hakmem_policy.o hakmem_ace.o hakmem_ace_stats.o hakmem_prof.o hakmem_learner.o hakmem_size_hist.o hakmem_learn_log.o hakmem_syscall.o hakmem_ace_metrics.o hakmem_ace_ucb1.o hakmem_ace_controller.o tiny_fastcache.o core/link_stubs.o +TINY_BENCH_OBJS_BASE = hakmem.o hakmem_config.o hakmem_tiny_config.o hakmem_ucb1.o hakmem_bigcache.o hakmem_pool.o hakmem_l25_pool.o hakmem_site_rules.o hakmem_tiny.o hakmem_tiny_superslab.o core/box/superslab_expansion_box.o core/box/integrity_box.o core/box/mailbox_box.o core/box/front_gate_box.o core/box/front_gate_classifier.o core/box/free_local_box.o core/box/free_remote_box.o core/box/free_publish_box.o tiny_sticky.o tiny_remote.o tiny_publish.o tiny_debug_ring.o hakmem_tiny_magazine.o hakmem_tiny_stats.o hakmem_tiny_sfc.o hakmem_tiny_query.o hakmem_tiny_rss.o hakmem_tiny_registry.o hakmem_tiny_remote_target.o hakmem_tiny_bg_spill.o tiny_adaptive_sizing.o hakmem_mid_mt.o hakmem_super_registry.o hakmem_elo.o hakmem_batch.o hakmem_p2.o hakmem_sizeclass_dist.o hakmem_evo.o hakmem_debug.o hakmem_sys.o hakmem_whale.o hakmem_policy.o hakmem_ace.o hakmem_ace_stats.o hakmem_prof.o hakmem_learner.o hakmem_size_hist.o hakmem_learn_log.o hakmem_syscall.o hakmem_ace_metrics.o hakmem_ace_ucb1.o hakmem_ace_controller.o tiny_fastcache.o core/link_stubs.o TINY_BENCH_OBJS = $(TINY_BENCH_OBJS_BASE) ifeq ($(POOL_TLS_PHASE1),1) TINY_BENCH_OBJS += pool_tls.o pool_refill.o core/pool_tls_arena.o pool_tls_registry.o pool_tls_remote.o diff --git a/core/box/front_gate_box.d b/core/box/front_gate_box.d index f1904a40..4e838108 100644 --- a/core/box/front_gate_box.d +++ b/core/box/front_gate_box.d @@ -4,7 +4,9 @@ core/box/front_gate_box.o: core/box/front_gate_box.c \ core/tiny_alloc_fast_sfc.inc.h core/hakmem_tiny.h core/tiny_nextptr.h \ core/box/tls_sll_box.h core/box/../ptr_trace.h \ core/box/../hakmem_tiny_config.h core/box/../hakmem_build_flags.h \ - core/box/../tiny_region_id.h core/box/../hakmem_build_flags.h + core/box/../tiny_region_id.h core/box/../hakmem_build_flags.h \ + core/box/../hakmem_tiny_integrity.h core/box/../hakmem_tiny.h \ + core/box/ptr_conversion_box.h core/box/front_gate_box.h: core/hakmem_tiny.h: core/hakmem_build_flags.h: @@ -19,3 +21,6 @@ core/box/../hakmem_tiny_config.h: core/box/../hakmem_build_flags.h: core/box/../tiny_region_id.h: core/box/../hakmem_build_flags.h: +core/box/../hakmem_tiny_integrity.h: +core/box/../hakmem_tiny.h: +core/box/ptr_conversion_box.h: diff --git a/core/box/hak_alloc_api.inc.h b/core/box/hak_alloc_api.inc.h index 43237a0a..3dc826ca 100644 --- a/core/box/hak_alloc_api.inc.h +++ b/core/box/hak_alloc_api.inc.h @@ -21,6 +21,13 @@ static inline void* hak_os_map_boundary(size_t size, uintptr_t site_id) { __attribute__((always_inline)) inline void* hak_alloc_at(size_t size, hak_callsite_t site) { + static _Atomic uint64_t hak_alloc_call_count = 0; + uint64_t call_num = atomic_fetch_add(&hak_alloc_call_count, 1); + if (call_num > 14250 && call_num < 14280 && size <= 1024) { + fprintf(stderr, "[HAK_ALLOC_AT] call=%lu size=%zu\n", call_num, size); + fflush(stderr); + } + #if HAKMEM_DEBUG_TIMING HKM_TIME_START(t0); #endif @@ -29,12 +36,24 @@ inline void* hak_alloc_at(size_t size, hak_callsite_t site) { uintptr_t site_id = (uintptr_t)site; if (__builtin_expect(size <= TINY_MAX_SIZE, 1)) { + if (call_num > 14250 && call_num < 14280 && size <= 1024) { + fprintf(stderr, "[HAK_ALLOC_AT] call=%lu entering tiny path\n", call_num); + fflush(stderr); + } #if HAKMEM_DEBUG_TIMING HKM_TIME_START(t_tiny); #endif void* tiny_ptr = NULL; #ifdef HAKMEM_TINY_PHASE6_BOX_REFACTOR + if (call_num > 14250 && call_num < 14280 && size <= 1024) { + fprintf(stderr, "[HAK_ALLOC_AT] call=%lu calling hak_tiny_alloc_fast_wrapper\n", call_num); + fflush(stderr); + } tiny_ptr = hak_tiny_alloc_fast_wrapper(size); + if (call_num > 14250 && call_num < 14280 && size <= 1024) { + fprintf(stderr, "[HAK_ALLOC_AT] call=%lu hak_tiny_alloc_fast_wrapper returned %p\n", call_num, tiny_ptr); + fflush(stderr); + } #elif defined(HAKMEM_TINY_PHASE6_ULTRA_SIMPLE) tiny_ptr = hak_tiny_alloc_ultra_simple(size); #elif defined(HAKMEM_TINY_PHASE6_METADATA) diff --git a/core/box/hak_wrappers.inc.h b/core/box/hak_wrappers.inc.h index af40f9ef..64e7be53 100644 --- a/core/box/hak_wrappers.inc.h +++ b/core/box/hak_wrappers.inc.h @@ -45,6 +45,17 @@ extern __thread void* g_tls_sll_head[TINY_NUM_CLASSES]; extern int g_jemalloc_loaded; // Cached during hak_init_impl(), defined in hakmem.c void* malloc(size_t size) { + static _Atomic uint64_t malloc_count = 0; + uint64_t count = atomic_fetch_add(&malloc_count, 1); + + // CRITICAL DEBUG: If this is near crashing range, bail to libc + if (__builtin_expect(count >= 14270 && count <= 14285, 0)) { + extern void* __libc_malloc(size_t); + fprintf(stderr, "[MALLOC_WRAPPER] count=%lu size=%zu - BAILOUT TO LIBC!\n", count, size); + fflush(stderr); + return __libc_malloc(size); + } + // CRITICAL FIX (BUG #7): Increment lock depth FIRST, before ANY libc calls // This prevents infinite recursion when getenv/fprintf/dlopen call malloc g_hakmem_lock_depth++; @@ -100,7 +111,15 @@ void* malloc(size_t size) { } } + if (count > 14250 && count < 14280 && size <= 1024) { + fprintf(stderr, "[MALLOC_WRAPPER] count=%lu calling hak_alloc_at\n", count); + fflush(stderr); + } void* ptr = hak_alloc_at(size, HAK_CALLSITE()); + if (count > 14250 && count < 14280 && size <= 1024) { + fprintf(stderr, "[MALLOC_WRAPPER] count=%lu hak_alloc_at returned %p\n", count, ptr); + fflush(stderr); + } g_hakmem_lock_depth--; return ptr; } diff --git a/core/box/integrity_box.c b/core/box/integrity_box.c new file mode 100644 index 00000000..3c4146c5 --- /dev/null +++ b/core/box/integrity_box.c @@ -0,0 +1,478 @@ +// integrity_box.c - Box I: Integrity Verification System Implementation +// Purpose: Complete implementation of modular integrity checks +// Author: Claude + Task (2025-11-12) + +#include "integrity_box.h" +#include "../hakmem_tiny.h" +#include "../superslab/superslab_types.h" +#include "../tiny_box_geometry.h" +#include +#include +#include +#include + +// ============================================================================ +// TLS Canary Magic +// ============================================================================ + +#define TLS_CANARY_MAGIC 0xDEADBEEFDEADBEEFULL + +// External canaries from hakmem_tiny.c +extern __thread uint64_t g_tls_canary_before_sll_head; +extern __thread uint64_t g_tls_canary_after_sll_head; +extern __thread uint64_t g_tls_canary_before_sll_count; +extern __thread uint64_t g_tls_canary_after_sll_count; + +// ============================================================================ +// Global Statistics (atomic for thread safety) +// ============================================================================ + +static _Atomic uint64_t g_integrity_checks_performed = 0; +static _Atomic uint64_t g_integrity_checks_passed = 0; +static _Atomic uint64_t g_integrity_checks_failed = 0; +static _Atomic uint64_t g_integrity_tls_bounds_checks = 0; +static _Atomic uint64_t g_integrity_freelist_checks = 0; +static _Atomic uint64_t g_integrity_metadata_checks = 0; +static _Atomic uint64_t g_integrity_canary_checks = 0; +static _Atomic uint64_t g_integrity_full_system_checks = 0; + +// ============================================================================ +// Initialization +// ============================================================================ + +void integrity_box_init(void) { + // Initialize statistics (atomic init is implicit) + atomic_store(&g_integrity_checks_performed, 0); + atomic_store(&g_integrity_checks_passed, 0); + atomic_store(&g_integrity_checks_failed, 0); + atomic_store(&g_integrity_tls_bounds_checks, 0); + atomic_store(&g_integrity_freelist_checks, 0); + atomic_store(&g_integrity_metadata_checks, 0); + atomic_store(&g_integrity_canary_checks, 0); + atomic_store(&g_integrity_full_system_checks, 0); +} + +// ============================================================================ +// Priority 1: TLS Bounds Validation +// ============================================================================ + +IntegrityResult integrity_validate_tls_bounds( + uint8_t class_idx, + const char* context) { + + atomic_fetch_add(&g_integrity_checks_performed, 1); + atomic_fetch_add(&g_integrity_tls_bounds_checks, 1); + + if (class_idx >= TINY_NUM_CLASSES) { + atomic_fetch_add(&g_integrity_checks_failed, 1); + return (IntegrityResult){ + .passed = false, + .check_name = "TLS_BOUNDS_OVERFLOW", + .file = __FILE__, + .line = __LINE__, + .message = "class_idx out of bounds", + .error_code = INTEGRITY_ERROR_TLS_BOUNDS_OVERFLOW + }; + } + + atomic_fetch_add(&g_integrity_checks_passed, 1); + return (IntegrityResult){ + .passed = true, + .check_name = "TLS_BOUNDS_OK", + .file = __FILE__, + .line = __LINE__, + .message = "TLS bounds check passed", + .error_code = INTEGRITY_ERROR_OK + }; +} + +// ============================================================================ +// Priority 2: Freelist Pointer Validation +// ============================================================================ + +IntegrityResult integrity_validate_freelist_ptr( + void* ptr, + void* slab_base, + void* slab_end, + uint8_t class_idx, + const char* context) { + + atomic_fetch_add(&g_integrity_checks_performed, 1); + atomic_fetch_add(&g_integrity_freelist_checks, 1); + + // NULL is valid (end of freelist) + if (ptr == NULL) { + atomic_fetch_add(&g_integrity_checks_passed, 1); + return (IntegrityResult){ + .passed = true, + .check_name = "FREELIST_PTR_NULL", + .file = __FILE__, + .line = __LINE__, + .message = "NULL freelist pointer (valid)", + .error_code = INTEGRITY_ERROR_OK + }; + } + + // Check pointer is in valid range + if (ptr < slab_base || ptr >= slab_end) { + atomic_fetch_add(&g_integrity_checks_failed, 1); + return (IntegrityResult){ + .passed = false, + .check_name = "FREELIST_PTR_OUT_OF_BOUNDS", + .file = __FILE__, + .line = __LINE__, + .message = "Freelist pointer outside slab bounds", + .error_code = INTEGRITY_ERROR_FREELIST_PTR_OUT_OF_BOUNDS + }; + } + + // Check stride alignment + size_t stride = tiny_stride_for_class(class_idx); + ptrdiff_t offset = (uint8_t*)ptr - (uint8_t*)slab_base; + + if (offset % stride != 0) { + atomic_fetch_add(&g_integrity_checks_failed, 1); + return (IntegrityResult){ + .passed = false, + .check_name = "FREELIST_PTR_MISALIGNED", + .file = __FILE__, + .line = __LINE__, + .message = "Freelist pointer not stride-aligned", + .error_code = INTEGRITY_ERROR_FREELIST_PTR_MISALIGNED + }; + } + + atomic_fetch_add(&g_integrity_checks_passed, 1); + return (IntegrityResult){ + .passed = true, + .check_name = "FREELIST_PTR_OK", + .file = __FILE__, + .line = __LINE__, + .message = "Freelist pointer valid", + .error_code = INTEGRITY_ERROR_OK + }; +} + +// ============================================================================ +// Priority 3: TLS Canary Validation +// ============================================================================ + +IntegrityResult integrity_validate_tls_canaries(const char* context) { + atomic_fetch_add(&g_integrity_checks_performed, 1); + atomic_fetch_add(&g_integrity_canary_checks, 1); + + // Check canary before sll_head array + if (g_tls_canary_before_sll_head != TLS_CANARY_MAGIC) { + atomic_fetch_add(&g_integrity_checks_failed, 1); + return (IntegrityResult){ + .passed = false, + .check_name = "CANARY_CORRUPTED_BEFORE_HEAD", + .file = __FILE__, + .line = __LINE__, + .message = "Canary before g_tls_sll_head corrupted", + .error_code = INTEGRITY_ERROR_CANARY_CORRUPTED_BEFORE_HEAD + }; + } + + // Check canary after sll_head array + if (g_tls_canary_after_sll_head != TLS_CANARY_MAGIC) { + atomic_fetch_add(&g_integrity_checks_failed, 1); + return (IntegrityResult){ + .passed = false, + .check_name = "CANARY_CORRUPTED_AFTER_HEAD", + .file = __FILE__, + .line = __LINE__, + .message = "Canary after g_tls_sll_head corrupted", + .error_code = INTEGRITY_ERROR_CANARY_CORRUPTED_AFTER_HEAD + }; + } + + // Check canary before sll_count array + if (g_tls_canary_before_sll_count != TLS_CANARY_MAGIC) { + atomic_fetch_add(&g_integrity_checks_failed, 1); + return (IntegrityResult){ + .passed = false, + .check_name = "CANARY_CORRUPTED_BEFORE_COUNT", + .file = __FILE__, + .line = __LINE__, + .message = "Canary before g_tls_sll_count corrupted", + .error_code = INTEGRITY_ERROR_CANARY_CORRUPTED_BEFORE_COUNT + }; + } + + // Check canary after sll_count array + if (g_tls_canary_after_sll_count != TLS_CANARY_MAGIC) { + atomic_fetch_add(&g_integrity_checks_failed, 1); + return (IntegrityResult){ + .passed = false, + .check_name = "CANARY_CORRUPTED_AFTER_COUNT", + .file = __FILE__, + .line = __LINE__, + .message = "Canary after g_tls_sll_count corrupted", + .error_code = INTEGRITY_ERROR_CANARY_CORRUPTED_AFTER_COUNT + }; + } + + atomic_fetch_add(&g_integrity_checks_passed, 1); + return (IntegrityResult){ + .passed = true, + .check_name = "CANARY_OK", + .file = __FILE__, + .line = __LINE__, + .message = "All canaries intact", + .error_code = INTEGRITY_ERROR_OK + }; +} + +// ============================================================================ +// Priority ALPHA: Slab Metadata Validation (THE KEY!) +// ============================================================================ + +SlabMetadataState integrity_capture_slab_metadata( + const void* meta_ptr, + void* slab_base, + uint8_t class_idx) { + + // Cast to TinySlabMeta type + const TinySlabMeta* meta = (const TinySlabMeta*)meta_ptr; + + SlabMetadataState state = {0}; + + if (meta == NULL) { + // NULL metadata - return invalid state + state.carved = 0xFFFF; + state.used = 0xFFFF; + state.capacity = 0; + state.freelist = NULL; + state.slab_base = NULL; + state.class_idx = class_idx; + state.free_count = 0xFFFF; + state.is_virgin = false; + state.is_full = false; + state.is_empty = false; + return state; + } + + // Capture core fields + state.carved = meta->carved; + state.used = meta->used; + state.capacity = meta->capacity; + state.freelist = meta->freelist; + state.slab_base = slab_base; + state.class_idx = class_idx; + + // Compute derived fields + if (state.carved >= state.used) { + state.free_count = state.carved - state.used; + } else { + state.free_count = 0xFFFF; // Invalid! + } + + state.is_virgin = (state.carved == 0); + state.is_full = (state.carved == state.capacity && state.used == state.capacity); + state.is_empty = (state.used == 0); + + return state; +} + +IntegrityResult integrity_validate_slab_metadata( + const SlabMetadataState* state, + const char* context) { + + atomic_fetch_add(&g_integrity_checks_performed, 1); + atomic_fetch_add(&g_integrity_metadata_checks, 1); + + // Check 1: carved <= capacity + if (state->carved > state->capacity) { + atomic_fetch_add(&g_integrity_checks_failed, 1); + return (IntegrityResult){ + .passed = false, + .check_name = "METADATA_CARVED_OVERFLOW", + .file = __FILE__, + .line = __LINE__, + .message = "carved > capacity (slab corruption)", + .error_code = INTEGRITY_ERROR_METADATA_CARVED_OVERFLOW + }; + } + + // Check 2: used <= carved + if (state->used > state->carved) { + atomic_fetch_add(&g_integrity_checks_failed, 1); + return (IntegrityResult){ + .passed = false, + .check_name = "METADATA_USED_GT_CARVED", + .file = __FILE__, + .line = __LINE__, + .message = "used > carved (double-free or corruption)", + .error_code = INTEGRITY_ERROR_METADATA_USED_GT_CARVED + }; + } + + // Check 3: used <= capacity + if (state->used > state->capacity) { + atomic_fetch_add(&g_integrity_checks_failed, 1); + return (IntegrityResult){ + .passed = false, + .check_name = "METADATA_USED_OVERFLOW", + .file = __FILE__, + .line = __LINE__, + .message = "used > capacity (counter corruption)", + .error_code = INTEGRITY_ERROR_METADATA_USED_OVERFLOW + }; + } + + // Check 4: free_count consistency + uint16_t expected_free = state->carved - state->used; + if (state->free_count != expected_free) { + atomic_fetch_add(&g_integrity_checks_failed, 1); + return (IntegrityResult){ + .passed = false, + .check_name = "METADATA_FREE_COUNT_MISMATCH", + .file = __FILE__, + .line = __LINE__, + .message = "free_count != (carved - used)", + .error_code = INTEGRITY_ERROR_METADATA_FREE_COUNT_MISMATCH + }; + } + + // Check 5: Capacity is reasonable (not corrupted) + // Slabs typically have 64-256 blocks depending on class + // 512 is a safe upper bound + if (state->capacity > 512) { + atomic_fetch_add(&g_integrity_checks_failed, 1); + return (IntegrityResult){ + .passed = false, + .check_name = "METADATA_CAPACITY_UNREASONABLE", + .file = __FILE__, + .line = __LINE__, + .message = "capacity > 512 (likely corrupted)", + .error_code = INTEGRITY_ERROR_METADATA_CAPACITY_UNREASONABLE + }; + } + + // Check 6: Freelist pointer validity + // The freelist pointer should either be: + // - NULL (linear carving mode or empty freelist) + // - A valid pointer within the slab's address range + // - NOT uninitialized garbage like 0xa2a2a2a2a2a2a2a2 + if (state->freelist != NULL && state->slab_base != NULL) { + uintptr_t freelist_addr = (uintptr_t)state->freelist; + uintptr_t slab_start = (uintptr_t)state->slab_base; + + // Detect obvious corruption patterns (0xa2, 0xcc, 0xdd, 0xfe are common debug fill patterns) + uint8_t* freelist_bytes = (uint8_t*)&freelist_addr; + bool is_pattern_fill = (freelist_bytes[0] == freelist_bytes[1] && + freelist_bytes[1] == freelist_bytes[2] && + freelist_bytes[2] == freelist_bytes[3] && + freelist_bytes[3] == freelist_bytes[4] && + freelist_bytes[4] == freelist_bytes[5] && + freelist_bytes[5] == freelist_bytes[6] && + freelist_bytes[6] == freelist_bytes[7]); + + if (is_pattern_fill && (freelist_bytes[0] == 0xa2 || + freelist_bytes[0] == 0xcc || + freelist_bytes[0] == 0xdd || + freelist_bytes[0] == 0xfe)) { + atomic_fetch_add(&g_integrity_checks_failed, 1); + fprintf(stderr, "[BOX I] CRITICAL: Uninitialized freelist detected!\n"); + fprintf(stderr, "[BOX I] freelist=%p (pattern: 0x%02x repeated)\n", + state->freelist, freelist_bytes[0]); + fprintf(stderr, "[BOX I] carved=%u used=%u capacity=%u class=%u\n", + state->carved, state->used, state->capacity, state->class_idx); + fprintf(stderr, "[BOX I] This indicates the slab was used before proper initialization!\n"); + return (IntegrityResult){ + .passed = false, + .check_name = "METADATA_FREELIST_UNINITIALIZED", + .file = __FILE__, + .line = __LINE__, + .message = "freelist contains uninitialized pattern (0xa2/0xcc/0xdd/0xfe)", + .error_code = 0xA090 + }; + } + + // Basic range check (freelist should be within reasonable address space) + // Kernel space on x86-64 starts at 0xffff800000000000 + if (freelist_addr >= 0xffff800000000000UL) { + atomic_fetch_add(&g_integrity_checks_failed, 1); + return (IntegrityResult){ + .passed = false, + .check_name = "METADATA_FREELIST_KERNEL_ADDR", + .file = __FILE__, + .line = __LINE__, + .message = "freelist points to kernel space (corrupted)", + .error_code = 0xA091 + }; + } + } + + atomic_fetch_add(&g_integrity_checks_passed, 1); + return (IntegrityResult){ + .passed = true, + .check_name = "METADATA_OK", + .file = __FILE__, + .line = __LINE__, + .message = "All metadata checks passed", + .error_code = INTEGRITY_ERROR_OK + }; +} + +// ============================================================================ +// Periodic Full System Check +// ============================================================================ + +void integrity_periodic_full_check(const char* context) { + atomic_fetch_add(&g_integrity_full_system_checks, 1); + + // Check all TLS canaries + IntegrityResult canary_result = integrity_validate_tls_canaries(context); + if (!canary_result.passed) { + fprintf(stderr, "[INTEGRITY FAILURE] Periodic check failed: %s\n", + canary_result.message); + abort(); + } + + // Check TLS bounds for all classes + for (uint8_t cls = 0; cls < TINY_NUM_CLASSES; cls++) { + IntegrityResult bounds_result = integrity_validate_tls_bounds(cls, context); + if (!bounds_result.passed) { + fprintf(stderr, "[INTEGRITY FAILURE] Periodic check failed for class %u: %s\n", + cls, bounds_result.message); + abort(); + } + } +} + +// ============================================================================ +// Statistics API +// ============================================================================ + +IntegrityStatistics integrity_get_statistics(void) { + IntegrityStatistics stats; + stats.checks_performed = atomic_load(&g_integrity_checks_performed); + stats.checks_passed = atomic_load(&g_integrity_checks_passed); + stats.checks_failed = atomic_load(&g_integrity_checks_failed); + stats.tls_bounds_checks = atomic_load(&g_integrity_tls_bounds_checks); + stats.freelist_checks = atomic_load(&g_integrity_freelist_checks); + stats.metadata_checks = atomic_load(&g_integrity_metadata_checks); + stats.canary_checks = atomic_load(&g_integrity_canary_checks); + stats.full_system_checks = atomic_load(&g_integrity_full_system_checks); + return stats; +} + +void integrity_print_statistics(void) { + IntegrityStatistics stats = integrity_get_statistics(); + + fprintf(stderr, "\n=== Box I: Integrity Statistics ===\n"); + fprintf(stderr, "Total checks performed: %lu\n", stats.checks_performed); + fprintf(stderr, " Passed: %lu (%.2f%%)\n", stats.checks_passed, + stats.checks_performed > 0 ? 100.0 * stats.checks_passed / stats.checks_performed : 0.0); + fprintf(stderr, " Failed: %lu (%.2f%%)\n", stats.checks_failed, + stats.checks_performed > 0 ? 100.0 * stats.checks_failed / stats.checks_performed : 0.0); + fprintf(stderr, "\nBy check type:\n"); + fprintf(stderr, " TLS bounds checks: %lu\n", stats.tls_bounds_checks); + fprintf(stderr, " Freelist checks: %lu\n", stats.freelist_checks); + fprintf(stderr, " Metadata checks: %lu (Priority ALPHA)\n", stats.metadata_checks); + fprintf(stderr, " Canary checks: %lu\n", stats.canary_checks); + fprintf(stderr, " Full system checks: %lu\n", stats.full_system_checks); + fprintf(stderr, "===================================\n\n"); +} diff --git a/core/box/integrity_box.d b/core/box/integrity_box.d new file mode 100644 index 00000000..689b876b --- /dev/null +++ b/core/box/integrity_box.d @@ -0,0 +1,18 @@ +core/box/integrity_box.o: core/box/integrity_box.c \ + core/box/integrity_box.h core/box/../hakmem_tiny.h \ + core/box/../hakmem_build_flags.h core/box/../hakmem_trace.h \ + core/box/../hakmem_tiny_mini_mag.h \ + core/box/../superslab/superslab_types.h \ + core/hakmem_tiny_superslab_constants.h core/box/../tiny_box_geometry.h \ + core/box/../hakmem_tiny_superslab_constants.h \ + core/box/../hakmem_tiny_config.h +core/box/integrity_box.h: +core/box/../hakmem_tiny.h: +core/box/../hakmem_build_flags.h: +core/box/../hakmem_trace.h: +core/box/../hakmem_tiny_mini_mag.h: +core/box/../superslab/superslab_types.h: +core/hakmem_tiny_superslab_constants.h: +core/box/../tiny_box_geometry.h: +core/box/../hakmem_tiny_superslab_constants.h: +core/box/../hakmem_tiny_config.h: diff --git a/core/box/integrity_box.h b/core/box/integrity_box.h new file mode 100644 index 00000000..fcdedcfe --- /dev/null +++ b/core/box/integrity_box.h @@ -0,0 +1,244 @@ +// integrity_box.h - Box I: Integrity Verification System +// Purpose: Beautiful modular integrity checking with Priority ALPHA metadata validation +// Author: Claude + Task (2025-11-12) +// +// Box I provides layered integrity checks with compile-time control: +// - Level 0: No checks (release builds) +// - Level 1: TLS bounds checking +// - Level 2: Level 1 + freelist pointer validation +// - Level 3: Level 2 + TLS canary validation +// - Level 4: Level 3 + Priority ALPHA slab metadata validation (THE KEY!) +// +// This system is designed to catch the P0 SEGV bug at iteration 28,440 by +// detecting metadata corruption BEFORE it causes a crash. + +#ifndef INTEGRITY_BOX_H +#define INTEGRITY_BOX_H + +#include +#include +#include + +// ============================================================================ +// Integrity Level Configuration +// ============================================================================ + +#ifndef HAKMEM_INTEGRITY_LEVEL + #ifdef NDEBUG + #define HAKMEM_INTEGRITY_LEVEL 0 // No checks in release + #else + #define HAKMEM_INTEGRITY_LEVEL 4 // Full checks in debug (Priority ALPHA enabled) + #endif +#endif + +// ============================================================================ +// Core Types +// ============================================================================ + +// Result of an integrity check +typedef struct { + bool passed; // True if check passed + const char* check_name; // Name of the check (e.g., "METADATA_CARVED_OVERFLOW") + const char* file; // File where check was performed + int line; // Line number + const char* message; // Human-readable description + uint32_t error_code; // Unique error code (0x0000 = OK, 0xA001+ = metadata errors) +} IntegrityResult; + +// Priority ALPHA: Slab metadata state snapshot +typedef struct { + // Core metadata fields (from TinySlabMeta) + uint16_t carved; // Blocks carved from linear region (monotonic) + uint16_t used; // Blocks currently in use + uint16_t capacity; // Total blocks in slab + void* freelist; // Freelist head (NULL in linear mode) + + // Context + void* slab_base; // Base address of the slab + uint8_t class_idx; // Size class index (0-7) + + // Derived state (computed for validation) + uint16_t free_count; // Should equal (carved - used) + bool is_virgin; // carved == 0 (never allocated from) + bool is_full; // carved == capacity && used == capacity + bool is_empty; // used == 0 (all freed) +} SlabMetadataState; + +// TLS state snapshot (for comprehensive checks) +typedef struct { + void* sll_head[8]; // TLS SLL heads for each class + uint32_t sll_count[8]; // TLS SLL counts for each class + uint64_t canary_before_head; // Canary before sll_head array + uint64_t canary_after_head; // Canary after sll_head array + uint64_t canary_before_count; // Canary before sll_count array + uint64_t canary_after_count; // Canary after sll_count array +} TLSStateSnapshot; + +// Global integrity statistics +typedef struct { + uint64_t checks_performed; // Total checks run + uint64_t checks_passed; // Total checks that passed + uint64_t checks_failed; // Total checks that failed + uint64_t tls_bounds_checks; // TLS bounds checks + uint64_t freelist_checks; // Freelist pointer checks + uint64_t metadata_checks; // Slab metadata checks (Priority ALPHA) + uint64_t canary_checks; // TLS canary checks + uint64_t full_system_checks; // Full system integrity scans +} IntegrityStatistics; + +// ============================================================================ +// Core API +// ============================================================================ + +// Initialize Box I +void integrity_box_init(void); + +// Priority 1: TLS Bounds Validation +// Checks that class_idx is within valid range [0, TINY_NUM_CLASSES) +IntegrityResult integrity_validate_tls_bounds( + uint8_t class_idx, + const char* context +); + +// Priority 2: Freelist Pointer Validation +// Checks that freelist pointer is within slab bounds and properly aligned +IntegrityResult integrity_validate_freelist_ptr( + void* ptr, + void* slab_base, + void* slab_end, + uint8_t class_idx, + const char* context +); + +// Priority 3: TLS Canary Validation +// Checks that TLS canaries are intact (detects buffer overflows) +IntegrityResult integrity_validate_tls_canaries( + const char* context +); + +// Priority ALPHA: Slab Metadata Validation (THE KEY!) +// Validates slab metadata invariants: +// - carved <= capacity +// - used <= carved +// - used <= capacity +// - free_count == (carved - used) +// - capacity is reasonable (<= 512) +IntegrityResult integrity_validate_slab_metadata( + const SlabMetadataState* state, + const char* context +); + +// Capture slab metadata state for validation +SlabMetadataState integrity_capture_slab_metadata( + const void* meta_ptr, + void* slab_base, + uint8_t class_idx +); + +// Periodic full system integrity check +// Scans all TLS structures and active slabs +void integrity_periodic_full_check( + const char* context +); + +// ============================================================================ +// Statistics API +// ============================================================================ + +// Get current integrity statistics +IntegrityStatistics integrity_get_statistics(void); + +// Print integrity statistics to stderr +void integrity_print_statistics(void); + +// ============================================================================ +// Convenience Macros +// ============================================================================ + +#if HAKMEM_INTEGRITY_LEVEL >= 1 + #define INTEGRITY_CHECK_TLS_BOUNDS(cls, ctx) do { \ + IntegrityResult _ir = integrity_validate_tls_bounds((cls), (ctx)); \ + if (!_ir.passed) { \ + fprintf(stderr, "[INTEGRITY FAILURE] %s at %s:%d - %s (error 0x%04X)\n", \ + _ir.check_name, _ir.file, _ir.line, _ir.message, _ir.error_code); \ + abort(); \ + } \ + } while(0) +#else + #define INTEGRITY_CHECK_TLS_BOUNDS(cls, ctx) ((void)0) +#endif + +#if HAKMEM_INTEGRITY_LEVEL >= 2 + #define INTEGRITY_CHECK_FREELIST_PTR(ptr, base, end, cls, ctx) do { \ + IntegrityResult _ir = integrity_validate_freelist_ptr((ptr), (base), (end), (cls), (ctx)); \ + if (!_ir.passed) { \ + fprintf(stderr, "[INTEGRITY FAILURE] %s at %s:%d - %s (error 0x%04X)\n", \ + _ir.check_name, _ir.file, _ir.line, _ir.message, _ir.error_code); \ + abort(); \ + } \ + } while(0) +#else + #define INTEGRITY_CHECK_FREELIST_PTR(ptr, base, end, cls, ctx) ((void)0) +#endif + +#if HAKMEM_INTEGRITY_LEVEL >= 3 + #define INTEGRITY_CHECK_CANARIES(ctx) do { \ + IntegrityResult _ir = integrity_validate_tls_canaries(ctx); \ + if (!_ir.passed) { \ + fprintf(stderr, "[INTEGRITY FAILURE] %s at %s:%d - %s (error 0x%04X)\n", \ + _ir.check_name, _ir.file, _ir.line, _ir.message, _ir.error_code); \ + abort(); \ + } \ + } while(0) +#else + #define INTEGRITY_CHECK_CANARIES(ctx) ((void)0) +#endif + +#if HAKMEM_INTEGRITY_LEVEL >= 4 + #define INTEGRITY_CHECK_SLAB_METADATA(state, ctx) do { \ + IntegrityResult _ir = integrity_validate_slab_metadata(&(state), (ctx)); \ + if (!_ir.passed) { \ + fprintf(stderr, "[INTEGRITY FAILURE] %s at %s:%d - %s (error 0x%04X)\n", \ + _ir.check_name, _ir.file, _ir.line, _ir.message, _ir.error_code); \ + fprintf(stderr, " Metadata: carved=%u used=%u capacity=%u free_count=%u class=%u\n", \ + (state).carved, (state).used, (state).capacity, (state).free_count, (state).class_idx); \ + abort(); \ + } \ + } while(0) +#else + #define INTEGRITY_CHECK_SLAB_METADATA(state, ctx) ((void)0) +#endif + +// ============================================================================ +// Error Codes +// ============================================================================ + +// 0x0000: Success +// 0xA001-0xA0FF: Slab metadata errors (Priority ALPHA) +// 0xB001-0xB0FF: TLS bounds errors (Priority 1) +// 0xC001-0xC0FF: Freelist pointer errors (Priority 2) +// 0xD001-0xD0FF: TLS canary errors (Priority 3) + +#define INTEGRITY_ERROR_OK 0x0000 + +// Priority ALPHA: Metadata errors +#define INTEGRITY_ERROR_METADATA_CARVED_OVERFLOW 0xA001 +#define INTEGRITY_ERROR_METADATA_USED_GT_CARVED 0xA002 +#define INTEGRITY_ERROR_METADATA_USED_OVERFLOW 0xA003 +#define INTEGRITY_ERROR_METADATA_FREE_COUNT_MISMATCH 0xA004 +#define INTEGRITY_ERROR_METADATA_CAPACITY_UNREASONABLE 0xA005 + +// Priority 1: TLS bounds errors +#define INTEGRITY_ERROR_TLS_BOUNDS_OVERFLOW 0xB001 + +// Priority 2: Freelist pointer errors +#define INTEGRITY_ERROR_FREELIST_PTR_OUT_OF_BOUNDS 0xC001 +#define INTEGRITY_ERROR_FREELIST_PTR_MISALIGNED 0xC002 + +// Priority 3: TLS canary errors +#define INTEGRITY_ERROR_CANARY_CORRUPTED_BEFORE_HEAD 0xD001 +#define INTEGRITY_ERROR_CANARY_CORRUPTED_AFTER_HEAD 0xD002 +#define INTEGRITY_ERROR_CANARY_CORRUPTED_BEFORE_COUNT 0xD003 +#define INTEGRITY_ERROR_CANARY_CORRUPTED_AFTER_COUNT 0xD004 + +#endif // INTEGRITY_BOX_H diff --git a/core/box/superslab_expansion_box.c b/core/box/superslab_expansion_box.c new file mode 100644 index 00000000..95f4aca4 --- /dev/null +++ b/core/box/superslab_expansion_box.c @@ -0,0 +1,262 @@ +// superslab_expansion_box.c - Box E: SuperSlab Expansion Implementation +// Purpose: Safe SuperSlab expansion with TLS state guarantee +// Box Theory: Complete encapsulation of expansion logic +// +// License: MIT +// Date: 2025-11-12 + +#include "superslab_expansion_box.h" +#include "../hakmem_tiny_superslab.h" // expand_superslab_head(), g_superslab_heads +#include "../hakmem_tiny_superslab_constants.h" // SUPERSLAB_SLAB0_DATA_OFFSET +#include +#include + +// External SuperSlabHead array (defined in hakmem_tiny_superslab.c) +extern SuperSlabHead* g_superslab_heads[TINY_NUM_CLASSES_SS]; + +// External lock depth for safe fprintf in malloc context +extern __thread int g_hakmem_lock_depth; + +// ============================================================================ +// Box E: Core API Implementation +// ============================================================================ + +// Note: We don't implement expansion_capture_tls_state here because it requires +// access to g_tls_slabs, which is static in hakmem_tiny.c. The caller should +// capture state directly from their local g_tls_slabs reference. + +ExpansionResult expansion_expand_with_tls_guarantee( + SuperSlabHead* head, + uint8_t class_idx) +{ + ExpansionResult result; + memset(&result, 0, sizeof(result)); + result.success = false; + result.error_code = -2; // Invalid params + + // Validate parameters + if (!head || class_idx >= TINY_NUM_CLASSES_SS) { + return result; + } + + // CRITICAL: Call existing expand_superslab_head() with mutex protection + // This function already handles: + // 1. Mutex lock/unlock (head->expansion_lock) + // 2. Double-check pattern (re-verify after lock) + // 3. Chunk allocation and linking + // 4. current_chunk update + int expand_result = expand_superslab_head(head); + + if (expand_result < 0) { + // Expansion failed (OOM) + result.success = false; + result.error_code = -1; // OOM + return result; + } + + // Expansion succeeded + // CRITICAL FIX: Bind slab 0 immediately to prevent NULL meta SEGV + // The new chunk always has slab 0 available after expansion + SuperSlab* new_ss = head->current_chunk; + + // Initialize slab 0 metadata (set capacity, mark as active in bitmap) + extern void superslab_init_slab(SuperSlab* ss, int slab_idx, size_t block_size, uint32_t owner_tid); + extern const size_t g_tiny_class_sizes[]; + + uint32_t my_tid = (uint32_t)(uintptr_t)pthread_self(); + size_t block_size = g_tiny_class_sizes[class_idx]; + superslab_init_slab(new_ss, 0, block_size, my_tid); + + // Now bind slab 0 to TLS state + result.new_state.ss = new_ss; + result.new_state.class_idx = class_idx; + result.new_state.slab_idx = 0; // Always bind slab 0 after expansion + result.new_state.meta = &new_ss->slabs[0]; // Point to slab 0 metadata + + // Calculate slab_base using tiny_slab_base_for_geometry logic + // Slab 0 has offset SUPERSLAB_SLAB0_DATA_OFFSET (2048 bytes) + // Formula: base = ss + (slab_idx * SLAB_SIZE) + (slab_idx == 0 ? SLAB0_OFFSET : 0) + result.new_state.slab_base = (uint8_t*)new_ss + SUPERSLAB_SLAB0_DATA_OFFSET; + + result.success = true; + result.error_code = 0; + + return result; +} + +void expansion_apply_tls_state( + uint8_t class_idx, + const ExpansionTLSState* new_state, + TinyTLSSlab* tls_array) +{ + if (!new_state || !tls_array || class_idx >= TINY_NUM_CLASSES_SS) { + return; + } + + TinyTLSSlab* tls = &tls_array[class_idx]; + + // CRITICAL FIX: Apply complete TLS state from expansion + // This ensures meta and slab_base are NEVER NULL after expansion + tls->ss = new_state->ss; + tls->meta = new_state->meta; // ✅ Now points to slab 0! + tls->slab_base = new_state->slab_base; // ✅ Now points to slab 0 base! + tls->slab_idx = new_state->slab_idx; // ✅ Now 0 (slab 0) +} + +// ============================================================================ +// Box E: Debug & Validation Implementation +// ============================================================================ + +#if !defined(HAKMEM_BUILD_RELEASE) || defined(HAKMEM_EXPANSION_BOX_DEBUG) + +bool expansion_validate_tls_state( + const ExpansionTLSState* state, + const char* context) +{ + if (!state) { + return false; + } + + // Allow NULL ss (initial state before any allocation) + if (!state->ss) { + return true; + } + + // Validate SuperSlab magic + if (state->ss->magic != SUPERSLAB_MAGIC) { + g_hakmem_lock_depth++; + fprintf(stderr, "[EXPANSION_VAL] %s: Invalid SuperSlab magic: 0x%016llx (expected 0x%016llx)\n", + context, (unsigned long long)state->ss->magic, (unsigned long long)SUPERSLAB_MAGIC); + g_hakmem_lock_depth--; + return false; + } + + // Validate class consistency + if (state->ss->size_class != state->class_idx) { + g_hakmem_lock_depth++; + fprintf(stderr, "[EXPANSION_VAL] %s: Class mismatch: ss->size_class=%u, state->class_idx=%u\n", + context, state->ss->size_class, state->class_idx); + g_hakmem_lock_depth--; + return false; + } + + // Validate slab index bounds + int capacity = (state->ss->lg_size == 21) ? 32 : 16; // 2MB=32 slabs, 1MB=16 slabs + if (state->slab_idx >= capacity) { + g_hakmem_lock_depth++; + fprintf(stderr, "[EXPANSION_VAL] %s: slab_idx out of bounds: %u >= %d\n", + context, state->slab_idx, capacity); + g_hakmem_lock_depth--; + return false; + } + + // Validate meta pointer alignment (should point into ss->slabs array) + if (state->meta) { + TinySlabMeta* expected_meta = &state->ss->slabs[state->slab_idx]; + if (state->meta != expected_meta) { + g_hakmem_lock_depth++; + fprintf(stderr, "[EXPANSION_VAL] %s: meta pointer mismatch: %p (expected %p)\n", + context, (void*)state->meta, (void*)expected_meta); + g_hakmem_lock_depth--; + return false; + } + } + + // Validate slab_base alignment (should be within SuperSlab memory range) + if (state->slab_base) { + uintptr_t ss_start = (uintptr_t)state->ss; + size_t ss_size = (size_t)1 << state->ss->lg_size; + uintptr_t ss_end = ss_start + ss_size; + uintptr_t base_addr = (uintptr_t)state->slab_base; + + if (base_addr < ss_start || base_addr >= ss_end) { + g_hakmem_lock_depth++; + fprintf(stderr, "[EXPANSION_VAL] %s: slab_base out of range: %p (ss: %p - %p)\n", + context, (void*)state->slab_base, (void*)ss_start, (void*)ss_end); + g_hakmem_lock_depth--; + return false; + } + } + + return true; +} + +bool expansion_verify_expansion( + SuperSlabHead* head, + const ExpansionTLSState* old_state, + const ExpansionTLSState* new_state) +{ + if (!head || !old_state || !new_state) { + return false; + } + + // Verify new chunk is set + if (!new_state->ss) { + g_hakmem_lock_depth++; + fprintf(stderr, "[EXPANSION_VERIFY] New state has NULL SuperSlab\n"); + g_hakmem_lock_depth--; + return false; + } + + // Verify current_chunk was updated + if (head->current_chunk != new_state->ss) { + g_hakmem_lock_depth++; + fprintf(stderr, "[EXPANSION_VERIFY] current_chunk mismatch: head=%p, new_state=%p\n", + (void*)head->current_chunk, (void*)new_state->ss); + g_hakmem_lock_depth--; + return false; + } + + // Verify new chunk has available capacity (bitmap should not be full) + int capacity = (new_state->ss->lg_size == 21) ? 32 : 16; + uint32_t full_mask = (capacity >= 32) ? 0xFFFFFFFF : ((1U << capacity) - 1); + + if (new_state->ss->slab_bitmap == full_mask) { + g_hakmem_lock_depth++; + fprintf(stderr, "[EXPANSION_VERIFY] New chunk has no free slabs: bitmap=0x%08x\n", + new_state->ss->slab_bitmap); + g_hakmem_lock_depth--; + return false; + } + + // Verify total_chunks was incremented (if we can check old value) + // Note: We can't reliably check this without capturing old value + // But we can verify it's at least 1 + size_t total = atomic_load_explicit(&head->total_chunks, memory_order_relaxed); + if (total == 0) { + g_hakmem_lock_depth++; + fprintf(stderr, "[EXPANSION_VERIFY] total_chunks is 0 after expansion\n"); + g_hakmem_lock_depth--; + return false; + } + + return true; +} + +void expansion_log_event( + const char* event, + uint8_t class_idx, + const ExpansionTLSState* state) +{ + if (!event || !state) { + return; + } + + g_hakmem_lock_depth++; + + if (state->ss) { + fprintf(stderr, "[EXPANSION] class=%u %s: ss=%p, bitmap=0x%08x, active=%u, slab_idx=%u\n", + class_idx, event, + (void*)state->ss, + state->ss->slab_bitmap, + state->ss->active_slabs, + state->slab_idx); + } else { + fprintf(stderr, "[EXPANSION] class=%u %s: ss=NULL (initial state)\n", + class_idx, event); + } + + g_hakmem_lock_depth--; +} + +#endif // !HAKMEM_BUILD_RELEASE || HAKMEM_EXPANSION_BOX_DEBUG diff --git a/core/box/superslab_expansion_box.d b/core/box/superslab_expansion_box.d new file mode 100644 index 00000000..73a0b468 --- /dev/null +++ b/core/box/superslab_expansion_box.d @@ -0,0 +1,36 @@ +core/box/superslab_expansion_box.o: core/box/superslab_expansion_box.c \ + core/box/superslab_expansion_box.h \ + core/box/../superslab/superslab_types.h \ + core/hakmem_tiny_superslab_constants.h core/box/../tiny_tls.h \ + core/box/../hakmem_tiny_superslab.h \ + core/box/../superslab/superslab_types.h \ + core/box/../superslab/superslab_inline.h \ + core/box/../superslab/superslab_types.h core/tiny_debug_ring.h \ + core/hakmem_build_flags.h core/tiny_remote.h \ + core/box/../superslab/../tiny_box_geometry.h \ + core/box/../superslab/../hakmem_tiny_superslab_constants.h \ + core/box/../superslab/../hakmem_tiny_config.h \ + core/box/../tiny_debug_ring.h core/box/../tiny_remote.h \ + core/box/../hakmem_tiny_superslab_constants.h \ + core/box/../hakmem_build_flags.h core/box/../hakmem_tiny_superslab.h \ + core/box/../hakmem_tiny_superslab_constants.h +core/box/superslab_expansion_box.h: +core/box/../superslab/superslab_types.h: +core/hakmem_tiny_superslab_constants.h: +core/box/../tiny_tls.h: +core/box/../hakmem_tiny_superslab.h: +core/box/../superslab/superslab_types.h: +core/box/../superslab/superslab_inline.h: +core/box/../superslab/superslab_types.h: +core/tiny_debug_ring.h: +core/hakmem_build_flags.h: +core/tiny_remote.h: +core/box/../superslab/../tiny_box_geometry.h: +core/box/../superslab/../hakmem_tiny_superslab_constants.h: +core/box/../superslab/../hakmem_tiny_config.h: +core/box/../tiny_debug_ring.h: +core/box/../tiny_remote.h: +core/box/../hakmem_tiny_superslab_constants.h: +core/box/../hakmem_build_flags.h: +core/box/../hakmem_tiny_superslab.h: +core/box/../hakmem_tiny_superslab_constants.h: diff --git a/core/box/superslab_expansion_box.h b/core/box/superslab_expansion_box.h new file mode 100644 index 00000000..c46d961a --- /dev/null +++ b/core/box/superslab_expansion_box.h @@ -0,0 +1,146 @@ +// superslab_expansion_box.h - Box E: SuperSlab Expansion with TLS State Guarantee +// Purpose: Encapsulates SuperSlab chunk expansion with Design by Contract +// Box Theory: Provides ACID-like guarantees for TLS state during expansion +// +// Design Principles: +// 1. Complete encapsulation - all expansion logic lives here +// 2. TLS state guarantee - caller's TLS pointers remain valid after expansion +// 3. Thread-safe - mutex protection for concurrent expansion attempts +// 4. Atomic state transitions - no partial updates visible to caller +// 5. Debug validation - compile-time optional invariant checking +// +// License: MIT +// Date: 2025-11-12 + +#ifndef HAKMEM_SUPERSLAB_EXPANSION_BOX_H +#define HAKMEM_SUPERSLAB_EXPANSION_BOX_H + +#include +#include +#include "../superslab/superslab_types.h" // SuperSlabHead, SuperSlab +#include "../tiny_tls.h" // TinyTLSSlab + +// ============================================================================ +// Box E: Type Definitions +// ============================================================================ + +// TLS state snapshot (immutable after capture) +typedef struct ExpansionTLSState { + SuperSlab* ss; // Current SuperSlab (may change after expansion) + TinySlabMeta* meta; // Current slab metadata + uint8_t* slab_base; // Current slab base pointer + uint8_t slab_idx; // Current slab index + uint8_t class_idx; // Size class + uint8_t _pad[2]; // Padding +} ExpansionTLSState; + +// Expansion operation result +typedef struct ExpansionResult { + bool success; // true if expansion succeeded + ExpansionTLSState new_state; // New TLS state (valid only if success=true) + int error_code; // Error code (0=success, -1=OOM, -2=invalid params) +} ExpansionResult; + +// ============================================================================ +// Box E: Core API - Design by Contract +// ============================================================================ + +// Capture current TLS state (immutable snapshot) +// Precondition: class_idx < TINY_NUM_CLASSES_SS +// Postcondition: Returns snapshot of current TLS state +ExpansionTLSState expansion_capture_tls_state(uint8_t class_idx); + +// Expand SuperSlabHead with TLS state guarantee +// Precondition: head != NULL, class_idx < TINY_NUM_CLASSES_SS +// Postcondition: If success, new_state contains valid TLS pointers to expanded chunk +// If failure, head->current_chunk unchanged +// Thread-safe: Yes (mutex protected) +ExpansionResult expansion_expand_with_tls_guarantee( + SuperSlabHead* head, + uint8_t class_idx +); + +// Apply new TLS state to current thread +// Precondition: new_state is valid (from successful expansion), tls_array != NULL +// Postcondition: tls_array[class_idx] updated atomically +// Note: tls_array should be g_tls_slabs from the caller's context +void expansion_apply_tls_state( + uint8_t class_idx, + const ExpansionTLSState* new_state, + TinyTLSSlab* tls_array // Pointer to g_tls_slabs from caller +); + +// ============================================================================ +// Box E: High-Level API - One-Call Expansion +// ============================================================================ + +// Safe expansion: capture → expand → apply in one call +// Returns true if expansion succeeded, false on OOM or invalid params +// Thread-safe: Yes +// +// Parameters: +// head: SuperSlabHead to expand +// class_idx: Size class index +// tls_array: Pointer to g_tls_slabs array (from caller's context) +// +// Usage: +// extern __thread TinyTLSSlab g_tls_slabs[]; // In caller's scope +// if (!expansion_safe_expand(head, class_idx, g_tls_slabs)) { +// // Expansion failed (OOM) +// return NULL; +// } +// // TLS state is now updated, reload local pointers +// TinyTLSSlab* tls = &g_tls_slabs[class_idx]; +// SuperSlab* ss = tls->ss; +static inline bool expansion_safe_expand(SuperSlabHead* head, uint8_t class_idx, TinyTLSSlab* tls_array) { + if (!head || class_idx >= TINY_NUM_CLASSES_SS || !tls_array) { + return false; + } + + ExpansionResult result = expansion_expand_with_tls_guarantee(head, class_idx); + if (!result.success) { + return false; + } + + expansion_apply_tls_state(class_idx, &result.new_state, tls_array); + return true; +} + +// ============================================================================ +// Box E: Debug & Validation (Conditional Compilation) +// ============================================================================ + +#if !defined(HAKMEM_BUILD_RELEASE) || defined(HAKMEM_EXPANSION_BOX_DEBUG) + +// Validate TLS state invariants +// Returns true if all invariants hold, false otherwise +bool expansion_validate_tls_state( + const ExpansionTLSState* state, + const char* context // For debug logging (e.g., "before_expansion") +); + +// Verify expansion succeeded correctly +// Returns true if expansion is valid, false otherwise +bool expansion_verify_expansion( + SuperSlabHead* head, + const ExpansionTLSState* old_state, + const ExpansionTLSState* new_state +); + +// Log expansion event (debug only) +void expansion_log_event( + const char* event, // Event name (e.g., "START", "SUCCESS", "FAILURE") + uint8_t class_idx, + const ExpansionTLSState* state +); + +#else + +// No-op in release builds +#define expansion_validate_tls_state(state, context) ((void)0, true) +#define expansion_verify_expansion(head, old, new) ((void)0, true) +#define expansion_log_event(event, cls, state) ((void)0) + +#endif + +#endif // HAKMEM_SUPERSLAB_EXPANSION_BOX_H diff --git a/core/box/tls_sll_box.h b/core/box/tls_sll_box.h index a794fd4d..22b53dac 100644 --- a/core/box/tls_sll_box.h +++ b/core/box/tls_sll_box.h @@ -29,6 +29,7 @@ #include "../hakmem_tiny_config.h" // For TINY_NUM_CLASSES #include "../hakmem_build_flags.h" #include "../tiny_region_id.h" // HEADER_MAGIC / HEADER_CLASS_MASK +#include "../hakmem_tiny_integrity.h" // PRIORITY 2: Freelist integrity checks // Debug guard: validate base pointer before SLL ops (Debug only) #if !HAKMEM_BUILD_RELEASE @@ -122,11 +123,26 @@ static inline bool tls_sll_push(int class_idx, void* ptr, uint32_t capacity) { // // Performance: 4-5 cycles static inline bool tls_sll_pop(int class_idx, void** out) { + // PRIORITY 1: Bounds check + HAK_CHECK_CLASS_IDX(class_idx, "tls_sll_pop"); + atomic_fetch_add(&g_integrity_check_class_bounds, 1); + void* base = g_tls_sll_head[class_idx]; if (!base) { return false; // SLL empty } + // PRIORITY 2: Validate base pointer BEFORE dereferencing +#if !HAKMEM_BUILD_RELEASE + if (!validate_ptr_range(base, "tls_sll_pop_base")) { + fprintf(stderr, "[TLS_SLL_POP] FATAL: Invalid BASE pointer!\n"); + fprintf(stderr, " class_idx=%d base=%p\n", class_idx, base); + fprintf(stderr, " g_tls_sll_count[%d]=%u\n", class_idx, g_tls_sll_count[class_idx]); + fflush(stderr); + abort(); + } +#endif + // Pop from SLL (reads next from base) // Phase 7: Read next pointer at header-safe offset #if HAKMEM_TINY_HEADER_CLASSIDX @@ -134,8 +150,36 @@ static inline bool tls_sll_pop(int class_idx, void** out) { #else const size_t next_offset = 0; #endif + + // PRIORITY 2: Validate that (base + next_offset) is safe to dereference BEFORE reading +#if !HAKMEM_BUILD_RELEASE + void* read_addr = (uint8_t*)base + next_offset; + if (!validate_ptr_range(read_addr, "tls_sll_pop_read_addr")) { + fprintf(stderr, "[TLS_SLL_POP] FATAL: Cannot safely read next pointer!\n"); + fprintf(stderr, " class_idx=%d base=%p read_addr=%p (base+%zu)\n", + class_idx, base, read_addr, next_offset); + fprintf(stderr, " g_tls_sll_count[%d]=%u\n", class_idx, g_tls_sll_count[class_idx]); + fflush(stderr); + abort(); + } + atomic_fetch_add(&g_integrity_check_freelist, 1); +#endif + tls_sll_debug_guard(class_idx, base, "pop"); void* next; PTR_NEXT_READ("tls_pop", class_idx, base, next_offset, next); + + // PRIORITY 2: Validate next pointer after reading it +#if !HAKMEM_BUILD_RELEASE + if (!validate_ptr_range(next, "tls_sll_pop_next")) { + fprintf(stderr, "[TLS_SLL_POP] FATAL: Invalid next pointer after read!\n"); + fprintf(stderr, " class_idx=%d base=%p next=%p next_offset=%zu\n", + class_idx, base, next, next_offset); + fprintf(stderr, " g_tls_sll_count[%d]=%u\n", class_idx, g_tls_sll_count[class_idx]); + fflush(stderr); + abort(); + } +#endif + g_tls_sll_head[class_idx] = next; if (g_tls_sll_count[class_idx] > 0) { g_tls_sll_count[class_idx]--; diff --git a/core/hakmem_tiny.c b/core/hakmem_tiny.c index 85c4d597..92bc8195 100644 --- a/core/hakmem_tiny.c +++ b/core/hakmem_tiny.c @@ -6,6 +6,7 @@ #include "hakmem_internal.h" #include "hakmem_syscall.h" // Phase 6.X P0 Fix: Box 3 syscall layer (bypasses LD_PRELOAD) #include "hakmem_tiny_magazine.h" +#include "hakmem_tiny_integrity.h" // PRIORITY 1-4: Corruption detection // Phase 1 modules (must come AFTER hakmem_tiny.h for TinyPool definition) #include "hakmem_tiny_batch_refill.h" // Phase 1: Batch refill/spill for mini-magazine #include "hakmem_tiny_stats.h" // Phase 1: Batched statistics (replaces XOR RNG) @@ -45,6 +46,14 @@ const size_t g_tiny_class_sizes[TINY_NUM_CLASSES] = { 1024 // Class 7: 1024 bytes }; +// ============================================================================ +// PRIORITY 1-4: Integrity Check Counters +// ============================================================================ +_Atomic uint64_t g_integrity_check_class_bounds = 0; +_Atomic uint64_t g_integrity_check_freelist = 0; +_Atomic uint64_t g_integrity_check_canary = 0; +_Atomic uint64_t g_integrity_check_header = 0; + // Build-time gate for debug counters (path/ultra). Default OFF. #ifndef HAKMEM_DEBUG_COUNTERS #define HAKMEM_DEBUG_COUNTERS 0 @@ -1101,13 +1110,23 @@ static __attribute__((cold, noinline, unused)) void* tiny_slow_alloc_fast(int cl int g_tls_sll_enable = 1; // HAKMEM_TINY_TLS_SLL=0 to disable // Phase 6-1.7: Export TLS variables for box refactor (Box 5/6 need access from hakmem.c) // CRITICAL FIX: Explicit initializers prevent SEGV from uninitialized TLS in worker threads +// PRIORITY 3: TLS Canaries - Add canaries around TLS arrays to detect buffer overruns +#define TLS_CANARY_MAGIC 0xDEADBEEFDEADBEEFULL +__thread uint64_t g_tls_canary_before_sll_head = TLS_CANARY_MAGIC; #ifdef HAKMEM_TINY_PHASE6_BOX_REFACTOR __thread void* g_tls_sll_head[TINY_NUM_CLASSES] = {0}; -__thread uint32_t g_tls_sll_count[TINY_NUM_CLASSES] = {0}; #else static __thread void* g_tls_sll_head[TINY_NUM_CLASSES] = {0}; +#endif +__thread uint64_t g_tls_canary_after_sll_head = TLS_CANARY_MAGIC; + +__thread uint64_t g_tls_canary_before_sll_count = TLS_CANARY_MAGIC; +#ifdef HAKMEM_TINY_PHASE6_BOX_REFACTOR +__thread uint32_t g_tls_sll_count[TINY_NUM_CLASSES] = {0}; +#else static __thread uint32_t g_tls_sll_count[TINY_NUM_CLASSES] = {0}; #endif +__thread uint64_t g_tls_canary_after_sll_count = TLS_CANARY_MAGIC; static int g_tiny_ultra = 0; // HAKMEM_TINY_ULTRA=1 for SLL-only ultra mode static int g_ultra_validate = 0; // HAKMEM_TINY_ULTRA_VALIDATE=1 to enable per-pop validation // Ultra debug counters @@ -1753,8 +1772,31 @@ TinySlab* hak_tiny_owner_slab(void* ptr) { // Export wrapper functions for hakmem.c to call // Phase 6-1.7 Optimization: Remove diagnostic overhead, rely on LTO for inlining void* hak_tiny_alloc_fast_wrapper(size_t size) { + static _Atomic uint64_t wrapper_call_count = 0; + uint64_t call_num = atomic_fetch_add(&wrapper_call_count, 1); + + // PRIORITY 3: Periodic canary validation (every 1000 ops) + periodic_canary_check(call_num, "hak_tiny_alloc_fast_wrapper"); + + // Box I: Periodic full integrity check (every 5000 ops) + #if HAKMEM_INTEGRITY_LEVEL >= 3 + if ((call_num % 5000) == 0) { + extern void integrity_periodic_full_check(const char*); + integrity_periodic_full_check("periodic check in alloc wrapper"); + } + #endif + + if (call_num > 14250 && call_num < 14280 && size <= 1024) { + fprintf(stderr, "[HAK_TINY_ALLOC_FAST_WRAPPER] call=%lu size=%zu\n", call_num, size); + fflush(stderr); + } // Diagnostic removed - use HAKMEM_TINY_FRONT_DIAG in tiny_alloc_fast_pop if needed - return tiny_alloc_fast(size); + void* result = tiny_alloc_fast(size); + if (call_num > 14250 && call_num < 14280 && size <= 1024) { + fprintf(stderr, "[HAK_TINY_ALLOC_FAST_WRAPPER] call=%lu returned %p\n", call_num, result); + fflush(stderr); + } + return result; } void hak_tiny_free_fast_wrapper(void* ptr) { diff --git a/core/hakmem_tiny.d b/core/hakmem_tiny.d index f4e41af2..60ea13d8 100644 --- a/core/hakmem_tiny.d +++ b/core/hakmem_tiny.d @@ -12,24 +12,26 @@ core/hakmem_tiny.o: core/hakmem_tiny.c core/hakmem_tiny.h \ core/hakmem_super_registry.h core/hakmem_internal.h core/hakmem.h \ core/hakmem_config.h core/hakmem_features.h core/hakmem_sys.h \ core/hakmem_whale.h core/hakmem_syscall.h core/hakmem_tiny_magazine.h \ - core/hakmem_tiny_batch_refill.h core/hakmem_tiny_stats.h core/tiny_api.h \ - core/hakmem_tiny_stats_api.h core/hakmem_tiny_query_api.h \ - core/hakmem_tiny_rss_api.h core/hakmem_tiny_registry_api.h \ - core/tiny_tls.h core/tiny_debug.h core/tiny_mmap_gate.h \ - core/tiny_refill.h core/slab_handle.h core/tiny_sticky.h \ - core/tiny_ready.h core/box/mailbox_box.h core/hakmem_tiny_superslab.h \ - core/tiny_remote_bg.h core/hakmem_tiny_remote_target.h \ - core/tiny_ready_bg.h core/tiny_route.h core/box/adopt_gate_box.h \ - core/tiny_tls_guard.h core/hakmem_tiny_tls_list.h core/tiny_nextptr.h \ + core/hakmem_tiny_integrity.h core/hakmem_tiny_batch_refill.h \ + core/hakmem_tiny_stats.h core/tiny_api.h core/hakmem_tiny_stats_api.h \ + core/hakmem_tiny_query_api.h core/hakmem_tiny_rss_api.h \ + core/hakmem_tiny_registry_api.h core/tiny_tls.h core/tiny_debug.h \ + core/tiny_mmap_gate.h core/tiny_refill.h core/slab_handle.h \ + core/tiny_sticky.h core/tiny_ready.h core/box/mailbox_box.h \ + core/hakmem_tiny_superslab.h core/tiny_remote_bg.h \ + core/hakmem_tiny_remote_target.h core/tiny_ready_bg.h core/tiny_route.h \ + core/box/adopt_gate_box.h core/tiny_tls_guard.h \ + core/hakmem_tiny_tls_list.h core/tiny_nextptr.h \ core/hakmem_tiny_bg_spill.h core/tiny_adaptive_sizing.h \ core/tiny_system.h core/hakmem_prof.h core/tiny_publish.h \ core/box/tls_sll_box.h core/box/../ptr_trace.h \ core/box/../hakmem_tiny_config.h core/box/../hakmem_build_flags.h \ core/box/../tiny_region_id.h core/box/../hakmem_build_flags.h \ - core/hakmem_tiny_hotmag.inc.h core/hakmem_tiny_hot_pop.inc.h \ - core/hakmem_tiny_fastcache.inc.h core/hakmem_tiny_refill.inc.h \ - core/tiny_box_geometry.h core/hakmem_tiny_refill_p0.inc.h \ - core/tiny_refill_opt.h core/tiny_fc_api.h \ + core/box/../hakmem_tiny_integrity.h core/hakmem_tiny_hotmag.inc.h \ + core/hakmem_tiny_hot_pop.inc.h core/hakmem_tiny_fastcache.inc.h \ + core/hakmem_tiny_refill.inc.h core/tiny_box_geometry.h \ + core/hakmem_tiny_refill_p0.inc.h core/tiny_refill_opt.h \ + core/tiny_fc_api.h core/box/integrity_box.h \ core/hakmem_tiny_ultra_front.inc.h core/hakmem_tiny_intel.inc \ core/hakmem_tiny_background.inc core/hakmem_tiny_bg_bin.inc.h \ core/hakmem_tiny_tls_ops.h core/hakmem_tiny_remote.inc \ @@ -41,6 +43,8 @@ core/hakmem_tiny.o: core/hakmem_tiny.c core/hakmem_tiny.h \ core/hakmem_tiny_slow.inc core/hakmem_tiny_free.inc \ core/box/free_publish_box.h core/mid_tcache.h \ core/tiny_free_magazine.inc.h core/tiny_superslab_alloc.inc.h \ + core/box/superslab_expansion_box.h \ + core/box/../superslab/superslab_types.h core/box/../tiny_tls.h \ core/tiny_superslab_free.inc.h core/box/free_remote_box.h \ core/box/free_local_box.h core/hakmem_tiny_lifecycle.inc \ core/hakmem_tiny_slab_mgmt.inc @@ -72,6 +76,7 @@ core/hakmem_sys.h: core/hakmem_whale.h: core/hakmem_syscall.h: core/hakmem_tiny_magazine.h: +core/hakmem_tiny_integrity.h: core/hakmem_tiny_batch_refill.h: core/hakmem_tiny_stats.h: core/tiny_api.h: @@ -107,6 +112,7 @@ core/box/../hakmem_tiny_config.h: core/box/../hakmem_build_flags.h: core/box/../tiny_region_id.h: core/box/../hakmem_build_flags.h: +core/box/../hakmem_tiny_integrity.h: core/hakmem_tiny_hotmag.inc.h: core/hakmem_tiny_hot_pop.inc.h: core/hakmem_tiny_fastcache.inc.h: @@ -115,6 +121,7 @@ core/tiny_box_geometry.h: core/hakmem_tiny_refill_p0.inc.h: core/tiny_refill_opt.h: core/tiny_fc_api.h: +core/box/integrity_box.h: core/hakmem_tiny_ultra_front.inc.h: core/hakmem_tiny_intel.inc: core/hakmem_tiny_background.inc: @@ -137,6 +144,9 @@ core/box/free_publish_box.h: core/mid_tcache.h: core/tiny_free_magazine.inc.h: core/tiny_superslab_alloc.inc.h: +core/box/superslab_expansion_box.h: +core/box/../superslab/superslab_types.h: +core/box/../tiny_tls.h: core/tiny_superslab_free.inc.h: core/box/free_remote_box.h: core/box/free_local_box.h: diff --git a/core/hakmem_tiny_integrity.h b/core/hakmem_tiny_integrity.h new file mode 100644 index 00000000..b6c28662 --- /dev/null +++ b/core/hakmem_tiny_integrity.h @@ -0,0 +1,210 @@ +#ifndef HAKMEM_TINY_INTEGRITY_H +#define HAKMEM_TINY_INTEGRITY_H + +#include +#include +#include +#include +#include "hakmem_tiny.h" + +// ============================================================================ +// PRIORITY 1: TLS Array Bounds Checks +// ============================================================================ + +// Macro for bounds checking class_idx before TLS array access +#define HAK_CHECK_CLASS_IDX(class_idx, label) do { \ + if (__builtin_expect((class_idx) < 0 || (class_idx) >= TINY_NUM_CLASSES, 0)) { \ + fprintf(stderr, "[%s] FATAL: class_idx=%d out of bounds [0,%d) at %s:%d\n", \ + (label), (class_idx), TINY_NUM_CLASSES, __FILE__, __LINE__); \ + fflush(stderr); \ + assert(0 && "TLS array index out of bounds"); \ + abort(); \ + } \ +} while(0) + +// ============================================================================ +// PRIORITY 2: Freelist Integrity Checks +// ============================================================================ + +// Validate freelist next pointer is within slab bounds +static inline int validate_freelist_next(void* ptr, void* next, + void* slab_base, size_t stride, + uint8_t class_idx, + size_t num_blocks, + const char* location) { + if (next == NULL) return 1; // NULL is valid (end of list) + + void* slab_end = (uint8_t*)slab_base + (num_blocks * stride); + + if (next < slab_base || next >= slab_end) { + fprintf(stderr, "[FREELIST_CORRUPT] %s: ptr=%p next=%p slab=[%p,%p) class=%d stride=%zu\n", + location, ptr, next, slab_base, slab_end, class_idx, stride); + fprintf(stderr, "[FREELIST_CORRUPT] next is OUT OF BOUNDS by %td bytes\n", + (uint8_t*)next < (uint8_t*)slab_base ? + ((uint8_t*)slab_base - (uint8_t*)next) : + ((uint8_t*)next - (uint8_t*)slab_end)); + fflush(stderr); + assert(0 && "Freelist next pointer out of slab bounds"); + return 0; + } + + // Additional check: next pointer should be stride-aligned within slab + ptrdiff_t offset = (uint8_t*)next - (uint8_t*)slab_base; + if (offset % stride != 0) { + fprintf(stderr, "[FREELIST_MISALIGN] %s: ptr=%p next=%p offset=%td stride=%zu class=%d\n", + location, ptr, next, offset, stride, class_idx); + fprintf(stderr, "[FREELIST_MISALIGN] offset %% stride = %td (should be 0)\n", + offset % stride); + fflush(stderr); + assert(0 && "Freelist next pointer misaligned"); + return 0; + } + + return 1; +} + +// Validate pointer is within valid address range (basic sanity) +static inline int validate_ptr_range(void* ptr, const char* location) { + if (ptr == NULL) return 1; // NULL is valid in some contexts + + // Check for obviously invalid pointers + uintptr_t addr = (uintptr_t)ptr; + + // DIAGNOSTIC: One-time log to confirm this function is actually running + static volatile int g_validate_logged = 0; + if (__builtin_expect(g_validate_logged == 0, 0)) { + g_validate_logged = 1; + fprintf(stderr, "[VALIDATE_PTR_RANGE] First call: %s ptr=%p\n", location, ptr); + fflush(stderr); + } + + // Check for very low addresses (NULL-ish, likely corruption) + if (addr < 0x1000) { + fprintf(stderr, "[PTR_INVALID] %s: ptr=%p is suspiciously low (< 4KB)\n", + location, ptr); + fflush(stderr); + abort(); // Force abort (ignore assert settings) + } + + // Check for very high addresses (kernel space on x86-64) + if (addr > 0x7fffffffffffULL) { + fprintf(stderr, "[PTR_INVALID] %s: ptr=%p is in kernel space range\n", + location, ptr); + fflush(stderr); + abort(); // Force abort + } + + // Check for uninitialized/debug fill patterns (0xa2, 0xcc, 0xdd, 0xfe) + uint8_t* bytes = (uint8_t*)&addr; + if (bytes[0] == bytes[1] && bytes[1] == bytes[2] && bytes[2] == bytes[3] && + bytes[3] == bytes[4] && bytes[4] == bytes[5] && bytes[5] == bytes[6] && + bytes[6] == bytes[7]) { + // All bytes are the same - check for common debug patterns + if (bytes[0] == 0xa2 || bytes[0] == 0xcc || bytes[0] == 0xdd || bytes[0] == 0xfe) { + fprintf(stderr, "[PTR_INVALID] %s: ptr=%p is uninitialized (pattern 0x%02x)\n", + location, ptr, bytes[0]); + fprintf(stderr, "[PTR_INVALID] This indicates use-before-initialization!\n"); + fprintf(stderr, "[PTR_INVALID] Common patterns: 0xa2=ASan, 0xcc=MSVC, 0xdd=freed, 0xfe=heap\n"); + fflush(stderr); + abort(); // Force abort + } + } + + return 1; +} + +// ============================================================================ +// PRIORITY 3: TLS Canaries +// ============================================================================ + +#define TLS_CANARY_MAGIC 0xDEADBEEFDEADBEEFULL + +// External declarations (defined in hakmem_tiny.c) +extern __thread uint64_t g_tls_canary_before_sll_head; +extern __thread uint64_t g_tls_canary_after_sll_head; +extern __thread uint64_t g_tls_canary_before_sll_count; +extern __thread uint64_t g_tls_canary_after_sll_count; + +// Validate TLS canaries (call periodically) +static inline void validate_tls_canaries(const char* location) { + if (g_tls_canary_before_sll_head != TLS_CANARY_MAGIC) { + fprintf(stderr, "[TLS_CANARY] %s: g_tls_sll_head BEFORE canary corrupted: 0x%016lx (expected 0x%016lx)\n", + location, g_tls_canary_before_sll_head, TLS_CANARY_MAGIC); + fflush(stderr); + assert(0 && "TLS canary before sll_head corrupted"); + } + if (g_tls_canary_after_sll_head != TLS_CANARY_MAGIC) { + fprintf(stderr, "[TLS_CANARY] %s: g_tls_sll_head AFTER canary corrupted: 0x%016lx (expected 0x%016lx)\n", + location, g_tls_canary_after_sll_head, TLS_CANARY_MAGIC); + fflush(stderr); + assert(0 && "TLS canary after sll_head corrupted"); + } + if (g_tls_canary_before_sll_count != TLS_CANARY_MAGIC) { + fprintf(stderr, "[TLS_CANARY] %s: g_tls_sll_count BEFORE canary corrupted: 0x%016lx (expected 0x%016lx)\n", + location, g_tls_canary_before_sll_count, TLS_CANARY_MAGIC); + fflush(stderr); + assert(0 && "TLS canary before sll_count corrupted"); + } + if (g_tls_canary_after_sll_count != TLS_CANARY_MAGIC) { + fprintf(stderr, "[TLS_CANARY] %s: g_tls_sll_count AFTER canary corrupted: 0x%016lx (expected 0x%016lx)\n", + location, g_tls_canary_after_sll_count, TLS_CANARY_MAGIC); + fflush(stderr); + assert(0 && "TLS canary after sll_count corrupted"); + } +} + +// Periodic canary check (call every N operations) +static inline void periodic_canary_check(uint64_t counter, const char* location) { + if (counter % 1000 == 0) { + validate_tls_canaries(location); + } +} + +// ============================================================================ +// PRIORITY 4: Header Write Validation +// ============================================================================ + +// Validate header write parameters +static inline void validate_header_write(void* base_ptr, uint8_t class_idx, const char* location) { + if (base_ptr == NULL) { + fprintf(stderr, "[HEADER_WRITE] %s: NULL base pointer for class=%d\n", + location, class_idx); + fflush(stderr); + assert(0 && "NULL base pointer in header write"); + } + + if (class_idx >= 7) { // Class 7 is headerless + fprintf(stderr, "[HEADER_WRITE] %s: Invalid class_idx=%d for header write (class 7 is headerless)\n", + location, class_idx); + fflush(stderr); + assert(0 && "Invalid class_idx for header write"); + } + + if (!validate_ptr_range(base_ptr, location)) { + fprintf(stderr, "[HEADER_WRITE] %s: base_ptr=%p failed range validation\n", + location, base_ptr); + fflush(stderr); + assert(0 && "Header write pointer failed range validation"); + } +} + +// ============================================================================ +// Debug Counters for Integrity Checks +// ============================================================================ + +extern _Atomic uint64_t g_integrity_check_class_bounds; +extern _Atomic uint64_t g_integrity_check_freelist; +extern _Atomic uint64_t g_integrity_check_canary; +extern _Atomic uint64_t g_integrity_check_header; + +static inline void integrity_stats_dump(void) { + fprintf(stderr, "\n=== INTEGRITY CHECK STATISTICS ===\n"); + fprintf(stderr, "Class bounds checks: %lu\n", g_integrity_check_class_bounds); + fprintf(stderr, "Freelist checks: %lu\n", g_integrity_check_freelist); + fprintf(stderr, "Canary checks: %lu\n", g_integrity_check_canary); + fprintf(stderr, "Header write checks: %lu\n", g_integrity_check_header); + fprintf(stderr, "==================================\n"); + fflush(stderr); +} + +#endif // HAKMEM_TINY_INTEGRITY_H diff --git a/core/hakmem_tiny_refill.inc.h b/core/hakmem_tiny_refill.inc.h index b4500834..9a90c0a5 100644 --- a/core/hakmem_tiny_refill.inc.h +++ b/core/hakmem_tiny_refill.inc.h @@ -26,6 +26,7 @@ #include "hakmem_super_registry.h" // For hak_super_lookup (Debug validation) #include "superslab/superslab_inline.h" // For slab_index_for/ss_slabs_capacity (Debug validation) #include "box/tls_sll_box.h" // Box TLS-SLL: Safe SLL operations API +#include "hakmem_tiny_integrity.h" // PRIORITY 1-4: Corruption detection #include #include #include @@ -292,6 +293,10 @@ __attribute__((noinline)) int sll_refill_small_from_ss(int class_idx, int max_ta #else static inline int sll_refill_small_from_ss(int class_idx, int max_take) { #endif + // PRIORITY 1: Bounds check before TLS array access + HAK_CHECK_CLASS_IDX(class_idx, "sll_refill_small_from_ss"); + atomic_fetch_add(&g_integrity_check_class_bounds, 1); + // CRITICAL: C7 (1KB) is headerless - incompatible with TLS SLL refill if (__builtin_expect(class_idx == 7, 0)) { return 0; // C7 uses slow path exclusively diff --git a/core/hakmem_tiny_refill_p0.inc.h b/core/hakmem_tiny_refill_p0.inc.h index 9b64893b..a2c462f1 100644 --- a/core/hakmem_tiny_refill_p0.inc.h +++ b/core/hakmem_tiny_refill_p0.inc.h @@ -32,6 +32,7 @@ extern unsigned long long g_rf_early_want_zero[]; // Line 55: want == 0 #include "tiny_refill_opt.h" #include "tiny_fc_api.h" #include "superslab/superslab_inline.h" // For _ss_remote_drain_to_freelist_unsafe() +#include "box/integrity_box.h" // Box I: Integrity verification (Priority ALPHA) // Optional P0 diagnostic logging helper static inline int p0_should_log(void) { static int en = -1; @@ -98,6 +99,18 @@ static inline int sll_refill_batch_from_ss(int class_idx, int max_take) { #if HAKMEM_DEBUG_COUNTERS g_rf_early_no_meta[class_idx]++; #endif + return 0; + } + + /* BOX_BOUNDARY: Box 2 (Refill) → Box I (Integrity Check) */ + #if HAKMEM_INTEGRITY_LEVEL >= 4 + uint8_t* initial_slab_base = tls->slab_base ? tls->slab_base : tiny_slab_base_for(tls->ss, tls->slab_idx); + SlabMetadataState meta_initial = integrity_capture_slab_metadata(meta, initial_slab_base, class_idx); + INTEGRITY_CHECK_SLAB_METADATA(meta_initial, "P0 refill entry"); + #endif + /* BOX_BOUNDARY: Box I → Box 2 (Integrity Verified) */ + + if (!meta) { if (__builtin_expect(class_idx == 7 && p0_should_log(), 0)) { fprintf(stderr, "[P0_DEBUG_C7] meta is NULL after superslab_refill, returning 0\n"); } @@ -271,6 +284,15 @@ static inline int sll_refill_batch_from_ss(int class_idx, int max_take) { ss_active_add(tls->ss, from_freelist); // FIX: Keep TinySlabMeta::used consistent with non-P0 path meta->used = (uint16_t)((uint32_t)meta->used + from_freelist); + + /* BOX_BOUNDARY: Box 2 → Box I (Verify metadata after freelist pop) */ + #if HAKMEM_INTEGRITY_LEVEL >= 4 + SlabMetadataState meta_after_freelist = integrity_capture_slab_metadata( + meta, ss_base, class_idx); + INTEGRITY_CHECK_SLAB_METADATA(meta_after_freelist, "P0 after freelist pop"); + #endif + /* BOX_BOUNDARY: Box I → Box 2 */ + extern unsigned long long g_rf_freelist_items[]; g_rf_freelist_items[class_idx] += from_freelist; total_taken += from_freelist; @@ -287,6 +309,16 @@ static inline int sll_refill_batch_from_ss(int class_idx, int max_take) { tls = &g_tls_slabs[class_idx]; meta = tls->meta; if (!meta) break; + + /* BOX_BOUNDARY: Box 2 → Box I (Verify new slab after superslab_refill) */ + #if HAKMEM_INTEGRITY_LEVEL >= 4 + uint8_t* new_slab_base = tls->slab_base ? tls->slab_base : tiny_slab_base_for(tls->ss, tls->slab_idx); + SlabMetadataState meta_after_refill = integrity_capture_slab_metadata( + meta, new_slab_base, class_idx); + INTEGRITY_CHECK_SLAB_METADATA(meta_after_refill, "P0 after superslab_refill"); + #endif + /* BOX_BOUNDARY: Box I → Box 2 */ + continue; } @@ -349,6 +381,15 @@ static inline int sll_refill_batch_from_ss(int class_idx, int max_take) { trc_splice_to_sll(class_idx, &carve, &g_tls_sll_head[class_idx], &g_tls_sll_count[class_idx]); // FIX: Update SuperSlab active counter (was missing!) ss_active_add(tls->ss, batch); + + /* BOX_BOUNDARY: Box 2 → Box I (Verify metadata after linear carve) */ + #if HAKMEM_INTEGRITY_LEVEL >= 4 + SlabMetadataState meta_after_carve = integrity_capture_slab_metadata( + meta, slab_base, class_idx); + INTEGRITY_CHECK_SLAB_METADATA(meta_after_carve, "P0 after linear carve"); + #endif + /* BOX_BOUNDARY: Box I → Box 2 */ + extern unsigned long long g_rf_carve_items[]; g_rf_carve_items[class_idx] += batch; diff --git a/core/hakmem_tiny_superslab.c b/core/hakmem_tiny_superslab.c index 84f4428d..dad34f7a 100644 --- a/core/hakmem_tiny_superslab.c +++ b/core/hakmem_tiny_superslab.c @@ -479,13 +479,22 @@ SuperSlab* superslab_allocate(uint8_t size_class) { // Initialize all slab metadata (only up to max slabs for this size) int max_slabs = (int)(ss_size / SLAB_SIZE); + + // DEFENSIVE FIX: Zero all slab metadata arrays to prevent ANY uninitialized pointers + // This catches the 0xa2a2a2a2a2a2a2a2 pattern bug (ASan/debug fill pattern) + // Even though mmap should return zeroed pages, sanitizers may fill with debug patterns + memset(ss->slabs, 0, max_slabs * sizeof(TinySlabMeta)); + memset(ss->remote_heads, 0, max_slabs * sizeof(uintptr_t)); + memset(ss->remote_counts, 0, max_slabs * sizeof(uint32_t)); + memset(ss->slab_listed, 0, max_slabs * sizeof(uint32_t)); + for (int i = 0; i < max_slabs; i++) { - ss->slabs[i].freelist = NULL; + ss->slabs[i].freelist = NULL; // Explicit NULL (redundant after memset, but clear intent) ss->slabs[i].used = 0; ss->slabs[i].capacity = 0; ss->slabs[i].owner_tid = 0; - // Initialize remote queue atomics + // Initialize remote queue atomics (memset already zeroed, but use proper atomic init) atomic_store_explicit(&ss->remote_heads[i], 0, memory_order_relaxed); atomic_store_explicit(&ss->remote_counts[i], 0, memory_order_relaxed); atomic_store_explicit(&ss->slab_listed[i], 0, memory_order_relaxed); diff --git a/core/tiny_alloc_fast.inc.h b/core/tiny_alloc_fast.inc.h index 226898de..7486717a 100644 --- a/core/tiny_alloc_fast.inc.h +++ b/core/tiny_alloc_fast.inc.h @@ -24,6 +24,7 @@ #ifdef HAKMEM_TINY_FRONT_GATE_BOX #include "box/front_gate_box.h" #endif +#include "hakmem_tiny_integrity.h" // PRIORITY 1-4: Corruption detection #include // Phase 7 Task 2: Aggressive inline TLS cache access @@ -184,6 +185,10 @@ extern int g_sfc_enabled; // // Expected: 3-4 instructions on SFC hit, 6-8 on SLL hit static inline void* tiny_alloc_fast_pop(int class_idx) { + // PRIORITY 1: Bounds check before any TLS array access + HAK_CHECK_CLASS_IDX(class_idx, "tiny_alloc_fast_pop"); + atomic_fetch_add(&g_integrity_check_class_bounds, 1); + // CRITICAL: C7 (1KB) is headerless - delegate to slow path completely // Reason: Fast path uses SLL which stores next pointer in user data area // C7's headerless design is incompatible with fast path assumptions @@ -306,6 +311,10 @@ static inline int sfc_cascade_pct(void) { } static inline int sfc_refill_from_sll(int class_idx, int target_count) { + // PRIORITY 1: Bounds check + HAK_CHECK_CLASS_IDX(class_idx, "sfc_refill_from_sll"); + atomic_fetch_add(&g_integrity_check_class_bounds, 1); + int transferred = 0; uint32_t cap = g_sfc_capacity[class_idx]; @@ -509,11 +518,29 @@ static inline int tiny_alloc_fast_refill(int class_idx) { // // OOM handling // } static inline void* tiny_alloc_fast(size_t size) { + static _Atomic uint64_t alloc_call_count = 0; + uint64_t call_num = atomic_fetch_add(&alloc_call_count, 1); + // 1. Size → class index (inline, fast) int class_idx = hak_tiny_size_to_class(size); if (__builtin_expect(class_idx < 0, 0)) { return NULL; // Size > 1KB, not Tiny } + + // CRITICAL: Bounds check to catch corruption + if (__builtin_expect(class_idx >= TINY_NUM_CLASSES, 0)) { + fprintf(stderr, "[TINY_ALLOC_FAST] FATAL: class_idx=%d out of bounds! size=%zu call=%lu\n", + class_idx, size, call_num); + fflush(stderr); + abort(); + } + + // Debug logging near crash point + if (call_num > 14250 && call_num < 14280) { + fprintf(stderr, "[TINY_ALLOC] call=%lu size=%zu class=%d\n", call_num, size, class_idx); + fflush(stderr); + } + ROUTE_BEGIN(class_idx); void* ptr = NULL; const int hot_c5 = (g_tiny_hotpath_class5 && class_idx == 5); @@ -536,7 +563,15 @@ static inline void* tiny_alloc_fast(size_t size) { } // Generic front (FastCache/SFC/SLL) + if (call_num > 14250 && call_num < 14280) { + fprintf(stderr, "[TINY_ALLOC] call=%lu before fast_pop\n", call_num); + fflush(stderr); + } ptr = tiny_alloc_fast_pop(class_idx); + if (call_num > 14250 && call_num < 14280) { + fprintf(stderr, "[TINY_ALLOC] call=%lu after fast_pop ptr=%p\n", call_num, ptr); + fflush(stderr); + } if (__builtin_expect(ptr != NULL, 1)) { HAK_RET_ALLOC(class_idx, ptr); } diff --git a/core/tiny_free_fast_v2.inc.h b/core/tiny_free_fast_v2.inc.h index 13acf4f2..b76735d8 100644 --- a/core/tiny_free_fast_v2.inc.h +++ b/core/tiny_free_fast_v2.inc.h @@ -19,6 +19,7 @@ #include "hakmem_build_flags.h" #include "hakmem_tiny_config.h" // For TINY_TLS_MAG_CAP, TINY_NUM_CLASSES #include "box/tls_sll_box.h" // Box TLS-SLL API +#include "hakmem_tiny_integrity.h" // PRIORITY 1-4: Corruption detection // Phase 7: Header-based ultra-fast free #if HAKMEM_TINY_HEADER_CLASSIDX @@ -105,6 +106,16 @@ static inline int hak_tiny_free_fast_v2(void* ptr) { return 0; } + // PRIORITY 1: Bounds check on class_idx from header + if (__builtin_expect(class_idx >= TINY_NUM_CLASSES, 0)) { + fprintf(stderr, "[TINY_FREE_V2] FATAL: class_idx=%d out of bounds (from header at %p)\n", + class_idx, ptr); + fflush(stderr); + assert(0 && "class_idx from header out of bounds"); + return 0; + } + atomic_fetch_add(&g_integrity_check_class_bounds, 1); + // 2. Check TLS freelist capacity (optional, for bounded cache) // Note: Can be disabled in release for maximum speed #if !HAKMEM_BUILD_RELEASE diff --git a/core/tiny_superslab_alloc.inc.h b/core/tiny_superslab_alloc.inc.h index 209fae50..cdcb324e 100644 --- a/core/tiny_superslab_alloc.inc.h +++ b/core/tiny_superslab_alloc.inc.h @@ -8,6 +8,8 @@ // - superslab_refill(): Refill TLS slab (adoption, registry scan, fresh alloc) // - hak_tiny_alloc_superslab(): Main SuperSlab allocation entry point +#include "box/superslab_expansion_box.h" // Box E: Expansion with TLS state guarantee + // ============================================================================ // Phase 6.23: SuperSlab Allocation Helpers // ============================================================================ @@ -248,43 +250,49 @@ static SuperSlab* superslab_refill(int class_idx) { g_hakmem_lock_depth--; #endif - // Protect expansion with global lock (race condition fix) - static pthread_mutex_t expand_lock = PTHREAD_MUTEX_INITIALIZER; - pthread_mutex_lock(&expand_lock); - - // Re-check after acquiring lock (another thread may have expanded) - current_chunk = head->current_chunk; - uint32_t recheck_mask = (ss_slabs_capacity(current_chunk) >= 32) ? 0xFFFFFFFF : - ((1U << ss_slabs_capacity(current_chunk)) - 1); - - if (current_chunk->slab_bitmap == recheck_mask) { - // Still exhausted, expand now - if (expand_superslab_head(head) < 0) { - pthread_mutex_unlock(&expand_lock); -#if !defined(NDEBUG) || defined(HAKMEM_SUPERSLAB_VERBOSE) - g_hakmem_lock_depth++; - fprintf(stderr, "[HAKMEM] CRITICAL: Failed to expand SuperSlabHead for class %d (system OOM)\n", class_idx); - g_hakmem_lock_depth--; -#endif - return NULL; // True system OOM - } - + /* BOX_BOUNDARY: Box 4 → Box E (SuperSlab Expansion) */ + extern __thread TinyTLSSlab g_tls_slabs[]; + if (!expansion_safe_expand(head, class_idx, g_tls_slabs)) { + // Expansion failed (OOM or capacity limit) #if !defined(NDEBUG) || defined(HAKMEM_SUPERSLAB_VERBOSE) g_hakmem_lock_depth++; - fprintf(stderr, "[HAKMEM] Successfully expanded SuperSlabHead for class %d\n", class_idx); + fprintf(stderr, "[HAKMEM] CRITICAL: Failed to expand SuperSlabHead for class %d (system OOM)\n", class_idx); g_hakmem_lock_depth--; #endif + return NULL; + } + /* BOX_BOUNDARY: Box E → Box 4 (TLS state guaranteed) */ + + // TLS state is now correct, reload local pointers + tls = &g_tls_slabs[class_idx]; + current_chunk = tls->ss; + +#if !defined(NDEBUG) || defined(HAKMEM_SUPERSLAB_VERBOSE) + g_hakmem_lock_depth++; + fprintf(stderr, "[HAKMEM] Successfully expanded SuperSlabHead for class %d\n", class_idx); + fprintf(stderr, "[HAKMEM] Box E bound slab 0: meta=%p slab_base=%p capacity=%u\n", + (void*)tls->meta, (void*)tls->slab_base, tls->meta ? tls->meta->capacity : 0); + g_hakmem_lock_depth--; +#endif + + // CRITICAL: Box E already initialized and bound slab 0 + // Return immediately to avoid double-initialization in refill logic + if (tls->meta && tls->slab_base) { + // Verify slab 0 is properly initialized + if (tls->slab_idx == 0 && tls->meta->capacity > 0) { +#if !defined(NDEBUG) || defined(HAKMEM_SUPERSLAB_VERBOSE) + g_hakmem_lock_depth++; + fprintf(stderr, "[HAKMEM] Returning new chunk with bound slab 0 (capacity=%u)\n", tls->meta->capacity); + g_hakmem_lock_depth--; +#endif + return tls->ss; + } } - // Update current_chunk and tls->ss to point to (potentially new) chunk - current_chunk = head->current_chunk; - tls->ss = current_chunk; - pthread_mutex_unlock(&expand_lock); - - // Verify chunk has free slabs - full_mask = (ss_slabs_capacity(current_chunk) >= 32) ? 0xFFFFFFFF : + // Verify chunk has free slabs (fallback safety check) + uint32_t full_mask_check = (ss_slabs_capacity(current_chunk) >= 32) ? 0xFFFFFFFF : ((1U << ss_slabs_capacity(current_chunk)) - 1); - if (!current_chunk || current_chunk->slab_bitmap == full_mask) { + if (!current_chunk || current_chunk->slab_bitmap == full_mask_check) { #if !defined(NDEBUG) || defined(HAKMEM_SUPERSLAB_VERBOSE) g_hakmem_lock_depth++; fprintf(stderr, "[HAKMEM] CRITICAL: Chunk still has no free slabs for class %d after expansion\n", class_idx); diff --git a/hakmem.d b/hakmem.d index d2ad4a31..166ba73c 100644 --- a/hakmem.d +++ b/hakmem.d @@ -25,8 +25,10 @@ hakmem.o: core/hakmem.c core/hakmem.h core/hakmem_build_flags.h \ core/box/../hakmem_tiny_config.h core/box/../box/tls_sll_box.h \ core/box/../box/../hakmem_tiny_config.h \ core/box/../box/../hakmem_build_flags.h \ - core/box/../box/../tiny_region_id.h core/box/front_gate_classifier.h \ - core/box/hak_wrappers.inc.h + core/box/../box/../tiny_region_id.h \ + core/box/../box/../hakmem_tiny_integrity.h \ + core/box/../box/../hakmem_tiny.h core/box/../hakmem_tiny_integrity.h \ + core/box/front_gate_classifier.h core/box/hak_wrappers.inc.h core/hakmem.h: core/hakmem_build_flags.h: core/hakmem_config.h: @@ -87,5 +89,8 @@ core/box/../box/tls_sll_box.h: core/box/../box/../hakmem_tiny_config.h: core/box/../box/../hakmem_build_flags.h: core/box/../box/../tiny_region_id.h: +core/box/../box/../hakmem_tiny_integrity.h: +core/box/../box/../hakmem_tiny.h: +core/box/../hakmem_tiny_integrity.h: core/box/front_gate_classifier.h: core/box/hak_wrappers.inc.h: diff --git a/hakmem_super_registry.d b/hakmem_super_registry.d index 9c4f8e56..8a414b68 100644 --- a/hakmem_super_registry.d +++ b/hakmem_super_registry.d @@ -2,12 +2,11 @@ hakmem_super_registry.o: core/hakmem_super_registry.c \ core/hakmem_super_registry.h core/hakmem_tiny_superslab.h \ core/superslab/superslab_types.h core/hakmem_tiny_superslab_constants.h \ core/superslab/superslab_inline.h core/superslab/superslab_types.h \ - core/tiny_debug_ring.h core/tiny_remote.h \ + core/tiny_debug_ring.h core/hakmem_build_flags.h core/tiny_remote.h \ core/superslab/../tiny_box_geometry.h \ core/superslab/../hakmem_tiny_superslab_constants.h \ core/superslab/../hakmem_tiny_config.h core/tiny_debug_ring.h \ - core/tiny_remote.h core/hakmem_tiny_superslab_constants.h \ - core/hakmem_build_flags.h + core/tiny_remote.h core/hakmem_tiny_superslab_constants.h core/hakmem_super_registry.h: core/hakmem_tiny_superslab.h: core/superslab/superslab_types.h: @@ -15,6 +14,7 @@ core/hakmem_tiny_superslab_constants.h: core/superslab/superslab_inline.h: core/superslab/superslab_types.h: core/tiny_debug_ring.h: +core/hakmem_build_flags.h: core/tiny_remote.h: core/superslab/../tiny_box_geometry.h: core/superslab/../hakmem_tiny_superslab_constants.h: @@ -22,4 +22,3 @@ core/superslab/../hakmem_tiny_config.h: core/tiny_debug_ring.h: core/tiny_remote.h: core/hakmem_tiny_superslab_constants.h: -core/hakmem_build_flags.h: diff --git a/hakmem_tiny_sfc.d b/hakmem_tiny_sfc.d index 7dca2a13..d76c9a20 100644 --- a/hakmem_tiny_sfc.d +++ b/hakmem_tiny_sfc.d @@ -9,7 +9,10 @@ hakmem_tiny_sfc.o: core/hakmem_tiny_sfc.c core/tiny_alloc_fast_sfc.inc.h \ core/superslab/../hakmem_tiny_superslab_constants.h \ core/superslab/../hakmem_tiny_config.h core/tiny_debug_ring.h \ core/tiny_remote.h core/hakmem_tiny_superslab_constants.h \ - core/tiny_tls.h + core/tiny_tls.h core/box/tls_sll_box.h core/box/../ptr_trace.h \ + core/box/../hakmem_tiny_config.h core/box/../hakmem_build_flags.h \ + core/box/../tiny_region_id.h core/box/../hakmem_build_flags.h \ + core/box/../hakmem_tiny_integrity.h core/box/../hakmem_tiny.h core/tiny_alloc_fast_sfc.inc.h: core/hakmem_tiny.h: core/hakmem_build_flags.h: @@ -31,3 +34,11 @@ core/tiny_debug_ring.h: core/tiny_remote.h: core/hakmem_tiny_superslab_constants.h: core/tiny_tls.h: +core/box/tls_sll_box.h: +core/box/../ptr_trace.h: +core/box/../hakmem_tiny_config.h: +core/box/../hakmem_build_flags.h: +core/box/../tiny_region_id.h: +core/box/../hakmem_build_flags.h: +core/box/../hakmem_tiny_integrity.h: +core/box/../hakmem_tiny.h: