diff --git a/core/box/tls_sll_box.h b/core/box/tls_sll_box.h
index 6a0c1dc0..0256a158 100644
--- a/core/box/tls_sll_box.h
+++ b/core/box/tls_sll_box.h
@@ -28,6 +28,7 @@
 #include "../ptr_trace.h" // Debug-only: pointer next read/write tracing
 #include "../hakmem_tiny_config.h" // For TINY_NUM_CLASSES
 #include "../hakmem_build_flags.h"
+#include "../tiny_remote.h" // For TINY_REMOTE_SENTINEL detection
 #include "../tiny_region_id.h" // HEADER_MAGIC / HEADER_CLASS_MASK
 #include "../hakmem_tiny_integrity.h" // PRIORITY 2: Freelist integrity checks
 #include "../ptr_track.h" // Pointer tracking for debugging header corruption
@@ -231,6 +232,25 @@ static inline bool tls_sll_pop(int class_idx, void** out) {
         return false; // SLL empty
     }
 
+    // ✅ CRITICAL FIX: Detect remote sentinel leaked into TLS SLL
+    // The sentinel (0xBADA55BADA55BADA) is used by remote free operations.
+    // If it leaks into the TLS SLL head, dereferencing it causes SEGV.
+    if (__builtin_expect((uintptr_t)base == TINY_REMOTE_SENTINEL, 0)) {
+        // Reset corrupted TLS SLL state
+        g_tls_sll_head[class_idx] = NULL;
+        g_tls_sll_count[class_idx] = 0;
+
+        // Log sentinel detection (helps identify root cause)
+        static __thread int sentinel_logged = 0;
+        if (sentinel_logged < 10) {
+            fprintf(stderr, "[SENTINEL_DETECT] class=%d head=0x%lx (BADASS) - TLS SLL reset\n",
+                    class_idx, (unsigned long)TINY_REMOTE_SENTINEL);
+            sentinel_logged++;
+        }
+
+        return false; // Trigger refill path
+    }
+
     // PRIORITY 2: Validate base pointer BEFORE dereferencing
 #if !HAKMEM_BUILD_RELEASE
     if (!validate_ptr_range(base, "tls_sll_pop_base")) {
diff --git a/core/hakmem_tiny_fastcache.inc.h b/core/hakmem_tiny_fastcache.inc.h
index 5a026375..b12458ad 100644
--- a/core/hakmem_tiny_fastcache.inc.h
+++ b/core/hakmem_tiny_fastcache.inc.h
@@ -18,6 +18,8 @@
 #include <stdint.h>
 #include <stddef.h>
 #include <stdio.h>
+#include "tiny_remote.h" // For TINY_REMOTE_SENTINEL detection
+#include "box/tiny_next_ptr_box.h" // For tiny_next_read()
 
 // External TLS variables
 extern int g_fast_enable;
@@ -81,16 +83,14 @@ static inline __attribute__((always_inline)) void* tiny_fast_pop(int class_idx)
     if (!head) return NULL;
     // Phase 7: header-aware next pointer (C0-C6: base+1, C7: base)
 #if HAKMEM_TINY_HEADER_CLASSIDX
-    const size_t next_offset = (class_idx == 7) ? 0 : 1;
+    // Phase E1-CORRECT: ALL classes have 1-byte header, next ptr at offset 1
+    const size_t next_offset = 1;
 #else
     const size_t next_offset = 0;
 #endif
-    // Use safe unaligned load for "next" to avoid UB when offset==1
-    void* next = NULL;
-    {
-        #include "tiny_nextptr.h"
-        next = tiny_next_load(head, class_idx);
-    }
+    // Phase E1-CORRECT: Use Box API for next pointer read
+    #include "box/tiny_next_ptr_box.h"
+    void* next = tiny_next_read(class_idx, head);
     g_fast_head[class_idx] = next;
     uint16_t count = g_fast_count[class_idx];
     if (count > 0) {
@@ -98,17 +98,38 @@
     } else {
         g_fast_count[class_idx] = 0;
     }
-    // CRITICAL FIX: Convert base -> user pointer for classes 0-6
-    // Headerless class (1KB): clear embedded next pointer before returning to user
-    if (__builtin_expect(class_idx == 7, 0)) {
-        *(void**)head = NULL;
-        return head; // C7: return base (headerless)
-    }
-    // C0-C6: return user pointer (base+1)
+    // Phase E1-CORRECT: All classes return user pointer (base+1)
     return (void*)((uint8_t*)head + 1);
 }
 
 static inline __attribute__((always_inline)) int tiny_fast_push(int class_idx, void* ptr) {
+    // ✅ CRITICAL FIX: Prevent sentinel-poisoned nodes from entering fast cache
+    // Remote free operations can write SENTINEL to node->next, which eventually
+    // propagates through freelist → TLS list → fast cache. If we push such a node,
+    // the next pop will try to dereference the sentinel → SEGV!
+    if (__builtin_expect((uintptr_t)ptr == TINY_REMOTE_SENTINEL, 0)) {
+        static __thread int sentinel_ptr_logged = 0;
+        if (sentinel_ptr_logged < 5) {
+            fprintf(stderr, "[FAST_PUSH_SENTINEL] cls=%d ptr=%p BLOCKED (ptr is sentinel)!\n",
+                    class_idx, ptr);
+            sentinel_ptr_logged++;
+        }
+        return 0; // Reject push
+    }
+
+    // ✅ CRITICAL FIX #2: Also check if node's NEXT pointer is sentinel (defense-in-depth)
+    // This catches nodes that have sentinel in their next field (from remote free)
+    void* next_check = tiny_next_read(class_idx, ptr);
+    if (__builtin_expect((uintptr_t)next_check == TINY_REMOTE_SENTINEL, 0)) {
+        static __thread int sentinel_next_logged = 0;
+        if (sentinel_next_logged < 5) {
+            fprintf(stderr, "[FAST_PUSH_NEXT_SENTINEL] cls=%d ptr=%p next=%p BLOCKED (next is sentinel)!\n",
+                    class_idx, ptr, next_check);
+            sentinel_next_logged++;
+        }
+        return 0; // Reject push
+    }
+
     if (!g_fast_enable) {
         g_fast_push_disabled[class_idx]++;
         tiny_fast_debug_log(class_idx, "disabled", 0, 0);
@@ -128,14 +149,14 @@ static inline __attribute__((always_inline)) int tiny_fast_push(int class_idx, v
     }
     // Phase 7: header-aware next pointer (C0-C6: base+1, C7: base)
 #if HAKMEM_TINY_HEADER_CLASSIDX
-    const size_t next_offset2 = (class_idx == 7) ? 0 : 1;
+    // Phase E1-CORRECT: ALL classes have 1-byte header, next ptr at offset 1
+    const size_t next_offset2 = 1;
 #else
     const size_t next_offset2 = 0;
 #endif
-    {
-        #include "tiny_nextptr.h"
-        tiny_next_store(ptr, class_idx, g_fast_head[class_idx]);
-    }
+    // Phase E1-CORRECT: Use Box API for next pointer write
+    #include "box/tiny_next_ptr_box.h"
+    tiny_next_write(class_idx, ptr, g_fast_head[class_idx]);
     g_fast_head[class_idx] = ptr;
     g_fast_count[class_idx] = (uint16_t)(count + 1);
     g_fast_push_hits[class_idx]++;
diff --git a/core/hakmem_tiny_tls_list.h b/core/hakmem_tiny_tls_list.h
index 9dc920e8..04fcf090 100644
--- a/core/hakmem_tiny_tls_list.h
+++ b/core/hakmem_tiny_tls_list.h
@@ -2,9 +2,9 @@
 #define HAKMEM_TINY_TLS_LIST_H
 
 #include <stdint.h>
+#include <stdio.h> // For fprintf in sentinel detection
 #include "tiny_remote.h" // TINY_REMOTE_SENTINEL for head poisoning guard
-#include "tiny_nextptr.h" // header-aware next load/store
-#include "tiny_nextptr.h"
+#include "box/tiny_next_ptr_box.h" // Phase E1-CORRECT: unified next pointer API
 
 // Forward declarations
 typedef struct TinySlabMeta TinySlabMeta;
@@ -59,14 +59,39 @@ static inline void* tls_list_pop(TinyTLSList* tls, int class_idx) {
         tls->count = 0;
         return NULL;
     }
-    tls->head = tiny_next_load(head, class_idx);
+    tls->head = tiny_next_read(class_idx, head);
     if (tls->count > 0) tls->count--;
     return head;
 }
 
 static inline void tls_list_push(TinyTLSList* tls, void* node, int class_idx) {
     if (!node) return;
-    tiny_next_store(node, class_idx, tls->head);
+
+    // ✅ CRITICAL FIX: Prevent sentinel-poisoned nodes from entering TLS list
+    // Defense-in-depth: Same guard as tiny_fast_push
+    if (__builtin_expect((uintptr_t)node == TINY_REMOTE_SENTINEL, 0)) {
+        static __thread int tls_sentinel_node_logged = 0;
+        if (tls_sentinel_node_logged < 5) {
+            fprintf(stderr, "[TLS_LIST_PUSH_SENTINEL] cls=%d node=%p BLOCKED (node is sentinel)!\n",
+                    class_idx, node);
+            tls_sentinel_node_logged++;
+        }
+        return; // Reject push
+    }
+
+    // ✅ CRITICAL FIX #2: Check if node's NEXT pointer is sentinel
+    void* next_check = tiny_next_read(class_idx, node);
+    if (__builtin_expect((uintptr_t)next_check == TINY_REMOTE_SENTINEL, 0)) {
+        static __thread int tls_sentinel_next_logged = 0;
+        if (tls_sentinel_next_logged < 5) {
+            fprintf(stderr, "[TLS_LIST_PUSH_NEXT_SENTINEL] cls=%d node=%p next=%p BLOCKED (next is sentinel)!\n",
+                    class_idx, node, next_check);
+            tls_sentinel_next_logged++;
+        }
+        return; // Reject push
+    }
+
+    tiny_next_write(class_idx, node, tls->head);
     tls->head = node;
     tls->count++;
 }
@@ -78,14 +103,14 @@ static inline void tls_list_push(TinyTLSList* tls, void* node, int class_idx) {
 // - caller handles spill/thresholds separately
 static inline void* tls_list_pop_fast(TinyTLSList* tls, int class_idx) {
     void* head = tls->head;
     if (!head) return NULL;
-    tls->head = tiny_next_load(head, class_idx);
+    tls->head = tiny_next_read(class_idx, head);
     if (tls->count > 0) tls->count--;
     return head;
 }
 static inline void tls_list_push_fast(TinyTLSList* tls, void* node, int class_idx) {
     if (!node) return;
-    tiny_next_store(node, class_idx, tls->head);
+    tiny_next_write(class_idx, node, tls->head);
     tls->head = node;
     tls->count++;
 }
@@ -111,14 +136,14 @@ static inline uint32_t tls_list_bulk_take(TinyTLSList* tls,
     void* cur = head;
     uint32_t taken = 1;
     while (taken < want) {
-        void* next = tiny_next_load(cur, class_idx);
+        void* next = tiny_next_read(class_idx, cur);
         if (!next) break;
        cur = next;
         taken++;
     }
     void* tail = cur;
-    void* rest = tiny_next_load(tail, class_idx);
-    tiny_next_store(tail, class_idx, NULL);
+    void* rest = tiny_next_read(class_idx, tail);
+    tiny_next_write(class_idx, tail, NULL);
     tls->head = rest;
     tls->count -= taken;
 
@@ -130,7 +155,7 @@ static inline uint32_t tls_list_bulk_take(TinyTLSList* tls,
 static inline uint32_t tls_list_count_chain(void* head, int class_idx) {
     uint32_t cnt = 0;
     if (!head) return 0;
-    while (head) { cnt++; head = tiny_next_load(head, class_idx); }
+    while (head) { cnt++; head = tiny_next_read(class_idx, head); }
     return cnt;
 }
 
@@ -144,17 +169,17 @@ static inline void tls_list_bulk_put(TinyTLSList* tls,
         // Determine tail and count if not supplied
         tail = head;
         uint32_t computed = 1;
-        while (tiny_next_load(tail, class_idx)) { tail = tiny_next_load(tail, class_idx); computed++; }
+        while (tiny_next_read(class_idx, tail)) { tail = tiny_next_read(class_idx, tail); computed++; }
         if (count == 0) count = computed;
     }
     if (count == 0) {
         count = tls_list_count_chain(head, class_idx);
         // Move tail pointer to end if still NULL (just to be safe)
         void* cur2 = head;
-        while (tiny_next_load(cur2, class_idx)) cur2 = tiny_next_load(cur2, class_idx);
+        while (tiny_next_read(class_idx, cur2)) cur2 = tiny_next_read(class_idx, cur2);
         tail = cur2;
     }
-    tiny_next_store(tail, class_idx, tls->head);
+    tiny_next_write(class_idx, tail, tls->head);
     tls->head = head;
     tls->count += count;
 }
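
Note: every guard added above has the same shape: compare the node (and the next pointer stored in it) against TINY_REMOTE_SENTINEL before dereferencing or linking, then either reject the push or reset the list and fall back to the refill path. A minimal standalone sketch of that pattern follows; it is not HAKMEM code. SENTINEL, guarded_push, and guarded_pop are hypothetical names, and a plain first-word link stands in for the header-aware tiny_next_read/tiny_next_write Box API.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SENTINEL ((uintptr_t)0xBADA55BADA55BADAULL)

    static void*    g_head  = NULL; /* stand-in for g_tls_sll_head[class] */
    static unsigned g_count = 0;    /* stand-in for g_tls_sll_count[class] */

    /* Push-side guard: refuse to link a poisoned node (cf. tiny_fast_push). */
    static int guarded_push(void* node) {
        if ((uintptr_t)node == SENTINEL) {
            fprintf(stderr, "push blocked: node is the sentinel\n");
            return 0;
        }
        if (*(uintptr_t*)node == SENTINEL) {
            fprintf(stderr, "push blocked: node->next is the sentinel\n");
            return 0;
        }
        *(void**)node = g_head; /* link node in front of current head */
        g_head = node;
        g_count++;
        return 1;
    }

    /* Pop-side guard: a poisoned head means the chain is corrupt, so drop
     * the whole list and report empty (cf. tls_sll_pop's refill path). */
    static void* guarded_pop(void) {
        void* node = g_head;
        if (!node) return NULL;
        if ((uintptr_t)node == SENTINEL) {
            g_head  = NULL;
            g_count = 0;
            fprintf(stderr, "pop: sentinel head detected, list reset\n");
            return NULL;
        }
        g_head = *(void**)node;
        if (g_count > 0) g_count--;
        return node;
    }

    int main(void) {
        void* a = malloc(64);
        *(uintptr_t*)a = SENTINEL;  /* simulate remote-free poisoning of next */
        printf("push(poisoned) -> %d\n", guarded_push(a)); /* 0: rejected */

        g_head = (void*)SENTINEL;   /* simulate a leaked sentinel head */
        printf("pop() -> %p\n", guarded_pop());            /* NULL: list reset */
        free(a);
        return 0;
    }

The push-side checks are what keep the poison from spreading (a rejected node simply takes a slower free path), while the pop-side reset is the last line of defense once a sentinel has already reached the head.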
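
Note on the Phase E1-CORRECT hunks in tiny_fast_pop/tiny_fast_push: the old code special-cased class 7 as headerless (next pointer at offset 0, user pointer == base), while classes 0-6 carried a 1-byte header. The new convention is uniform: every class stores a 1-byte header at base, the next pointer lives at offset 1, and the user pointer is always base + 1. A tiny sketch of that conversion under the same uniform-layout assumption; base_to_user and user_to_base are hypothetical helper names, not HAKMEM API.

    #include <assert.h>
    #include <stdint.h>

    /* Uniform layout: byte 0 of each block is the class header;
     * user data (and the freelist next pointer) starts at byte 1. */
    static inline void* base_to_user(void* base) { return (uint8_t*)base + 1; }
    static inline void* user_to_base(void* user) { return (uint8_t*)user - 1; }

    int main(void) {
        uint8_t block[32];
        block[0] = 0; /* the 1-byte class header lives at base */
        void* user = base_to_user(block);
        assert(user_to_base(user) == (void*)block);
        return 0;
    }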