Phase 3d-B: TLS Cache Merge - Unified g_tls_sll[] structure (+12-18% expected)
Merge separate g_tls_sll_head[] and g_tls_sll_count[] arrays into a unified TinyTLSSLL struct to improve L1D cache locality. Expected performance gain: +12-18% from reducing cache line splits (2 loads → 1 load per operation).

Changes:
- core/hakmem_tiny.h: Add TinyTLSSLL type (16B aligned, head+count+pad)
- core/hakmem_tiny.c: Replace separate arrays with g_tls_sll[8]
- core/box/tls_sll_box.h: Update Box API (13 sites) for unified access
- Updated 32+ files: All g_tls_sll_head[i] → g_tls_sll[i].head
- Updated 32+ files: All g_tls_sll_count[i] → g_tls_sll[i].count
- core/hakmem_tiny_integrity.h: Unified canary guards
- core/box/integrity_box.c: Simplified canary validation
- Makefile: Added core/box/tiny_sizeclass_hist_box.o to link

Build: ✅ PASS (10K ops sanity test)
Warnings: Only pre-existing LTO type mismatches (unrelated)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -14,8 +14,8 @@
|
||||
#include "tiny_region_id.h" // For HEADER_MAGIC, HEADER_CLASS_MASK (Fix #7)
|
||||
|
||||
// External TLS variables (defined in hakmem_tiny.c)
|
||||
extern __thread void* g_tls_sll_head[TINY_NUM_CLASSES];
|
||||
extern __thread uint32_t g_tls_sll_count[TINY_NUM_CLASSES];
|
||||
// Phase 3d-B: TLS Cache Merge - Unified TLS SLL structure
|
||||
extern __thread TinyTLSSLL g_tls_sll[TINY_NUM_CLASSES];
|
||||
|
||||
#ifndef TINY_NUM_CLASSES
|
||||
#define TINY_NUM_CLASSES 8
|
||||
@@ -49,19 +49,19 @@ extern __thread uint32_t g_tls_sll_count[TINY_NUM_CLASSES];
|
||||
(ptr_out) = NULL; \
|
||||
break; \
|
||||
} \
|
||||
void* _head = g_tls_sll_head[(class_idx)]; \
|
||||
void* _head = g_tls_sll[(class_idx)].head; \
|
||||
if (__builtin_expect(_head != NULL, 1)) { \
|
||||
if (__builtin_expect((uintptr_t)_head == TINY_REMOTE_SENTINEL, 0)) { \
|
||||
/* Break the chain defensively if sentinel leaked into TLS SLL */ \
|
||||
g_tls_sll_head[(class_idx)] = NULL; \
|
||||
if (g_tls_sll_count[(class_idx)] > 0) g_tls_sll_count[(class_idx)]--; \
|
||||
g_tls_sll[(class_idx)].head = NULL; \
|
||||
if (g_tls_sll[(class_idx)].count > 0) g_tls_sll[(class_idx)].count--; \
|
||||
(ptr_out) = NULL; \
|
||||
} else { \
|
||||
/* Phase E1-CORRECT: Use Box API for next pointer read */ \
|
||||
void* _next = tiny_next_read(class_idx, _head); \
|
||||
g_tls_sll_head[(class_idx)] = _next; \
|
||||
if (g_tls_sll_count[(class_idx)] > 0) { \
|
||||
g_tls_sll_count[(class_idx)]--; \
|
||||
g_tls_sll[(class_idx)].head = _next; \
|
||||
if (g_tls_sll[(class_idx)].count > 0) { \
|
||||
g_tls_sll[(class_idx)].count--; \
|
||||
} \
|
||||
/* Phase 7: Fast path returns BASE pointer; HAK_RET_ALLOC does BASE→USER */ \
|
||||
(ptr_out) = _head; \
|
||||
@@ -103,15 +103,15 @@ extern __thread uint32_t g_tls_sll_count[TINY_NUM_CLASSES];
|
||||
/* Restore header at BASE (not at user). */ \
|
||||
*_base = HEADER_MAGIC | ((class_idx) & HEADER_CLASS_MASK); \
|
||||
/* Link node using BASE as the canonical SLL node address. */ \
|
||||
tiny_next_write((class_idx), _base, g_tls_sll_head[(class_idx)]); \
|
||||
g_tls_sll_head[(class_idx)] = _base; \
|
||||
g_tls_sll_count[(class_idx)]++; \
|
||||
tiny_next_write((class_idx), _base, g_tls_sll[(class_idx)].head); \
|
||||
g_tls_sll[(class_idx)].head = _base; \
|
||||
g_tls_sll[(class_idx)].count++; \
|
||||
} while(0)
|
||||
#else
|
||||
#define TINY_ALLOC_FAST_PUSH_INLINE(class_idx, ptr) do { \
|
||||
tiny_next_write(class_idx, (ptr), g_tls_sll_head[(class_idx)]); \
|
||||
g_tls_sll_head[(class_idx)] = (ptr); \
|
||||
g_tls_sll_count[(class_idx)]++; \
|
||||
tiny_next_write(class_idx, (ptr), g_tls_sll[(class_idx)].head); \
|
||||
g_tls_sll[(class_idx)].head = (ptr); \
|
||||
g_tls_sll[(class_idx)].count++; \
|
||||
} while(0)
|
||||
#endif
|
||||
|
||||
|
||||
Reference in New Issue
Block a user