Box TLS-SLL + free-boundary hardening: normalize size classes C0–C6 to the base pointer (ptr-1) at the free boundary; route all caches and freelists through the base pointer; replace the remaining direct writes to g_tls_sll_head with the Box API (tls_sll_push/tls_sll_splice) in the refill, magazine, and ultra paths; C7 remains excluded. Fixes the rbp=0xa0 crash on free by preventing header overwrite and by centralizing the TLS-SLL invariants in one place.

This commit is contained in:
Moe Charm (CI)
2025-11-10 16:48:20 +09:00
parent 1b6624dec4
commit b09ba4d40d
26 changed files with 1079 additions and 354 deletions

View File

@@ -1,3 +1,8 @@
// ============================================================================
// Box TLS-SLL API
// ============================================================================
#include "box/tls_sll_box.h"
// ============================================================================
// Step 3: Cold-path outline - Wrapper Context Handler
// ============================================================================
@@ -147,10 +152,8 @@ void* hak_tiny_alloc(size_t size) {
// Minimal Front for hot tiny classes (bench-focused):
// SLL direct pop → minimal refill → pop, bypassing other layers.
if (__builtin_expect(class_idx <= 3, 1)) {
void* head = g_tls_sll_head[class_idx];
if (__builtin_expect(head != NULL, 1)) {
g_tls_sll_head[class_idx] = *(void**)head;
if (g_tls_sll_count[class_idx] > 0) g_tls_sll_count[class_idx]--;
void* head = NULL;
if (tls_sll_pop(class_idx, &head)) {
HAK_RET_ALLOC(class_idx, head);
}
// Refill a small batch directly from TLS-cached SuperSlab
@@ -159,10 +162,7 @@ void* hak_tiny_alloc(size_t size) {
#else
(void)sll_refill_small_from_ss(class_idx, 32);
#endif
head = g_tls_sll_head[class_idx];
if (__builtin_expect(head != NULL, 1)) {
g_tls_sll_head[class_idx] = *(void**)head;
if (g_tls_sll_count[class_idx] > 0) g_tls_sll_count[class_idx]--;
if (tls_sll_pop(class_idx, &head)) {
HAK_RET_ALLOC(class_idx, head);
}
// Fall through to slow path if still empty
@@ -205,11 +205,9 @@ void* hak_tiny_alloc(size_t size) {
tiny_small_mags_init_once();
if (class_idx > 3) tiny_mag_init_if_needed(class_idx);
#endif
void* head = g_tls_sll_head[class_idx];
if (__builtin_expect(head != NULL, 1)) {
void* head = NULL;
if (tls_sll_pop(class_idx, &head)) {
tiny_debug_ring_record(TINY_RING_EVENT_ALLOC_SUCCESS, (uint16_t)class_idx, head, 0);
g_tls_sll_head[class_idx] = *(void**)head;
if (g_tls_sll_count[class_idx] > 0) g_tls_sll_count[class_idx]--;
HAK_RET_ALLOC(class_idx, head);
}
#ifndef HAKMEM_TINY_BENCH_SLL_ONLY
@@ -231,11 +229,8 @@ void* hak_tiny_alloc(size_t size) {
#else
if (__builtin_expect(sll_refill_small_from_ss(class_idx, bench_refill) > 0, 0)) {
#endif
head = g_tls_sll_head[class_idx];
if (head) {
if (tls_sll_pop(class_idx, &head)) {
tiny_debug_ring_record(TINY_RING_EVENT_ALLOC_SUCCESS, (uint16_t)class_idx, head, 2);
g_tls_sll_head[class_idx] = *(void**)head;
if (g_tls_sll_count[class_idx] > 0) g_tls_sll_count[class_idx]--;
HAK_RET_ALLOC(class_idx, head);
}
}
@@ -254,6 +249,7 @@ void* hak_tiny_alloc(size_t size) {
}
}
if (__builtin_expect(hotmag_ptr != NULL, 1)) {
if (__builtin_expect(class_idx == 7, 0)) { *(void**)hotmag_ptr = NULL; }
tiny_debug_ring_record(TINY_RING_EVENT_ALLOC_SUCCESS, (uint16_t)class_idx, hotmag_ptr, 3);
HAK_RET_ALLOC(class_idx, hotmag_ptr);
}
@@ -282,6 +278,7 @@ void* hak_tiny_alloc(size_t size) {
#if HAKMEM_BUILD_DEBUG
g_tls_hit_count[class_idx]++;
#endif
if (__builtin_expect(class_idx == 7, 0)) { *(void**)fast_hot = NULL; }
tiny_debug_ring_record(TINY_RING_EVENT_ALLOC_SUCCESS, (uint16_t)class_idx, fast_hot, 4);
HAK_RET_ALLOC(class_idx, fast_hot);
}
@@ -292,6 +289,7 @@ void* hak_tiny_alloc(size_t size) {
#if HAKMEM_BUILD_DEBUG
g_tls_hit_count[class_idx]++;
#endif
if (__builtin_expect(class_idx == 7, 0)) { *(void**)fast = NULL; }
tiny_debug_ring_record(TINY_RING_EVENT_ALLOC_SUCCESS, (uint16_t)class_idx, fast, 5);
HAK_RET_ALLOC(class_idx, fast);
}