Fix ptr_user_to_base_blind regression: use class-aware base calculation and correct slab index lookup

Moe Charm (CI)
2025-12-03 12:29:31 +09:00
parent c2716f5c01
commit c91602f181
4 changed files with 65 additions and 55 deletions
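
Summary of the fix: the free paths previously converted the USER pointer to a block base with ptr_user_to_base_blind() before any class was known, then fed that blind base into slab_index_for(). The corrected paths pass the USER pointer to slab_index_for() directly (per the in-line FIX comments, this handles the C0/C7 boundary correctly) and defer the USER → BASE conversion until class_idx has been read from the per-slab metadata, using the class-aware ptr_user_to_base() with the HAK_USER_FROM_RAW / HAK_BASE_TO_RAW wrappers. A minimal sketch of the intended resolution order, assuming the tiny allocator's internal headers; tiny_resolve_class_and_base() is a hypothetical name used for illustration only, not part of this commit:

// Sketch only (hypothetical helper): resolve class_idx from per-slab metadata
// first, then do the class-aware USER -> BASE conversion with the known class.
static inline int tiny_resolve_class_and_base(SuperSlab* ss, void* ptr, void** base_out) {
    int slab_idx = slab_index_for(ss, ptr);                  // USER pointer, not a blind base
    if (slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) return -1;
    TinySlabMeta* meta = &ss->slabs[slab_idx];
    if (meta->class_idx >= TINY_NUM_CLASSES) return -1;      // class not resolvable from this slab
    // All classes (C0-C7) carry a 1-byte header (Phase E1-CORRECT), so the
    // conversion takes class_idx rather than guessing a fixed offset.
    *base_out = HAK_BASE_TO_RAW(ptr_user_to_base(HAK_USER_FROM_RAW(ptr), (int)meta->class_idx));
    return (int)meta->class_idx;
}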


@@ -125,8 +125,8 @@ void hak_tiny_free_with_slab(void* ptr, TinySlab* slab) {
if (!ss || ss->magic != SUPERSLAB_MAGIC) return;
// Derive class_idx from per-slab metadata instead of ss->size_class
int class_idx = -1;
void* base = ptr_user_to_base_blind(ptr);
int slab_idx = slab_index_for(ss, base);
// void* base = ptr_user_to_base_blind(ptr); // FIX: Use ptr (USER) directly
int slab_idx = slab_index_for(ss, ptr); // FIX: pass ptr (USER) so slab_index_for handles the C0/C7 boundary correctly
if (slab_idx >= 0 && slab_idx < ss_slabs_capacity(ss)) {
TinySlabMeta* meta_probe = &ss->slabs[slab_idx];
if (meta_probe->class_idx < TINY_NUM_CLASSES) {
@@ -155,9 +155,9 @@ void hak_tiny_free_with_slab(void* ptr, TinySlab* slab) {
}
tiny_debug_ring_record(TINY_RING_EVENT_FREE_ENTER, (uint16_t)class_idx, ptr, 0);
// Detect cross-thread: cross-thread free MUST go via superslab path
// FIX: Phase E1-CORRECT - Convert USER → BASE before slab index calculation
base = ptr_user_to_base_blind(ptr);
slab_idx = slab_index_for(ss, base);
// FIX: Use ptr (USER) for slab index calculation to handle C0/C7 boundary correctly
// base = ptr_user_to_base_blind(ptr);
slab_idx = slab_index_for(ss, ptr);
int ss_cap = ss_slabs_capacity(ss);
if (__builtin_expect(slab_idx < 0 || slab_idx >= ss_cap, 0)) {
tiny_debug_ring_record(TINY_RING_EVENT_SUPERSLAB_ADOPT_FAIL, (uint16_t)0xFEu, ss, (uintptr_t)slab_idx);
@@ -167,8 +167,8 @@ void hak_tiny_free_with_slab(void* ptr, TinySlab* slab) {
if (__builtin_expect(g_tiny_safe_free, 0)) {
size_t blk = g_tiny_class_sizes[class_idx];
uint8_t* slab_base = tiny_slab_base_for(ss, slab_idx);
// Phase E1-CORRECT: All classes have headers, validate block base (ptr-1) not user ptr
uintptr_t delta = (uintptr_t)ptr_user_to_base_blind(ptr) - (uintptr_t)slab_base;
// Phase E1-CORRECT: All classes have headers, validate block base using known class_idx
uintptr_t delta = (uintptr_t)HAK_BASE_TO_RAW(ptr_user_to_base(HAK_USER_FROM_RAW(ptr), class_idx)) - (uintptr_t)slab_base;
int cap_ok = (meta->capacity > 0) ? 1 : 0;
int align_ok = (delta % blk) == 0;
int range_ok = cap_ok && (delta / blk) < meta->capacity;
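For intuition on the check above, with illustrative numbers (not taken from the codebase): if blk = 16 and meta->capacity = 256, a block base at slab_base + 416 gives delta = 416, so align_ok holds (416 % 16 == 0) and range_ok holds (416 / 16 = 26 < 256); validating the raw user pointer instead would give delta = 417 and trip the alignment check, which is why the class-aware conversion runs before the comparison.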
@@ -216,7 +216,7 @@ void hak_tiny_free_with_slab(void* ptr, TinySlab* slab) {
if (__builtin_expect(g_debug_fast0, 0)) {
tiny_debug_ring_record(TINY_RING_EVENT_FRONT_BYPASS, (uint16_t)class_idx, ptr, (uintptr_t)slab_idx);
// Phase E1-CORRECT: ALL classes (C0-C7) have 1-byte header
void* base = ptr_user_to_base_blind(ptr);
void* base = HAK_BASE_TO_RAW(ptr_user_to_base(HAK_USER_FROM_RAW(ptr), class_idx));
void* prev = meta->freelist;
tiny_next_write(class_idx, base, prev); // Box API: uses offset 1 for headers
meta->freelist = base;
@@ -234,7 +234,7 @@ void hak_tiny_free_with_slab(void* ptr, TinySlab* slab) {
// Front-V2: try to return to TLS magazine first (A/B, default OFF)
// Phase 7-Step8: Use config macro for dead code elimination in PGO mode
if (__builtin_expect(TINY_FRONT_HEAP_V2_ENABLED && class_idx <= 3, 0)) {
void* base = ptr_user_to_base_blind(ptr);
void* base = HAK_BASE_TO_RAW(ptr_user_to_base(HAK_USER_FROM_RAW(ptr), class_idx));
if (tiny_heap_v2_try_push(class_idx, base)) {
tiny_debug_ring_record(TINY_RING_EVENT_FREE_FAST, (uint16_t)class_idx, ptr, slab_idx);
HAK_STAT_FREE(class_idx);
@@ -244,7 +244,7 @@ void hak_tiny_free_with_slab(void* ptr, TinySlab* slab) {
if (g_fast_enable && g_fast_cap[class_idx] != 0) {
// Phase E1-CORRECT: ALL classes (C0-C7) have 1-byte header
void* base = ptr_user_to_base_blind(ptr);
void* base = HAK_BASE_TO_RAW(ptr_user_to_base(HAK_USER_FROM_RAW(ptr), class_idx));
int pushed = 0;
// Phase 7-Step5: Use config macro for dead code elimination in PGO mode
if (__builtin_expect(TINY_FRONT_FASTCACHE_ENABLED && class_idx <= 3, 1)) {
@@ -268,7 +268,7 @@ void hak_tiny_free_with_slab(void* ptr, TinySlab* slab) {
// TinyHotMag front push8/16/32B, A/B
if (__builtin_expect(g_hotmag_enable && class_idx <= 2, 1)) {
// Phase E1-CORRECT: ALL classes (C0-C7) have 1-byte header
void* base = ptr_user_to_base_blind(ptr);
void* base = HAK_BASE_TO_RAW(ptr_user_to_base(HAK_USER_FROM_RAW(ptr), class_idx));
if (hotmag_push(class_idx, base)) {
tiny_debug_ring_record(TINY_RING_EVENT_FREE_RETURN_MAG, (uint16_t)class_idx, ptr, 1);
HAK_STAT_FREE(class_idx);
@@ -277,7 +277,7 @@ void hak_tiny_free_with_slab(void* ptr, TinySlab* slab) {
}
if (tls->count < tls->cap) {
// Phase E1-CORRECT: ALL classes (C0-C7) have 1-byte header
void* base = ptr_user_to_base_blind(ptr);
void* base = HAK_BASE_TO_RAW(ptr_user_to_base(HAK_USER_FROM_RAW(ptr), class_idx));
tiny_tls_list_guard_push(class_idx, tls, base);
tls_list_push(tls, base, class_idx);
tiny_debug_ring_record(TINY_RING_EVENT_FREE_LOCAL, (uint16_t)class_idx, ptr, 0);
@@ -290,7 +290,7 @@ void hak_tiny_free_with_slab(void* ptr, TinySlab* slab) {
}
{
// Phase E1-CORRECT: ALL classes (C0-C7) have 1-byte header
void* base = ptr_user_to_base_blind(ptr);
void* base = HAK_BASE_TO_RAW(ptr_user_to_base(HAK_USER_FROM_RAW(ptr), class_idx));
tiny_tls_list_guard_push(class_idx, tls, base);
tls_list_push(tls, base, class_idx);
}
@@ -307,9 +307,9 @@ void hak_tiny_free_with_slab(void* ptr, TinySlab* slab) {
}
#include "tiny_free_magazine.inc.h"
// ============================================================================
// ============================================================================
// Phase 6.23: SuperSlab Allocation Helpers
// ============================================================================
// ============================================================================
// Phase 6.24: Allocate from SuperSlab slab (lazy freelist + linear allocation)
#include "tiny_superslab_alloc.inc.h"
@@ -332,8 +332,8 @@ void hak_tiny_free(void* ptr) {
// Resolve class_idx from per-slab metadata instead of ss->size_class
SuperSlab* ss = hak_super_lookup(ptr);
if (ss && ss->magic == SUPERSLAB_MAGIC) {
void* base = ptr_user_to_base_blind(ptr);
int sidx = slab_index_for(ss, base);
// void* base = ptr_user_to_base_blind(ptr); // FIX: Use ptr
int sidx = slab_index_for(ss, ptr);
if (sidx >= 0 && sidx < ss_slabs_capacity(ss)) {
TinySlabMeta* m = &ss->slabs[sidx];
if (m->class_idx < TINY_NUM_CLASSES) {
@@ -392,8 +392,8 @@ void hak_tiny_free(void* ptr) {
// Resolve class_idx from per-slab metadata instead of ss->size_class
SuperSlab* ss = hak_super_lookup(ptr);
if (ss && ss->magic == SUPERSLAB_MAGIC) {
void* base = ptr_user_to_base_blind(ptr);
int sidx = slab_index_for(ss, base);
// void* base = ptr_user_to_base_blind(ptr); // FIX: Use ptr
int sidx = slab_index_for(ss, ptr);
if (sidx >= 0 && sidx < ss_slabs_capacity(ss)) {
TinySlabMeta* m = &ss->slabs[sidx];
if (m->class_idx < TINY_NUM_CLASSES) {
@@ -470,8 +470,8 @@ void hak_tiny_free(void* ptr) {
if (g_use_superslab) {
fast_ss = hak_super_lookup(ptr);
if (fast_ss && fast_ss->magic == SUPERSLAB_MAGIC) {
void* base = ptr_user_to_base_blind(ptr);
int sidx = slab_index_for(fast_ss, base);
// void* base = ptr_user_to_base_blind(ptr); // FIX: Use ptr
int sidx = slab_index_for(fast_ss, ptr);
if (sidx >= 0 && sidx < ss_slabs_capacity(fast_ss)) {
TinySlabMeta* m = &fast_ss->slabs[sidx];
if (m->class_idx < TINY_NUM_CLASSES) {
@@ -494,8 +494,8 @@ void hak_tiny_free(void* ptr) {
int ss_cls = -1, ts_cls = -1;
SuperSlab* chk_ss = fast_ss ? fast_ss : (g_use_superslab ? hak_super_lookup(ptr) : NULL);
if (chk_ss && chk_ss->magic == SUPERSLAB_MAGIC) {
void* base = ptr_user_to_base_blind(ptr);
int sidx = slab_index_for(chk_ss, base);
// void* base = ptr_user_to_base_blind(ptr); // FIX: Use ptr
int sidx = slab_index_for(chk_ss, ptr);
if (sidx >= 0 && sidx < ss_slabs_capacity(chk_ss)) {
TinySlabMeta* m = &chk_ss->slabs[sidx];
if (m->class_idx < TINY_NUM_CLASSES) {
@@ -516,7 +516,7 @@ void hak_tiny_free(void* ptr) {
}
if (fast_class_idx >= 0 && g_fast_enable && g_fast_cap[fast_class_idx] != 0) {
// Phase E1-CORRECT: ALL classes (C0-C7) have 1-byte header
void* base2 = ptr_user_to_base_blind(ptr);
void* base2 = HAK_BASE_TO_RAW(ptr_user_to_base(HAK_USER_FROM_RAW(ptr), fast_class_idx));
// PRIORITY 1: Try FastCache first (bypasses SLL when Front-Direct)
int pushed = 0;
// Phase 7-Step5: Use config macro for dead code elimination in PGO mode
@@ -543,8 +543,8 @@ void hak_tiny_free(void* ptr) {
if (ss && ss->magic == SUPERSLAB_MAGIC) {
// Derive class from per-slab meta
int cls = -1;
void* base = ptr_user_to_base_blind(ptr);
int sidx = slab_index_for(ss, base);
// void* base = ptr_user_to_base_blind(ptr); // FIX: Use ptr
int sidx = slab_index_for(ss, ptr);
if (sidx >= 0 && sidx < ss_slabs_capacity(ss)) {
TinySlabMeta* m = &ss->slabs[sidx];
if (m->class_idx < TINY_NUM_CLASSES) {
@@ -574,9 +574,9 @@ void hak_tiny_free(void* ptr) {
hak_tiny_free_with_slab(ptr, slab);
}
// ============================================================================
// ============================================================================
// EXTRACTED TO hakmem_tiny_query.c (Phase 2B-1)
// ============================================================================
// ============================================================================
// EXTRACTED: int hak_tiny_is_managed(void* ptr) {
// EXTRACTED: if (!ptr || !g_tiny_initialized) return 0;
// EXTRACTED: // Phase 6.12.1: O(1) slab lookup via registry/list
@@ -631,9 +631,9 @@ void hak_tiny_free(void* ptr) {
// EXTRACTED: }
// ============================================================================
// ============================================================================
// Statistics and Debug Functions - Extracted to hakmem_tiny_stats.c
// ============================================================================
// ============================================================================
// (Phase 2B API headers moved to top of file)
@@ -672,4 +672,4 @@ void hak_tiny_shutdown(void) {
// Always-available: Trim empty slabs (release fully-free slabs)
// Always-available: Trim empty slabs (release fully-free slabs)