Fix C0/C7 class confusion: Upgrade C7 stride to 2048B and fix meta->class_idx initialization

Root Cause:
1. C7 stride was 1024B, unable to serve 1024B user requests (need 1025B with header)
2. New SuperSlabs start with meta->class_idx=0 (mmap zero-init)
3. superslab_init_slab() only sets class_idx if meta->class_idx==255
4. Multiple code paths used conditional assignment (if class_idx==255), leaving C7 slabs with class_idx=0
5. This caused C7 blocks to be misidentified as C0, leading to HDR_META_MISMATCH errors

Changes:
1. Upgrade C7 stride: 1024B → 2048B (can now serve 1024B requests)
2. Update blocks_per_slab[7]: 64 → 32 (2048B stride / 64KB slab)
3. Update size-to-class LUT: entries 513-2048 now map to C7
4. Fix superslab_init_slab() fail-safe: only reinitialize if class_idx==255 (not 0)
5. Add explicit class_idx assignment in 6 initialization paths:
   - tiny_superslab_alloc.inc.h: superslab_refill() after init
   - hakmem_tiny_superslab.c: backend_shared after init (main path)
   - ss_unified_backend_box.c: unconditional assignment
   - ss_legacy_backend_box.c: explicit assignment
   - superslab_expansion_box.c: explicit assignment
   - ss_allocation_box.c: fail-safe condition fix

Fix P0 refill bug:
- Update obsolete array access after Phase 3d-B TLS SLL unification
- g_tls_sll_head[cls] → g_tls_sll[cls].head
- g_tls_sll_count[cls] → g_tls_sll[cls].count

Results:
- HDR_META_MISMATCH: eliminated (0 errors in 100K iterations)
- 1024B allocations now routed to C7 (Tiny fast path)
- NXT_MISALIGN warnings remain (legacy 1024B SuperSlabs, separate issue)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
Moe Charm (CI)
2025-11-21 13:44:05 +09:00
parent 66a29783a4
commit a78224123e
11 changed files with 266 additions and 42 deletions

View File

@ -408,7 +408,14 @@ void superslab_init_slab(SuperSlab* ss, int slab_idx, size_t block_size, uint32_
meta->capacity = capacity;
meta->carved = 0;
meta->owner_tid_low = (uint8_t)(owner_tid & 0xFFu);
// meta->class_idx is set by the caller (shared_pool / refill path)
// Fail-safe: stamp class_idx from geometry (stride → class).
// This normalizes both legacy and shared pool paths.
for (int i = 0; i < TINY_NUM_CLASSES; i++) {
if (g_tiny_class_sizes[i] == stride) {
meta->class_idx = (uint8_t)i;
break;
}
}
superslab_activate_slab(ss, slab_idx);
}

View File

@ -125,6 +125,11 @@ void* hak_tiny_alloc_superslab_backend_legacy(int class_idx)
for (int slab_idx = 0; slab_idx < cap; slab_idx++) {
TinySlabMeta* meta = &chunk->slabs[slab_idx];
// Skip slabs that belong to a different class (or are uninitialized).
if (meta->class_idx != (uint8_t)class_idx) {
continue;
}
if (meta->capacity == 0) {
continue;
}
@ -270,6 +275,10 @@ int expand_superslab_head(SuperSlabHead* head) {
superslab_init_slab(new_chunk, 0, block_size, owner_tid);
// CRITICAL FIX: Explicitly set class_idx to avoid C0/C7 confusion.
// New SuperSlabs start with meta->class_idx=0 (mmap zero-init).
new_chunk->slabs[0].class_idx = (uint8_t)head->class_idx;
// Initialize the next_chunk link to NULL
new_chunk->next_chunk = NULL;

View File

@ -64,11 +64,10 @@ void* hak_tiny_alloc_superslab_backend_shared(int class_idx)
superslab_init_slab(ss, slab_idx, block_size, 0);
meta = &ss->slabs[slab_idx];
// Ensure class_idx is bound to this class after init. superslab_init_slab
// does not touch class_idx by design; shared_pool owns that field.
if (meta->class_idx == 255) {
meta->class_idx = (uint8_t)class_idx;
}
// CRITICAL FIX: Always set class_idx after init to avoid C0/C7 confusion.
// New SuperSlabs start with meta->class_idx=0 (mmap zero-init).
// Must explicitly set to requested class, not just when class_idx==255.
meta->class_idx = (uint8_t)class_idx;
}
// Final contract check before computing addresses.

View File

@ -67,6 +67,10 @@ ExpansionResult expansion_expand_with_tls_guarantee(
size_t block_size = g_tiny_class_sizes[class_idx];
superslab_init_slab(new_ss, 0, block_size, my_tid);
// CRITICAL FIX: Explicitly set class_idx to avoid C0/C7 confusion.
// New SuperSlabs start with meta->class_idx=0 (mmap zero-init).
new_ss->slabs[0].class_idx = (uint8_t)class_idx;
// Now bind slab 0 to TLS state
result.new_state.ss = new_ss;
result.new_state.class_idx = class_idx;
@ -78,6 +82,14 @@ ExpansionResult expansion_expand_with_tls_guarantee(
// Formula: base = ss + (slab_idx * SLAB_SIZE) + (slab_idx == 0 ? SLAB0_OFFSET : 0)
result.new_state.slab_base = (uint8_t*)new_ss + SUPERSLAB_SLAB0_DATA_OFFSET;
// Debug: log backend used for expansion (first few only)
static _Atomic uint32_t g_ss_backend_log = 0;
uint32_t n = atomic_fetch_add_explicit(&g_ss_backend_log, 1, memory_order_relaxed);
if (n < 4) {
fprintf(stderr, "[SS_BACKEND] expand legacy cls=%d ss=%p slab_idx=0 base=%p\n",
class_idx, (void*)new_ss, result.new_state.slab_base);
}
result.success = true;
result.error_code = 0;

View File

@ -13,8 +13,7 @@
*
* HAKMEM_TINY_HEADER_CLASSIDX != 0:
* - Class 0: next_off = 0 (free中は header を潰す)
* - Class 1-6: next_off = 1
* - Class 7: next_off = 0
* - Class 1-7: next_off = 1 (headerを保持)
*
* HAKMEM_TINY_HEADER_CLASSIDX == 0:
* - 全クラス: next_off = 0