# hakmem/core/hakmem_tiny_bg_spill.c

Commit `72b38bc994` by Moe Charm (CI): Phase E3-FINAL: Fix Box API offset bugs - ALL classes now use correct offsets
## Root Cause Analysis (GPT5)

**Physical Layout Constraints**:
- Class 0: 8B = [1B header][7B payload] → next at offset 1 needs 1B header + 8B pointer = 9B > 8B block → IMPOSSIBLE
- Classes 1-6: >=16B = [1B header][15B+ payload] → next at offset 1 fits → POSSIBLE
- Class 7: 1KB → next at offset 0 (legacy compatibility)

**Correct Specification**:
- HAKMEM_TINY_HEADER_CLASSIDX != 0:
  - Class 0, 7: next at offset 0 (overwrites header when on freelist)
  - Class 1-6: next at offset 1 (after header)
- HAKMEM_TINY_HEADER_CLASSIDX == 0:
  - All classes: next at offset 0

**Previous Bug**:
- A previous change attempted an "ALL classes at offset 1" unification
- Class 0 with offset 1 overran its 8B block (1B header + 8B pointer = 9B), causing an immediate SEGV
- Mixed 2-arg/3-arg call sites compounded the confusion

## Fixes Applied

### 1. Restored 3-Argument Box API (core/box/tiny_next_ptr_box.h)
```c
// Correct signatures
void  tiny_next_write(int class_idx, void* base, void* next_value);
void* tiny_next_read(int class_idx, const void* base);

// Correct offset calculation
size_t offset = (class_idx == 0 || class_idx == 7) ? 0 : 1;
```
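
For illustration, the accessors can be thought of as thin wrappers over `memcpy` at the class-dependent offset. This is a sketch, not the actual contents of `box/tiny_next_ptr_box.h`; only the two signatures above are confirmed by the source:

```c
#include <stddef.h>
#include <string.h>

// Sketch only; the real box/tiny_next_ptr_box.h may differ in detail.
static inline size_t tiny_next_off(int class_idx) {
    // Classes 0 and 7 store next at offset 0 (the header is overwritten
    // while the block sits on a freelist); classes 1-6 store it at
    // offset 1, just past the 1-byte header.
    return (class_idx == 0 || class_idx == 7) ? (size_t)0 : (size_t)1;
}

static inline void tiny_next_write(int class_idx, void* base, void* next_value) {
    memcpy((char*)base + tiny_next_off(class_idx), &next_value, sizeof(void*));
}

static inline void* tiny_next_read(int class_idx, const void* base) {
    void* next;
    memcpy(&next, (const char*)base + tiny_next_off(class_idx), sizeof(void*));
    return next;
}
```

`memcpy` is used in the sketch because offset 1 is not pointer-aligned; a direct `*(void**)` store there would be undefined behavior on strict-alignment targets.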

### 2. Updated 123+ Call Sites Across 34 Files
- hakmem_tiny_hot_pop_v4.inc.h (4 locations)
- hakmem_tiny_fastcache.inc.h (3 locations)
- hakmem_tiny_tls_list.h (12 locations)
- superslab_inline.h (5 locations)
- tiny_fastcache.h (3 locations)
- ptr_trace.h (macro definitions)
- tls_sll_box.h (2 locations)
- + 27 additional files

Pattern: `tiny_next_read(base)` → `tiny_next_read(class_idx, base)`
Pattern: `tiny_next_write(base, next)` → `tiny_next_write(class_idx, base, next)`
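
A typical migration looks like the following (hypothetical pop helper; `tls_pop_example` and its `head` parameter are illustrative, not names from the codebase):

```c
// Hypothetical freelist pop showing the 2-arg -> 3-arg migration.
static inline void* tls_pop_example(int class_idx, void** head) {
    void* blk = *head;
    if (!blk) return NULL;
    *head = tiny_next_read(class_idx, blk);   // was: tiny_next_read(blk)
    tiny_next_write(class_idx, blk, NULL);    // was: tiny_next_write(blk, NULL)
    return blk;
}
```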

### 3. Added Sentinel Detection Guards
- tiny_fast_push(): Block nodes with sentinel in ptr or ptr->next
- tls_list_push(): Block nodes with sentinel in ptr or ptr->next
- Defense-in-depth against remote free sentinel leakage
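
The guard has roughly the following shape (a sketch; the sentinel value and the exact checks inside `tiny_fast_push`/`tls_list_push` are assumptions):

```c
#include <stdint.h>

// Hypothetical sentinel; the real value lives in the remote-free machinery.
#define TINY_REMOTE_SENTINEL ((void*)(uintptr_t)0xB5B5B5B5B5B5B5B5ull)

// Reject a node if the sentinel appears in the pointer itself or in its
// next link; returns 1 when the push may proceed.
static inline int tiny_push_guard(int class_idx, void* ptr) {
    if (ptr == TINY_REMOTE_SENTINEL) return 0;
    if (tiny_next_read(class_idx, ptr) == TINY_REMOTE_SENTINEL) return 0;
    return 1;
}
```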

## Verification (GPT5 Report)

**Test Command**: `./out/release/bench_random_mixed_hakmem --iterations=70000`

**Results**:
- Main loop completed successfully
- Drain phase completed successfully
- NO SEGV (the previous crash at iteration 66151 is fixed)
- The final "tiny_alloc(1024) failed" log line is the normal fallback to the Mid/ACE layers

**Analysis**:
- Class 0 immediate SEGV: RESOLVED (correct offset 0 now used)
- 66K-iteration crash: RESOLVED (offset consistency restored)
- Box API conflicts: RESOLVED (unified 3-arg API)

## Technical Details

### Offset Logic Justification
```
Class 0:  8B block → next pointer (8B) fits ONLY at offset 0
Class 1: 16B block → next pointer (8B) fits at offset 1 (after 1B header)
Class 2: 32B block → next pointer (8B) fits at offset 1
...
Class 6: 512B block → next pointer (8B) fits at offset 1
Class 7: 1024B block → offset 0 for legacy compatibility
```
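
These constraints can also be pinned down at compile time. A minimal sketch, assuming a 64-bit target (8-byte pointers) and the block sizes above:

```c
#include <assert.h> // C11 static_assert

static_assert(0 + sizeof(void*) <= 8,  "class 0: next at offset 0 fits in an 8B block");
static_assert(1 + sizeof(void*) > 8,   "class 0: next at offset 1 cannot fit (the bug)");
static_assert(1 + sizeof(void*) <= 16, "classes 1-6: offset 1 fits once blocks are >= 16B");
```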

### Files Modified (Summary)
- Core API: `box/tiny_next_ptr_box.h`
- Hot paths: `hakmem_tiny_hot_pop*.inc.h`, `tiny_fastcache.h`
- TLS layers: `hakmem_tiny_tls_list.h`, `hakmem_tiny_tls_ops.h`
- SuperSlab: `superslab_inline.h`, `tiny_superslab_*.inc.h`
- Refill: `hakmem_tiny_refill.inc.h`, `tiny_refill_opt.h`
- Free paths: `tiny_free_magazine.inc.h`, `tiny_superslab_free.inc.h`
- Documentation: Multiple Phase E3 reports

## Remaining Work

None for the Box API offset bugs; all structural issues are resolved.

Future enhancements (non-critical):
- Periodic `grep -R '*(void**)' core/` to detect direct pointer access violations
- Enforce Box API usage via static analysis
- Document offset rationale in architecture docs

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-13 06:50:20 +09:00

### File: hakmem/core/hakmem_tiny_bg_spill.c (C, 110 lines, 4.4 KiB)

```c

#include "hakmem_tiny_bg_spill.h"
#include "hakmem_tiny_superslab.h" // For SuperSlab, TinySlabMeta, ss_active_dec_one
#include "hakmem_super_registry.h" // For hak_super_registry_lookup
#include "tiny_remote.h"
#include "hakmem_tiny.h"
#include "box/tiny_next_ptr_box.h" // Phase E1-CORRECT: Box API
#include <pthread.h>
#include <stdlib.h> // For getenv, atoi

// 32-bit thread id used by the remote-free ownership guard
static inline uint32_t tiny_self_u32_guard(void) {
    return (uint32_t)(uintptr_t)pthread_self();
}
// Global variables
int g_bg_spill_enable = 0; // HAKMEM_TINY_BG_SPILL=1
int g_bg_spill_target = 128; // HAKMEM_TINY_BG_TARGET (per class)
int g_bg_spill_max_batch = 128; // HAKMEM_TINY_BG_MAX_BATCH
_Atomic uintptr_t g_bg_spill_head[TINY_NUM_CLASSES];
_Atomic uint32_t g_bg_spill_len[TINY_NUM_CLASSES];
void bg_spill_init(void) {
    // Parse environment variables
    char* bs = getenv("HAKMEM_TINY_BG_SPILL");
    if (bs) g_bg_spill_enable = (atoi(bs) != 0) ? 1 : 0;
    char* bt = getenv("HAKMEM_TINY_BG_TARGET");
    if (bt) { int v = atoi(bt); if (v > 0 && v <= 8192) g_bg_spill_target = v; }
    char* mb = getenv("HAKMEM_TINY_BG_MAX_BATCH");
    if (mb) { int v = atoi(mb); if (v > 0 && v <= 4096) g_bg_spill_max_batch = v; }
    // Initialize per-class atomic queues
    for (int k = 0; k < TINY_NUM_CLASSES; k++) {
        atomic_store_explicit(&g_bg_spill_head[k], (uintptr_t)0, memory_order_relaxed);
        atomic_store_explicit(&g_bg_spill_len[k], 0u, memory_order_relaxed);
    }
}
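
/*
 * Drain algorithm (summary of the function below):
 *   1) Snapshot the approximate length; bail early if the queue looks empty.
 *   2) Atomically detach the whole lock-free chain for this class.
 *   3) Walk at most g_bg_spill_max_batch nodes and cut the chain there.
 *   4) Under the class lock, push each drained node back onto its owning
 *      SuperSlab's freelist, or onto the remote queue when the ownership
 *      guard rejects a local push.
 *   5) CAS any unprocessed remainder back onto the spill-queue head.
 */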
void bg_spill_drain_class(int class_idx, pthread_mutex_t* lock) {
    uint32_t approx = atomic_load_explicit(&g_bg_spill_len[class_idx], memory_order_relaxed);
    if (approx == 0) return;
    uintptr_t chain = atomic_exchange_explicit(&g_bg_spill_head[class_idx], (uintptr_t)0, memory_order_acq_rel);
    if (chain == 0) return;

    // Split the chain at max_batch. Next-pointer placement is class-dependent
    // (offset 0 for classes 0/7, offset 1 for classes 1-6), so all link
    // traversal goes through the Box API included at the top of this file.
    int processed = 0;
    void* rest = NULL;
    void* cur = (void*)chain;
    void* prev = NULL;
    while (cur && processed < g_bg_spill_max_batch) {
        prev = cur;
        cur = tiny_next_read(class_idx, cur);
        processed++;
    }
    if (cur != NULL) { rest = cur; tiny_next_write(class_idx, prev, NULL); }

    // Return processed nodes to SuperSlab freelists
    pthread_mutex_lock(lock);
    uint32_t self_tid = tiny_self_u32_guard();
    void* node = (void*)chain;
    while (node) {
        SuperSlab* owner_ss = hak_super_lookup(node);
        void* next = tiny_next_read(class_idx, node);
        if (owner_ss && owner_ss->magic == SUPERSLAB_MAGIC) {
            int slab_idx = slab_index_for(owner_ss, node);
            TinySlabMeta* meta = &owner_ss->slabs[slab_idx];
            if (!tiny_remote_guard_allow_local_push(owner_ss, slab_idx, meta, node, "bg_spill", self_tid)) {
                (void)ss_remote_push(owner_ss, slab_idx, node);
                if (meta->used > 0) meta->used--;
                node = next;
                continue;
            }
            void* old_head = meta->freelist;
            // Box API selects the correct per-class next-pointer offset
            tiny_next_write(class_idx, node, old_head);
            meta->freelist = node;
            tiny_failfast_log("bg_spill", owner_ss->size_class, owner_ss, meta, node, old_head);
            meta->used--;
            // Active count was already decremented at free time
        }
        node = next;
    }
    pthread_mutex_unlock(lock);

    if (processed > 0) {
        atomic_fetch_sub_explicit(&g_bg_spill_len[class_idx], (uint32_t)processed, memory_order_relaxed);
    }
    if (rest) {
        // Prepend the unprocessed remainder back onto the queue head
        uintptr_t old_head;
        void* tail = rest;
        while (tiny_next_read(class_idx, tail)) tail = tiny_next_read(class_idx, tail);
        do {
            old_head = atomic_load_explicit(&g_bg_spill_head[class_idx], memory_order_acquire);
            tiny_next_write(class_idx, tail, (void*)old_head);
        } while (!atomic_compare_exchange_weak_explicit(&g_bg_spill_head[class_idx], &old_head,
                                                        (uintptr_t)rest,
                                                        memory_order_release, memory_order_relaxed));
    }
}
```