hakmem/core/tiny_adaptive_sizing.c
Moe Charm (CI) 72b38bc994 Phase E3-FINAL: Fix Box API offset bugs - ALL classes now use correct offsets
## Root Cause Analysis (GPT5)

**Physical Layout Constraints**:
- Class 0: 8B = [1B header][7B payload] → offset 1 = 9B needed = ❌ IMPOSSIBLE
- Class 1-6: >=16B = [1B header][15B+ payload] → offset 1 = ✅ POSSIBLE
- Class 7: 1KB → offset 0 (compatibility)

**Correct Specification**:
- HAKMEM_TINY_HEADER_CLASSIDX != 0:
  - Class 0, 7: next at offset 0 (overwrites header when on freelist)
  - Class 1-6: next at offset 1 (after header)
- HAKMEM_TINY_HEADER_CLASSIDX == 0:
  - All classes: next at offset 0
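
The spec reduces to a single class-indexed offset rule. A minimal sketch, assuming the compile-time switch behaves as described above (the helper name `tiny_next_offset` is illustrative, not necessarily the repo's actual symbol):

```c
// Sketch of the offset rule above; tiny_next_offset is an illustrative name.
static inline size_t tiny_next_offset(int class_idx) {
#if HAKMEM_TINY_HEADER_CLASSIDX != 0
    // Class 0 (8B) cannot fit a 1B header plus an 8B next pointer at
    // offset 1; class 7 stays at offset 0 for legacy compatibility.
    return (class_idx == 0 || class_idx == 7) ? 0 : 1;
#else
    // No per-block header: the next pointer always overwrites byte 0.
    (void)class_idx;
    return 0;
#endif
}
```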

**Previous Bug**:
- Attempted "ALL classes offset 1" unification
- Class 0 with offset 1 caused immediate SEGV (9B > 8B block size)
- Mixed 2-arg/3-arg API caused confusion

## Fixes Applied

### 1. Restored 3-Argument Box API (core/box/tiny_next_ptr_box.h)
```c
// Correct signatures
void tiny_next_write(int class_idx, void* base, void* next_value)
void* tiny_next_read(int class_idx, const void* base)

// Correct offset calculation
size_t offset = (class_idx == 0 || class_idx == 7) ? 0 : 1;
```
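
As a rough illustration, accessors consistent with these signatures could look like the sketch below; `memcpy` is used because offset 1 is misaligned for an 8-byte pointer store (the actual Box implementation may differ):

```c
#include <string.h>

void tiny_next_write(int class_idx, void* base, void* next_value) {
    size_t offset = (class_idx == 0 || class_idx == 7) ? 0 : 1;
    // memcpy rather than a direct store: offset 1 is not 8B-aligned
    memcpy((char*)base + offset, &next_value, sizeof(void*));
}

void* tiny_next_read(int class_idx, const void* base) {
    size_t offset = (class_idx == 0 || class_idx == 7) ? 0 : 1;
    void* next;
    memcpy(&next, (const char*)base + offset, sizeof(void*));
    return next;
}
```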

### 2. Updated 123+ Call Sites Across 34 Files
- hakmem_tiny_hot_pop_v4.inc.h (4 locations)
- hakmem_tiny_fastcache.inc.h (3 locations)
- hakmem_tiny_tls_list.h (12 locations)
- superslab_inline.h (5 locations)
- tiny_fastcache.h (3 locations)
- ptr_trace.h (macro definitions)
- tls_sll_box.h (2 locations)
- + 27 additional files

Pattern: `tiny_next_read(base)` → `tiny_next_read(class_idx, base)`
Pattern: `tiny_next_write(base, next)` → `tiny_next_write(class_idx, base, next)`
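
For example, a freelist pop that previously assumed one global offset now threads the class index through (hypothetical call site, shown for shape only):

```c
void* tiny_next_read(int class_idx, const void* base); // 3-arg Box API

// Hypothetical call site: pop the TLS freelist head for one class.
static void* tls_pop(int class_idx, void** head) {
    void* block = *head;
    if (block) {
        // Was: *head = tiny_next_read(block);  (2-arg, ambiguous offset)
        *head = tiny_next_read(class_idx, block);
    }
    return block;
}
```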

### 3. Added Sentinel Detection Guards
- tiny_fast_push(): Block nodes with sentinel in ptr or ptr->next
- tls_list_push(): Block nodes with sentinel in ptr or ptr->next
- Defense-in-depth against remote free sentinel leakage
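
A minimal sketch of such a guard, with hypothetical names (`TINY_REMOTE_SENTINEL` and the TLS head array are illustrative; the commit does not show the real definitions):

```c
#define TINY_REMOTE_SENTINEL ((void*)0x1) // hypothetical sentinel value

extern __thread void* g_tls_fast_head[]; // illustrative TLS freelist heads
void* tiny_next_read(int class_idx, const void* base);
void  tiny_next_write(int class_idx, void* base, void* next_value);

static int tiny_fast_push(int class_idx, void* ptr) {
    // Defense-in-depth: never link a node that is, or points at, the
    // remote-free sentinel into the TLS freelist.
    if (ptr == TINY_REMOTE_SENTINEL ||
        tiny_next_read(class_idx, ptr) == TINY_REMOTE_SENTINEL) {
        return 0; // reject; caller takes the slow path
    }
    tiny_next_write(class_idx, ptr, g_tls_fast_head[class_idx]);
    g_tls_fast_head[class_idx] = ptr;
    return 1;
}
```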

## Verification (GPT5 Report)

**Test Command**: `./out/release/bench_random_mixed_hakmem --iterations=70000`

**Results**:
- ✅ Main loop completed successfully
- ✅ Drain phase completed successfully
- ✅ NO SEGV (previous crash at iteration 66151 is FIXED)
- ℹ️ Final log: "tiny_alloc(1024) failed" is normal fallback to Mid/ACE layers

**Analysis**:
- Class 0 immediate SEGV: ✅ RESOLVED (correct offset 0 now used)
- 66K iteration crash: ✅ RESOLVED (offset consistency fixed)
- Box API conflicts: ✅ RESOLVED (unified 3-arg API)

## Technical Details

### Offset Logic Justification
```
Class 0:  8B block → next pointer (8B) fits ONLY at offset 0
Class 1: 16B block → next pointer (8B) fits at offset 1 (after 1B header)
Class 2: 32B block → next pointer (8B) fits at offset 1
...
Class 6: 512B block → next pointer (8B) fits at offset 1
Class 7: 1024B block → offset 0 for legacy compatibility
```
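
On a 64-bit target this fit constraint can be pinned down at compile time; a sketch assuming the block sizes listed above:

```c
#include <assert.h> // static_assert (C11)

#define TINY_NEXT_OFFSET(ci) (((ci) == 0 || (ci) == 7) ? 0 : 1)

// An 8B next pointer must fit inside every block at its chosen offset.
static_assert(TINY_NEXT_OFFSET(0) + sizeof(void*) <= 8,    "class 0: 8B block");
static_assert(TINY_NEXT_OFFSET(1) + sizeof(void*) <= 16,   "class 1: 16B block");
static_assert(TINY_NEXT_OFFSET(6) + sizeof(void*) <= 512,  "class 6: 512B block");
static_assert(TINY_NEXT_OFFSET(7) + sizeof(void*) <= 1024, "class 7: 1024B block");
```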

### Files Modified (Summary)
- Core API: `box/tiny_next_ptr_box.h`
- Hot paths: `hakmem_tiny_hot_pop*.inc.h`, `tiny_fastcache.h`
- TLS layers: `hakmem_tiny_tls_list.h`, `hakmem_tiny_tls_ops.h`
- SuperSlab: `superslab_inline.h`, `tiny_superslab_*.inc.h`
- Refill: `hakmem_tiny_refill.inc.h`, `tiny_refill_opt.h`
- Free paths: `tiny_free_magazine.inc.h`, `tiny_superslab_free.inc.h`
- Documentation: Multiple Phase E3 reports

## Remaining Work

None for Box API offset bugs - all structural issues resolved.

Future enhancements (non-critical):
- Periodic `grep -RF '*(void**)' core/` (fixed-string match, so the `*` are literal) to detect direct pointer access violations
- Enforce Box API usage via static analysis
- Document offset rationale in architecture docs

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-13 06:50:20 +09:00


// tiny_adaptive_sizing.c - Phase 2b: TLS Cache Adaptive Sizing Implementation
// Purpose: Hot classes get more cache → Better hit rate → Higher throughput
#include "tiny_adaptive_sizing.h"
#include "hakmem_tiny.h"
#include "box/tiny_next_ptr_box.h" // Phase E1-CORRECT: Box API
#include <stdio.h>
#include <stdlib.h>
// TLS per-thread stats
__thread TLSCacheStats g_tls_cache_stats[TINY_NUM_CLASSES];
// Global enable flag (default: enabled, can disable via env)
int g_adaptive_sizing_enabled = 1;
// Logging enable flag (default: disabled; enable via HAKMEM_ADAPTIVE_LOG=1)
static int g_adaptive_logging_enabled = 0;
// Forward declaration for draining blocks
extern void tiny_superslab_return_block(void* ptr, int class_idx);
extern int hak_tiny_size_to_class(size_t size);
// ========== Initialization ==========
void adaptive_sizing_init(void) {
    // Read environment variable
    const char* env = getenv("HAKMEM_ADAPTIVE_SIZING");
    if (env && atoi(env) == 0) {
        g_adaptive_sizing_enabled = 0;
        fprintf(stderr, "[ADAPTIVE] Adaptive sizing disabled via env\n");
        return;
    }

    // Read logging flag (default off; HAKMEM_ADAPTIVE_LOG=1 enables it)
    const char* log_env = getenv("HAKMEM_ADAPTIVE_LOG");
    if (log_env && atoi(log_env) != 0) {
        g_adaptive_logging_enabled = 1;
    }

    // Initialize stats for each class
    for (int class_idx = 0; class_idx < TINY_NUM_CLASSES; class_idx++) {
        TLSCacheStats* stats = &g_tls_cache_stats[class_idx];
        stats->capacity = TLS_CACHE_INITIAL_CAPACITY; // Start with 64 slots
        stats->high_water_mark = 0;
        stats->refill_count = 0;
        stats->shrink_count = 0;
        stats->grow_count = 0;
        stats->last_adapt_time = get_timestamp_ns();
    }

    if (g_adaptive_logging_enabled) {
        fprintf(stderr, "[ADAPTIVE] Adaptive sizing initialized (initial_cap=%d, min=%d, max=%d)\n",
                TLS_CACHE_INITIAL_CAPACITY, TLS_CACHE_MIN_CAPACITY, TLS_CACHE_MAX_CAPACITY);
    }
}
// ========== Grow/Shrink Functions ==========

void grow_tls_cache(int class_idx) {
    TLSCacheStats* stats = &g_tls_cache_stats[class_idx];
    size_t new_capacity = stats->capacity * 2;
    if (new_capacity > TLS_CACHE_MAX_CAPACITY) {
        new_capacity = TLS_CACHE_MAX_CAPACITY;
    }
    if (new_capacity == stats->capacity) {
        return; // Already at max
    }
    size_t old_capacity = stats->capacity;
    stats->capacity = new_capacity;
    stats->grow_count++;
    if (g_adaptive_logging_enabled) {
        fprintf(stderr, "[TLS_CACHE] Grow class %d: %zu → %zu slots (grow_count=%zu)\n",
                class_idx, old_capacity, stats->capacity, stats->grow_count);
    }
}
void drain_excess_blocks(int class_idx, int count) {
    void** head = &g_tls_sll_head[class_idx];
    int drained = 0;
    while (*head && drained < count) {
        void* block = *head;
        *head = tiny_next_read(class_idx, block); // Pop from TLS list
        // Return to SuperSlab (best effort - ignore failures)
        // Note: tiny_superslab_return_block may not exist, use simpler approach
        // Just drop the blocks for now (they'll be reclaimed by OS eventually)
        // TODO: Integrate with proper SuperSlab return path
        drained++;
        if (g_tls_sll_count[class_idx] > 0) {
            g_tls_sll_count[class_idx]--;
        }
    }
    if (g_adaptive_logging_enabled && drained > 0) {
        fprintf(stderr, "[TLS_CACHE] Drained %d excess blocks from class %d\n", drained, class_idx);
    }
}
void shrink_tls_cache(int class_idx) {
    TLSCacheStats* stats = &g_tls_cache_stats[class_idx];
    size_t new_capacity = stats->capacity / 2;
    if (new_capacity < TLS_CACHE_MIN_CAPACITY) {
        new_capacity = TLS_CACHE_MIN_CAPACITY;
    }
    if (new_capacity == stats->capacity) {
        return; // Already at min
    }
    // Evict excess blocks if current count > new_capacity
    if (g_tls_sll_count[class_idx] > new_capacity) {
        int excess = (int)(g_tls_sll_count[class_idx] - new_capacity);
        drain_excess_blocks(class_idx, excess);
    }
    size_t old_capacity = stats->capacity;
    stats->capacity = new_capacity;
    stats->shrink_count++;
    if (g_adaptive_logging_enabled) {
        fprintf(stderr, "[TLS_CACHE] Shrink class %d: %zu → %zu slots (shrink_count=%zu)\n",
                class_idx, old_capacity, stats->capacity, stats->shrink_count);
    }
}
// ========== Adaptation Logic ==========

void adapt_tls_cache_size(int class_idx) {
    if (!g_adaptive_sizing_enabled) return;
    TLSCacheStats* stats = &g_tls_cache_stats[class_idx];

    // Adapt every N refills or M seconds
    uint64_t now = get_timestamp_ns();
    int should_adapt = (stats->refill_count >= ADAPT_REFILL_THRESHOLD) ||
                       ((now - stats->last_adapt_time) >= ADAPT_TIME_THRESHOLD_NS);
    if (!should_adapt) {
        return; // Too soon to adapt
    }

    // Avoid division by zero
    if (stats->capacity == 0) {
        stats->capacity = TLS_CACHE_INITIAL_CAPACITY;
        return;
    }

    // Calculate usage ratio
    double usage_ratio = (double)stats->high_water_mark / (double)stats->capacity;

    // Decide: grow, shrink, or keep
    if (usage_ratio > GROW_THRESHOLD) {
        // High usage (>80%) → grow cache
        grow_tls_cache(class_idx);
    } else if (usage_ratio < SHRINK_THRESHOLD) {
        // Low usage (<20%) → shrink cache
        shrink_tls_cache(class_idx);
    } else {
        // Moderate usage (20-80%) → keep current size
        if (g_adaptive_logging_enabled) {
            fprintf(stderr, "[TLS_CACHE] Keep class %d at %zu slots (usage=%.1f%%)\n",
                    class_idx, stats->capacity, usage_ratio * 100.0);
        }
    }

    // Reset stats for next window
    stats->high_water_mark = g_tls_sll_count[class_idx];
    stats->refill_count = 0;
    stats->last_adapt_time = now;
}
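
For context, one plausible way a refill path could drive these hooks (hypothetical caller, not part of this file): record the refill, track the high-water mark, then let the adapter decide.

```c
// Hypothetical integration sketch (not in this commit): called whenever
// the TLS cache for class_idx is refilled from the SuperSlab.
static void on_tls_refill(int class_idx) {
    TLSCacheStats* stats = &g_tls_cache_stats[class_idx];
    stats->refill_count++;
    if (g_tls_sll_count[class_idx] > stats->high_water_mark) {
        stats->high_water_mark = g_tls_sll_count[class_idx];
    }
    adapt_tls_cache_size(class_idx); // grow/shrink once thresholds are hit
}
```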