Files
hakmem/core/box/tls_sll_drain_box.h
Moe Charm (CI) 8b67718bf2 Fix C7 TLS SLL corruption: Protect next pointer from user data overwrites
## Root Cause
C7 (1024B allocations, 2048B stride) was using offset=1 for freelist next
pointers, storing them at `base[1..8]`. Since user pointer is `base+1`, users
could overwrite the next pointer area, corrupting the TLS SLL freelist.

## The Bug Sequence
1. Block freed → TLS SLL push stores next at `base[1..8]`
2. Block allocated → User gets `base+1`, can modify `base[1..2047]`
3. User writes data → Overwrites `base[1..8]` (next pointer area!)
4. Block freed again → tiny_next_load() reads garbage from `base[1..8]`
5. TLS SLL head becomes invalid (0xfe, 0xdb, 0x58, etc.)

## Why This Was Reverted
Previous fix (C7 offset=0) was reverted with comment:
  "C7も header を保持して class 判別を壊さないことを優先"
  (Prioritize preserving C7 header to avoid breaking class identification)

This reasoning was FLAWED because:
- Header IS restored during allocation (HAK_RET_ALLOC), not freelist ops
- Class identification at free time reads from ptr-1 = base[0] (after restoration)
- During freelist, header CAN be sacrificed (not visible to user)
- The revert CREATED the race condition by exposing base[1..8] to user

## Fix Applied

### 1. Revert C7 offset to 0 (tiny_nextptr.h:54)
```c
// BEFORE (BROKEN):
return (class_idx == 0) ? 0u : 1u;

// AFTER (FIXED):
return (class_idx == 0 || class_idx == 7) ? 0u : 1u;
```

### 2. Remove C7 header restoration in freelist (tiny_nextptr.h:84)
```c
// BEFORE (BROKEN):
if (class_idx != 0) {  // Restores header for all classes including C7

// AFTER (FIXED):
if (class_idx != 0 && class_idx != 7) {  // Only C1-C6 restore headers
```

### 3. Bonus: Remove premature slab release (tls_sll_drain_box.h:182-189)
Removed `shared_pool_release_slab()` call from drain path that could cause
use-after-free when blocks from same slab remain in TLS SLL.

## Why This Fix Works

**Memory Layout** (C7 in freelist):
```
Address:     base      base+1        base+2048
            ┌────┬──────────────────────┐
Content:    │next│  (user accessible)  │
            └────┴──────────────────────┘
            8B ptr  ← USER CANNOT TOUCH base[0]
```

- **Next pointer at base[0]**: Protected from user modification ✓
- **User pointer at base+1**: User sees base[1..2047] only ✓
- **Header restored during allocation**: HAK_RET_ALLOC writes 0xa7 at base[0] ✓
- **Class ID preserved**: tiny_region_id_read_header(ptr) reads ptr-1 = base[0] ✓

## Verification Results

### Before Fix
- **Errors**: 33 TLS_SLL_POP_INVALID per 100K iterations (0.033%)
- **Performance**: 1.8M ops/s (corruption caused slow path fallback)
- **Symptoms**: Invalid TLS SLL heads (0xfe, 0xdb, 0x58, 0x80, 0xc2, etc.)

### After Fix
- **Errors**: 0 per 200K iterations 
- **Performance**: 10.0M ops/s (+456%!) 
- **C7 direct test**: 5.5M ops/s, 100K iterations, 0 errors 

## Files Modified
- core/tiny_nextptr.h (lines 49-54, 82-84) - C7 offset=0, no header restoration
- core/box/tls_sll_drain_box.h (lines 182-189) - Remove premature slab release

## Architectural Lesson

**Design Principle**: Freelist metadata MUST be stored in memory NOT accessible to user.

| Class | Offset | Next Storage | User Access | Result |
|-------|--------|--------------|-------------|--------|
| C0 | 0 | base[0] | base[1..7] | Safe ✓ |
| C1-C6 | 1 | base[1..8] | base[1..N] | Safe (header at base[0]) ✓ |
| C7 (broken) | 1 | base[1..8] | base[1..2047] | **CORRUPTED** ✗ |
| C7 (fixed) | 0 | base[0] | base[1..2047] | Safe ✓ |

🧹 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-21 23:42:43 +09:00

256 lines
9.3 KiB
C

// tls_sll_drain_box.h - Box: TLS SLL Periodic Drain
// Purpose: Restore slab accounting consistency by periodically draining TLS SLL to slab freelists
//
// Problem:
// - Fast free path (hak_tiny_free_fast_v2) pushes to TLS SLL without decrementing meta->used
// - Slabs never appear empty → SuperSlabs never freed → LRU cache never populated
// - Result: 6,455 mmap/munmap syscalls per 200K iterations (74.8% time)
//
// Solution:
// - Every N frees (default: 1024), drain TLS SLL → slab freelist
// - This path decrements meta->used properly via tiny_free_local_box()
// - Enables empty detection → SuperSlabs freed → LRU cache functional
//
// Expected Impact:
// - mmap/munmap: 6,455 → ~100 calls (roughly a 96–97% reduction)
// - Throughput: 563K → 8-10M ops/s (+1,300-1,700%)
//
// References:
// - Root cause: PHASE9_LRU_ARCHITECTURE_ISSUE.md
// - Design: Option B (Periodic TLS SLL Drain)
#pragma once
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include "tls_sll_box.h" // TLS SLL operations (tls_sll_pop)
#include "../hakmem_tiny_config.h" // TINY_NUM_CLASSES
#include "../hakmem_super_registry.h" // SuperSlab lookup
#include "free_local_box.h" // tiny_free_local_box (decrements meta->used)
// ========== ENV Configuration ==========
// Check if TLS SLL drain is enabled
// ENV: HAKMEM_TINY_SLL_DRAIN_ENABLE=1/0 (default: 1)
// Returns 1 if TLS SLL drain is enabled, 0 otherwise.
// Controlled by HAKMEM_TINY_SLL_DRAIN_ENABLE (leading '0' disables; default on).
// The decision is read from the environment once and cached.
static inline int tls_sll_drain_is_enabled(void) {
    static int cached = -1; // -1 = not yet read, 0 = disabled, 1 = enabled
    if (__builtin_expect(cached != -1, 1)) {
        return cached;
    }
    const char* setting = getenv("HAKMEM_TINY_SLL_DRAIN_ENABLE");
    if (setting && setting[0] == '0') {
        fprintf(stderr, "[TLS_SLL_DRAIN] Drain DISABLED via ENV\n");
        cached = 0;
    } else {
        fprintf(stderr, "[TLS_SLL_DRAIN] Drain ENABLED (default)\n");
        cached = 1;
    }
    return cached;
}
// Get drain interval (number of frees before triggering drain)
// ENV: HAKMEM_TINY_SLL_DRAIN_INTERVAL=N (default: 2048)
// Returns the drain interval: number of frees before a drain is triggered.
// Controlled by HAKMEM_TINY_SLL_DRAIN_INTERVAL (valid range 1..65536);
// anything else falls back to the default of 2048. Read once, then cached.
static inline uint32_t tls_sll_drain_get_interval(void) {
    static uint32_t cached = 0; // 0 = not yet initialized
    if (__builtin_expect(cached == 0, 0)) {
        const char* setting = getenv("HAKMEM_TINY_SLL_DRAIN_INTERVAL");
        if (!setting || !*setting) {
            cached = 2048;
            fprintf(stderr, "[TLS_SLL_DRAIN] Interval=%u (default)\n", cached);
        } else {
            int parsed = atoi(setting);
            if (parsed > 0 && parsed <= 65536) {
                cached = (uint32_t)parsed;
                fprintf(stderr, "[TLS_SLL_DRAIN] Interval=%u (from ENV)\n", cached);
            } else {
                cached = 2048;
                fprintf(stderr, "[TLS_SLL_DRAIN] Invalid ENV value, using default=2048\n");
            }
        }
    }
    return cached;
}
// ========== Drain Counter (TLS) ==========
// Per-class free counter (thread-local, one slot per size class).
// Incremented on each free by tiny_tls_sll_try_drain(); when a slot reaches
// the configured interval, a drain is triggered and the slot resets to 0.
static __thread uint32_t g_tls_sll_drain_counter[TINY_NUM_CLASSES] = {0};
// Debug accounting (thread-local): number of drain passes executed and the
// total number of blocks returned to slab freelists across all classes.
// Reported at process exit by tls_sll_drain_print_stats() in debug builds.
static __thread uint64_t g_tls_sll_drain_total_calls = 0;
static __thread uint64_t g_tls_sll_drain_total_blocks = 0;
// ========== Drain Implementation (Skeleton) ==========
// Box: TLS SLL Drain
// Purpose: Pop blocks from TLS SLL and push to slab freelist
//
// Flow:
// 1. Pop up to batch_size blocks from TLS SLL (g_tls_sll_head[class_idx])
// 2. For each block:
// a. Resolve SuperSlab/Slab (like slow path does)
// b. Call tiny_free_local_box() → decrements meta->used properly
// 3. Result: meta->used reflects true state, empty detection works
//
// Args:
// class_idx: Size class to drain
// batch_size: Max blocks to drain (0 = drain all)
//
// Returns: Number of blocks drained
// Drain blocks from this thread's TLS SLL for class_idx back to their owning
// slab freelists via tiny_free_local_box(), which decrements meta->used so
// empty-slab detection works again.
//   class_idx:  size class to drain (validated here).
//   batch_size: max blocks to drain; 0 = drain everything available.
// Returns the number of blocks actually returned to slab freelists.
static inline uint32_t tiny_tls_sll_drain(int class_idx, uint32_t batch_size) {
if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES) {
return 0;
}
// Sanity check: TLS SLL count (extern TLS list — presumably defined by
// tls_sll_box.h's translation unit; confirm against that header)
extern __thread TinyTLSSLL g_tls_sll[TINY_NUM_CLASSES];
uint32_t avail = g_tls_sll[class_idx].count;
if (avail == 0) {
return 0; // Nothing to drain
}
// Drain up to batch_size blocks (0 = drain all)
uint32_t to_drain = (batch_size == 0) ? avail : (avail < batch_size ? avail : batch_size);
uint32_t drained = 0;
// Debug logging gate, lazily read from ENV.
// NOTE(review): non-atomic function-local static — concurrent first callers
// may race on initialization, though all writers store the same value.
static int g_debug = -1;
if (__builtin_expect(g_debug == -1, 0)) {
const char* env = getenv("HAKMEM_TINY_SLL_DRAIN_DEBUG");
g_debug = (env && *env && *env != '0') ? 1 : 0;
}
if (g_debug) {
fprintf(stderr, "[TLS_SLL_DRAIN] START: class=%d avail=%u to_drain=%u\n",
class_idx, avail, to_drain);
}
// External functions needed for drain
extern SuperSlab* hak_super_lookup(void* ptr); // SuperSlab registry lookup
extern const size_t g_tiny_class_sizes[TINY_NUM_CLASSES]; // Block sizes (const)
// Get thread ID once (used for all blocks)
// Note: Use pthread_self() directly since tiny_self_u32() is static inline.
// The cast truncates the pthread handle to 32 bits — assumed sufficient for
// ownership tagging in tiny_free_local_box(); TODO confirm.
uint32_t my_tid = (uint32_t)(uintptr_t)pthread_self();
// Drain loop: Pop blocks from TLS SLL and push to slab freelist
for (uint32_t i = 0; i < to_drain; i++) {
void* base = NULL;
if (!tls_sll_pop(class_idx, &base)) {
// TLS SLL exhausted (concurrent drain or count mismatch)
break;
}
// Resolve SuperSlab/Slab (like slow path does)
SuperSlab* ss = hak_super_lookup(base);
if (!ss || ss->magic != SUPERSLAB_MAGIC) {
// Invalid SuperSlab - skip this block. It was already popped, so it is
// dropped here (never freed) rather than risk corrupting an unrelated
// slab's freelist.
if (g_debug) {
fprintf(stderr, "[TLS_SLL_DRAIN] SKIP: class=%d base=%p (invalid SuperSlab)\n",
class_idx, base);
}
continue;
}
// Get slab index
int slab_idx = slab_index_for(ss, base);
if (slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) {
// Invalid slab index - skip this block (dropped, same rationale as above)
if (g_debug) {
fprintf(stderr, "[TLS_SLL_DRAIN] SKIP: class=%d base=%p (invalid slab_idx=%d)\n",
class_idx, base, slab_idx);
}
continue;
}
// Get slab metadata
TinySlabMeta* meta = &ss->slabs[slab_idx];
// Convert BASE → USER pointer (add 1 byte header offset)
// Phase E1: ALL classes (C0-C7) have 1-byte header
void* user_ptr = (char*)base + 1;
// Call tiny_free_local_box() to:
// 1. Push block to slab freelist
// 2. Decrement meta->used (THIS IS THE KEY!)
tiny_free_local_box(ss, slab_idx, meta, user_ptr, my_tid);
drained++;
// BUG FIX: DO NOT release slab here even if meta->used == 0
// Reason: Other blocks from the same slab may still be queued in TLS SLL
// waiting to be drained. Releasing the slab prematurely causes:
// 1. SuperSlab reused for different class
// 2. hak_super_lookup() returns NULL for remaining blocks
// 3. TLS_SLL_POP_INVALID errors and corruption
// Solution: Let LRU eviction and normal lifecycle handle empty slab release.
// Empty slabs will naturally be reclaimed when SuperSlab is idle.
}
if (g_debug && drained > 0) {
fprintf(stderr, "[TLS_SLL_DRAIN] END: class=%d drained=%u remaining=%u\n",
class_idx, drained, g_tls_sll[class_idx].count);
}
// Update per-thread debug stats (reported at exit in debug builds)
g_tls_sll_drain_total_calls++;
g_tls_sll_drain_total_blocks += drained;
return drained;
}
// ========== Drain Trigger (Called from Fast Free Path) ==========
// Box: Try Drain (with counter trigger)
// Purpose: Check drain counter and trigger drain if interval reached
//
// Flow:
// 1. Increment drain counter for this class
// 2. If counter >= interval, trigger drain and reset counter
// 3. Otherwise, do nothing (fast path continues)
//
// Args:
// class_idx: Size class that was just freed
//
// Returns: Number of blocks drained (0 if no drain)
static inline uint32_t tiny_tls_sll_try_drain(int class_idx) {
// Check if drain is enabled
if (__builtin_expect(!tls_sll_drain_is_enabled(), 0)) {
return 0;
}
// Increment counter
g_tls_sll_drain_counter[class_idx]++;
// Check if interval reached
uint32_t interval = tls_sll_drain_get_interval();
if (__builtin_expect(g_tls_sll_drain_counter[class_idx] >= interval, 0)) {
// Trigger drain (drain ALL blocks to enable empty detection)
// batch_size=0 means drain all available blocks
uint32_t drained = tiny_tls_sll_drain(class_idx, 0);
// Reset counter
g_tls_sll_drain_counter[class_idx] = 0;
return drained;
}
return 0; // No drain triggered
}
// ========== Debug Stats (Destructor) ==========
#if !HAKMEM_BUILD_RELEASE
// At-exit reporter for the per-thread drain statistics (debug builds only).
// Note: runs for the thread that executes destructors; TLS counters of other
// threads are not aggregated here.
static void tls_sll_drain_print_stats(void) __attribute__((destructor));
static void tls_sll_drain_print_stats(void) {
    if (g_tls_sll_drain_total_calls > 0) {
        // BUG FIX: use PRIu64 for uint64_t — "%lu" is undefined behavior on
        // platforms where uint64_t is unsigned long long (32-bit, Windows).
        fprintf(stderr,
                "[TLS_SLL_DRAIN_STATS] Total drains: %" PRIu64 ", Total blocks: %" PRIu64 ", Avg: %.2f\n",
                g_tls_sll_drain_total_calls,
                g_tls_sll_drain_total_blocks,
                (double)g_tls_sll_drain_total_blocks / (double)g_tls_sll_drain_total_calls);
    }
}
#endif