// hakmem/core/hakmem_tiny.h
#ifndef HAKMEM_TINY_H
#define HAKMEM_TINY_H
#include <stddef.h>
#include <stdint.h>
#include "hakmem_build_flags.h"
#include "hakmem_trace.h" // Optional USDT (perf) tracepoints
#include <pthread.h>
#include <stdatomic.h>
// Include page mini-magazine module (Phase 1: Hybrid optimization)
#include "hakmem_tiny_mini_mag.h"
// Forward declaration for initialization guard
int hak_is_initializing(void);
// Phase 6.12: Tiny Pool - Slab Allocator for small allocations
// 8 size classes (strides): 8B, 16B, 32B, 64B, 128B, 256B, 512B, 2048B (C7 upgraded from 1024B)
// ============================================================================
// Configuration
// ============================================================================
#define TINY_NUM_CLASSES 8
#define TINY_SLAB_SIZE (64 * 1024) // 64KB per slab
// Phase E1-CORRECT: All Tiny classes use a 1-byte header.
// Phase C7-UPGRADE: C7 stride=2048B → usable 2047B (2048-1); 2048B+ requests are delegated to the Mid allocator.
#define TINY_MAX_SIZE 1024 // Static default cap; the effective bound is tiny_get_max_size() below
// Phase 16: Dynamic Tiny max size control (ENV: HAKMEM_TINY_MAX_CLASS)
// Strategy: optionally reduce Tiny coverage (e.g., to ~256B) and delegate larger sizes to Mid
// ENV values:
// HAKMEM_TINY_MAX_CLASS=5 → Tiny handles up to 255B (C0-C5)
// HAKMEM_TINY_MAX_CLASS=7 → Tiny handles up to 2047B (C0-C7, default)
// Inlined here for the hot path (removes ~0.95% call overhead)
#include <stdlib.h>
#include <stdbool.h>
extern bool smallmid_is_enabled(void);
static inline size_t tiny_get_max_size(void) {
static size_t g_cached = 0;
if (__builtin_expect(g_cached == 0, 0)) {
const char* env = getenv("HAKMEM_TINY_MAX_CLASS");
int max_class = 7;
if (env && *env) {
int parsed = atoi(env);
if (parsed >= 0 && parsed < TINY_NUM_CLASSES) max_class = parsed;
}
if (smallmid_is_enabled() && max_class > 5) max_class = 5;
static const size_t sizes[8] = {7, 15, 31, 63, 127, 255, 511, 2047};
g_cached = sizes[max_class];
}
return g_cached;
}
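// Usage sketch (illustrative only, compiled out): how a router might use
// tiny_get_max_size() to pick an allocator. hak_mid_alloc() is a hypothetical
// placeholder name for the Mid-allocator entry point (see hakmem_mid_mt.h for
// the real API).
#if 0
static inline void* route_alloc_example(size_t size) {
    if (size <= tiny_get_max_size())
        return hak_tiny_alloc(size);  // Tiny fast path (C0..max class)
    return hak_mid_alloc(size);       // delegate to Mid (hypothetical name)
}
#endif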
// ============================================================================
// Phase 3d-B: TLS Cache Merge - Unified TLS SLL Structure
// ============================================================================
//
// Goal: Improve L1D cache hit rate by merging head+count into same struct.
//
// OLD (cache line split):
// __thread void* g_tls_sll_head[8]; // 64 bytes (cache line 0)
// __thread uint32_t g_tls_sll_count[8]; // 32 bytes (cache line 1)
// → 2 L1D loads per operation (head from line 0, count from line 1)
//
// NEW (unified):
// __thread TinyTLSSLL g_tls_sll[8]; // 128 bytes = 2 cache lines
// → 1 L1D load per operation (head+count in same 16B struct)
//
// Expected: +12-18% improvement from cache locality
//
typedef struct {
void* head; // SLL head pointer (8 bytes)
uint32_t count; // Number of elements in SLL (4 bytes)
uint32_t _pad; // Padding to 16 bytes for cache alignment (4 bytes)
} TinyTLSSLL;
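// Fast-path sketch (illustrative only, compiled out): with head and count in one
// 16-byte struct, a pop touches a single L1D line. Assumes the per-thread array
// __thread TinyTLSSLL g_tls_sll[TINY_NUM_CLASSES] described above, and
// tiny_next_read() from the Box API (tiny_next_ptr_box.h) as the embedded
// next-pointer accessor.
#if 0
static inline void* tls_sll_pop_example(int cls) {
    TinyTLSSLL* s = &g_tls_sll[cls];
    void* base = s->head;
    if (base == NULL) return NULL;        // empty: caller falls back to refill
    s->head = tiny_next_read(cls, base);  // head+count updated on the same line
    s->count--;
    return base;
}
#endif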
// ============================================================================
// Size Classes
// ============================================================================
// Size class table (branchless lookup)
// Note: Definition is in hakmem_tiny.c to avoid multiple definition errors
// Declaration is in hakmem_tiny_config.h as: extern const size_t g_tiny_class_sizes[TINY_NUM_CLASSES];
// Box 3 (tiny_box_geometry.h) uses this via hakmem_tiny_config.h
// (Definition removed from header - see hakmem_tiny.c)
// Full LUT (1..2048) for branchless size-to-class mapping (index by size).
// Phase C7-UPGRADE: Expanded from 1025 -> 2049 to support 2048B stride (C7).
// Memory cost ~2KB. Zero hot-path arithmetic for all Tiny sizes.
// Generate repeated values via helper macros to keep the source compact.
#define HAK_R1(x) x
#define HAK_R2(x) HAK_R1(x), HAK_R1(x)
#define HAK_R4(x) HAK_R2(x), HAK_R2(x)
#define HAK_R8(x) HAK_R4(x), HAK_R4(x)
#define HAK_R16(x) HAK_R8(x), HAK_R8(x)
#define HAK_R32(x) HAK_R16(x), HAK_R16(x)
#define HAK_R64(x) HAK_R32(x), HAK_R32(x)
#define HAK_R128(x) HAK_R64(x), HAK_R64(x)
#define HAK_R256(x) HAK_R128(x), HAK_R128(x)
#define HAK_R512(x) HAK_R256(x), HAK_R256(x)
#define HAK_R1024(x) HAK_R512(x), HAK_R512(x)
static const int8_t g_size_to_class_lut_2k[2049] = {
-1, // index 0: invalid
HAK_R8(0), // 1..8 -> class 0
HAK_R8(1), // 9..16 -> class 1
HAK_R16(2), // 17..32 -> class 2
HAK_R32(3), // 33..64 -> class 3
HAK_R64(4), // 65..128 -> class 4
HAK_R128(5), // 129..256 -> class 5
HAK_R256(6), // 257..512 -> class 6
HAK_R1024(7), // 513..1536 -> class 7 (1024 entries)
HAK_R512(7), // 1537..2048 -> class 7 (512 entries)
};
#undef HAK_R1024
#undef HAK_R512
#undef HAK_R256
#undef HAK_R128
#undef HAK_R64
#undef HAK_R32
#undef HAK_R16
#undef HAK_R8
#undef HAK_R4
#undef HAK_R2
#undef HAK_R1
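// Sanity sketch (illustrative only, compiled out): spot-check the LUT boundaries.
#if 0
_Static_assert(sizeof(g_size_to_class_lut_2k) == 2049, "1 invalid entry + sizes 1..2048");
// g_size_to_class_lut_2k[1]    == 0  (1..8      -> C0)
// g_size_to_class_lut_2k[9]    == 1  (9..16     -> C1)
// g_size_to_class_lut_2k[512]  == 6  (257..512  -> C6)
// g_size_to_class_lut_2k[513]  == 7  (513..2048 -> C7)
// g_size_to_class_lut_2k[2048] == 7
#endif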
// Blocks per slab for each class
static const uint16_t g_tiny_blocks_per_slab[TINY_NUM_CLASSES] = {
8192, // Class 0: 64KB / 8B = 8192 blocks
4096, // Class 1: 64KB / 16B = 4096 blocks
2048, // Class 2: 64KB / 32B = 2048 blocks
1024, // Class 3: 64KB / 64B = 1024 blocks
512, // Class 4: 64KB / 128B = 512 blocks
256, // Class 5: 64KB / 256B = 256 blocks
128, // Class 6: 64KB / 512B = 128 blocks
32 // Class 7: 64KB / 2048B = 32 blocks
};
// Bitmap size (uint64_t words) for each class
static const uint8_t g_tiny_bitmap_words[TINY_NUM_CLASSES] = {
128, // Class 0: 8192 blocks / 64 = 128 words
64, // Class 1: 4096 blocks / 64 = 64 words
32, // Class 2: 2048 blocks / 64 = 32 words
16, // Class 3: 1024 blocks / 64 = 16 words
8, // Class 4: 512 blocks / 64 = 8 words
4, // Class 5: 256 blocks / 64 = 4 words
2, // Class 6: 128 blocks / 64 = 2 words
1 // Class 7: 32 blocks / 64 = 1 word (rounded up)
};
// ============================================================================
// Data Structures
// ============================================================================
// Forward declaration
typedef struct TinySlab TinySlab;
// Step 2: Slab Registry (Hash Table for O(1) lookup)
#define SLAB_REGISTRY_SIZE 1024
#define SLAB_REGISTRY_MASK (SLAB_REGISTRY_SIZE - 1)
#define SLAB_REGISTRY_MAX_PROBE 8
typedef struct {
uintptr_t slab_base; // 64KB aligned base address (0 = empty slot)
_Atomic(TinySlab*) owner; // Atomic pointer to TinySlab metadata (MT-safe)
} SlabRegistryEntry;
// Global registry (extern for access from multiple translation units)
extern SlabRegistryEntry g_slab_registry[SLAB_REGISTRY_SIZE];
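// Lookup sketch (illustrative only, compiled out): open addressing with a short
// linear probe. The hash shown — folding the 64KB-aligned slab base — is an
// assumption for illustration; the real hash lives in hakmem_tiny.c.
#if 0
static inline TinySlab* registry_lookup_example(void* ptr) {
    uintptr_t base = (uintptr_t)ptr & ~(uintptr_t)(TINY_SLAB_SIZE - 1);
    size_t h = (base >> 16) & SLAB_REGISTRY_MASK;  // base is 64KB-aligned
    for (int i = 0; i < SLAB_REGISTRY_MAX_PROBE; i++) {
        SlabRegistryEntry* e = &g_slab_registry[(h + i) & SLAB_REGISTRY_MASK];
        if (e->slab_base == base)
            return atomic_load_explicit(&e->owner, memory_order_acquire);
        if (e->slab_base == 0) return NULL;  // empty slot ends the probe chain
    }
    return NULL;  // probe limit (8) reached
}
#endif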
// Tiny Pool initialization flag (extern for inline function access)
extern int g_tiny_initialized;
// Adaptive CAS: Active thread counter (for single-threaded optimization)
extern _Atomic uint32_t g_hakmem_active_threads;
// Adaptive CAS: Thread registration (called on first allocation)
void hakmem_thread_register(void);
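// Adaptive-CAS sketch (illustrative only, compiled out): a freelist pop that
// skips the CAS loop while only one thread is registered. `head` stands for any
// atomic freelist head; tiny_next_read() is the Box API next accessor; ABA and
// sentinel handling are omitted (the real code is in box/slab_freelist_atomic.h).
#if 0
static inline void* adaptive_pop_example(atomic_uintptr_t* head, int cls) {
    if (atomic_load_explicit(&g_hakmem_active_threads, memory_order_relaxed) <= 1) {
        // Single-threaded: plain load/store (~4-6 cycles vs 16-27 for CAS).
        void* p = (void*)atomic_load_explicit(head, memory_order_relaxed);
        if (p) atomic_store_explicit(head, (uintptr_t)tiny_next_read(cls, p),
                                     memory_order_relaxed);
        return p;
    }
    uintptr_t old = atomic_load_explicit(head, memory_order_acquire);
    while (old) {  // Multi-threaded: full CAS loop for MT safety
        uintptr_t nxt = (uintptr_t)tiny_next_read(cls, (void*)old);
        if (atomic_compare_exchange_weak_explicit(head, &old, nxt,
                memory_order_acq_rel, memory_order_acquire))
            return (void*)old;
    }
    return NULL;
}
#endif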
// Per-class locks to protect slab lists and bitmaps (padded to avoid false sharing)
typedef struct __attribute__((aligned(64))) { pthread_mutex_t m; char _pad[64]; } PaddedLock;
extern PaddedLock g_tiny_class_locks[TINY_NUM_CLASSES];
// Slab header (one per 64KB slab)
typedef struct TinySlab {
void* base; // Base address (64KB aligned)
uint64_t* bitmap; // Free block bitmap (dynamic size)
uint16_t free_count; // Number of free blocks
uint16_t total_count; // Total blocks in slab
uint8_t class_idx; // Size class index (0-7)
uint8_t _padding[3];
struct TinySlab* next; // Next slab in list
// MPSC remote-free stack head (lock-free). Stores user ptrs; next is embedded in block.
atomic_uintptr_t remote_head;
// Approximate count of pending remote frees (for drain thresholding)
atomic_uint remote_count;
// Targeted remote-drain queue linkage and state (for BG drain targeting)
struct TinySlab* remote_q_next; // Intrusive next pointer for target stack
atomic_uint remote_queued; // 0=not enqueued, 1=enqueued (CAS guarded)
// Owning thread (for remote detection). Allocations from this thread use TLS fast path.
pthread_t owner_tid;
// Hint for next scan start (reduces bitmap word scanning)
uint16_t hint_word;
// Summary bitmap (2nd level): per 64-word group, bit=1 if the group has any free block
uint8_t summary_words; // number of summary words (=(bitmap_words+63)/64)
uint8_t _pad_sum[1];
uint64_t* summary; // length = summary_words
// Phase 1: Page Mini-Magazine (Hybrid bitmap+free-list optimization)
// Fast LIFO cache (16-32 items) for O(1) allocation without bitmap scan
// Cost: 1-2 ns (vs 5-6 ns bitmap scan)
PageMiniMag mini_mag; // LIFO free-list cache
} TinySlab;
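// Remote-free sketch (illustrative only, compiled out): lock-free MPSC push onto
// slab->remote_head. Any thread may push; only the owner (or the BG drainer)
// pops the whole stack at once. The next link is embedded in the freed block;
// offset 0 is used here for brevity (the real offset is class-dependent, per
// the Box API).
#if 0
static inline void remote_free_push_example(TinySlab* slab, void* blk) {
    uintptr_t old = atomic_load_explicit(&slab->remote_head, memory_order_relaxed);
    do {
        *(uintptr_t*)blk = old;  // embed next pointer in the block itself
    } while (!atomic_compare_exchange_weak_explicit(&slab->remote_head, &old,
                 (uintptr_t)blk, memory_order_release, memory_order_relaxed));
    atomic_fetch_add_explicit(&slab->remote_count, 1, memory_order_relaxed);
}
#endif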
// Global Tiny Pool state
typedef struct {
TinySlab* free_slabs[TINY_NUM_CLASSES]; // Slabs with free blocks
TinySlab* full_slabs[TINY_NUM_CLASSES]; // Full slabs (no free blocks)
uint64_t alloc_count[TINY_NUM_CLASSES]; // Allocation count per class
uint64_t free_count[TINY_NUM_CLASSES]; // Free count per class
uint64_t slab_count[TINY_NUM_CLASSES]; // Total slabs per class
} TinyPool;
// Global pool instance (defined in hakmem_tiny.c)
extern TinyPool g_tiny_pool;
// ============================================================================
// API Functions
// ============================================================================
// Initialize Tiny Pool
void hak_tiny_init(void);
// Allocate from Tiny Pool (returns NULL if size exceeds the Tiny max; see tiny_get_max_size())
void* hak_tiny_alloc(size_t size);
// Free to Tiny Pool (no-op if ptr is not managed by Tiny Pool)
void hak_tiny_free(void* ptr);
// Phase 6.12.1: Free with pre-calculated slab (avoids duplicate owner_slab lookup)
void hak_tiny_free_with_slab(void* ptr, TinySlab* slab);
// Check if pointer is managed by Tiny Pool
int hak_tiny_is_managed(void* ptr);
int hak_tiny_is_managed_superslab(void* ptr);
// Return the usable size for a Tiny-managed pointer (0 if unknown/not tiny).
// For SuperSlab-backed blocks, uses size class from the owning SuperSlab.
// For TinySlab-backed blocks, uses class_idx from the owning slab.
size_t hak_tiny_usable_size(void* ptr);
// Get statistics
void hak_tiny_get_stats(uint64_t* alloc_count, uint64_t* free_count, uint64_t* slab_count);
// Print statistics (debug)
void hak_tiny_print_stats(void);
// Phase 7.7: Magazine flush API (reduce memory footprint)
// Flush Magazine cache to freelists, enabling empty SuperSlab detection
void hak_tiny_magazine_flush(int class_idx);
void hak_tiny_magazine_flush_all(void);
// Trim empty Tiny slabs by releasing fully-free slabs back to the system.
// Safe to call anytime; holds per-class locks while trimming.
void hak_tiny_trim(void);
// Optional shutdown hook for Tiny subsystem.
// Stops background threads (e.g., Deferred Intelligence) and performs
// any best-effort cleanup needed during process shutdown.
void hak_tiny_shutdown(void);
// Phase 8.2: Memory profiling (toggle with HAKMEM_DEBUG_MEMORY)
// Print detailed breakdown of memory usage by component
void hak_tiny_print_memory_profile(void);
// Debug: dump Ultra Tiny counters (pop hits/refills/resets)
void hak_tiny_ultra_debug_dump(void);
void hak_tiny_path_debug_dump(void);
// ============================================================================
// ACE Learning Layer: Runtime parameter adjustment
// ============================================================================
// Exported for ACE controller access
extern int g_remote_drain_thresh_per_class[TINY_NUM_CLASSES];
// Set remote drain threshold for a specific size class
void hkm_ace_set_drain_threshold(int class_idx, uint32_t threshold);
// ============================================================================
// Internal Helpers (branchless size-to-class)
// ============================================================================
// Convert size to class index (branchless lookup)
// Phase C7-UPGRADE: ALL classes have 1-byte header
// C7 max usable: 2047B (2048B total with header)
// malloc(2048+) → routed to Mid allocator
static inline int hak_tiny_size_to_class(size_t size) {
if (size == 0) return -1;
#if HAKMEM_TINY_HEADER_CLASSIDX
// Phase C7-UPGRADE: ALL classes have 1-byte header
// Box: [Header 1B][Data NB] = (N+1) bytes total
// g_tiny_class_sizes stores TOTAL size, so we need size+1 bytes
// User requests N bytes → need (N+1) total → look up class with stride ≥ (N+1)
// Max usable: 2047B (C7 stride=2048B)
if (size > 2047) return -1; // 2048+ → Mid allocator
// Find smallest class where stride ≥ (size + 1)
// LUT maps total_size → class, so lookup (size + 1) to find class with that stride
size_t needed = size + 1; // total bytes needed (data + header)
if (needed > 2048) return -1;
return g_size_to_class_lut_2k[needed];
#else
if (size > 1024) return -1;
return g_size_to_class_lut_2k[size]; // 1..1024
#endif
}
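// Worked example (header-classidx build): a 1024B request needs 1025B total
// (data + 1B header), so lut[1025] = 7 → C7 (2048B stride); a 7B request needs
// 8B total → C0. 2048B requests return -1 and fall through to Mid.
//   hak_tiny_size_to_class(7)    -> 0
//   hak_tiny_size_to_class(1024) -> 7
//   hak_tiny_size_to_class(2047) -> 7   (largest Tiny request)
//   hak_tiny_size_to_class(2048) -> -1  (routed to Mid)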
// ============================================================================
// Phase 6.12.1: Slab Lookup (reverted from O(1) embedded metadata)
// ============================================================================
// Phase 6.12.1: Find slab owner by pointer
// NOTE: Reverted from O(1) embedded metadata to O(N) linear search for safety:
// embedded metadata requires dereferencing potentially unmapped memory.
// Still faster than before, because Option C eliminates duplicate lookups.
TinySlab* hak_tiny_owner_slab(void* ptr);
// ============================================================================
// Bitmap Operations (inline for speed)
// ============================================================================
// Set block as used
static inline void hak_tiny_set_used(TinySlab* slab, int block_idx) {
int word_idx = block_idx / 64;
int bit_idx = block_idx % 64;
uint64_t v = slab->bitmap[word_idx] | (1ULL << bit_idx);
slab->bitmap[word_idx] = v;
// update summary: set to 1 if any free bit remains, else 0
int sum_word = word_idx / 64;
int sum_bit = word_idx % 64;
uint64_t has_free = ~v; // any zero in word means free
if (has_free != 0) {
slab->summary[sum_word] |= (1ULL << sum_bit);
} else {
slab->summary[sum_word] &= ~(1ULL << sum_bit);
}
}
// Set block as free
static inline void hak_tiny_set_free(TinySlab* slab, int block_idx) {
int word_idx = block_idx / 64;
int bit_idx = block_idx % 64;
uint64_t v = slab->bitmap[word_idx] & ~(1ULL << bit_idx);
slab->bitmap[word_idx] = v;
// update summary: this word now certainly has a free bit
int sum_word = word_idx / 64;
int sum_bit = word_idx % 64;
slab->summary[sum_word] |= (1ULL << sum_bit);
}
// Check if block is used
static inline int hak_tiny_is_used(TinySlab* slab, int block_idx) {
int word_idx = block_idx / 64;
int bit_idx = block_idx % 64;
return (slab->bitmap[word_idx] & (1ULL << bit_idx)) != 0;
}
// Find first free block (returns -1 if none)
static inline int hak_tiny_find_free_block(TinySlab* slab) {
// Trace bitmap scan attempts
HAK_TP1(bitmap_scan, slab->class_idx);
const int bw = g_tiny_bitmap_words[slab->class_idx];
const int sw = slab->summary_words;
if (bw <= 0 || sw <= 0) return -1;
int start_word = slab->hint_word % bw;
int start_sw = start_word / 64;
int start_sb = start_word % 64;
for (int k = 0; k < sw; k++) {
int idx = start_sw + k;
if (idx >= sw) idx -= sw;
uint64_t bits = slab->summary[idx];
// mask low bits on first iteration
if (k == 0) {
bits &= (~0ULL) << start_sb;
}
// mask out-of-range bits in last summary word
if (idx == sw - 1 && (bw % 64) != 0) {
uint64_t mask = (bw % 64) == 64 ? ~0ULL : ((1ULL << (bw % 64)) - 1ULL);
bits &= mask;
}
if (bits == 0) continue;
int woff = __builtin_ctzll(bits); // word offset within this summary word
int word_idx = idx * 64 + woff; // bitmap word index
if (word_idx >= bw) continue; // safety
uint64_t used = slab->bitmap[word_idx];
uint64_t free_bits = ~used;
if (free_bits == 0) continue; // should not happen if summary correct
int bit_idx = __builtin_ctzll(free_bits); // first free block within word
slab->hint_word = (uint16_t)((word_idx + 1) % bw);
return word_idx * 64 + bit_idx;
}
return -1;
}
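// Allocation sketch (illustrative only, compiled out): how the bitmap ops compose
// on the slow path. Assumes g_tiny_class_sizes (declared in hakmem_tiny_config.h)
// holds the per-class TOTAL stride, and that the caller holds the per-class lock.
#if 0
static inline void* slab_carve_example(TinySlab* slab) {
    int idx = hak_tiny_find_free_block(slab);  // summary-guided bitmap scan
    if (idx < 0) return NULL;                  // slab is full
    hak_tiny_set_used(slab, idx);              // flip bitmap bit + update summary
    slab->free_count--;
    size_t stride = g_tiny_class_sizes[slab->class_idx];
    return (char*)slab->base + (size_t)idx * stride;  // block base (header at +0)
}
#endif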
#endif // HAKMEM_TINY_H