// Box: Legacy Backend (Phase 12)
// Purpose: Per-class SuperSlabHead backend (legacy implementation)

#include "ss_legacy_backend_box.h"
#include "ss_allocation_box.h"
#include "hakmem_tiny_config.h"
#include "hakmem_tiny.h"        // For tiny_self_u32
#include "../tiny_region_id.h"  // For tiny_region_id_write_header

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

// ============================================================================
// Global State
// ============================================================================

// Phase 2a: Dynamic Expansion - Global per-class SuperSlabHeads
SuperSlabHead* g_superslab_heads[TINY_NUM_CLASSES_SS] = {NULL};

// Legacy fallback hint box (per-thread, per-class)
static __thread SuperSlab* g_ss_legacy_hint_ss[TINY_NUM_CLASSES_SS];
static __thread uint8_t    g_ss_legacy_hint_slab[TINY_NUM_CLASSES_SS];

// ============================================================================
// Hint Box (Optional Optimization)
// ============================================================================
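// The hint box caches, per thread and per class, the last (SuperSlab, slab)
// pair that still had spare capacity. When enabled via the
// HAKMEM_TINY_SS_LEGACY_HINT environment variable, the allocation path tries
// this cached slab first and only falls back to the full chunk walk in
// hak_tiny_alloc_superslab_backend_legacy() when the hint is stale or full.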

void hak_tiny_ss_hint_record(int class_idx, SuperSlab* ss, int slab_idx)
{
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) return;
    if (!ss || slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) return;
    g_ss_legacy_hint_ss[class_idx] = ss;
    g_ss_legacy_hint_slab[class_idx] = (uint8_t)slab_idx;
}

void* hak_tiny_alloc_superslab_backend_hint(int class_idx)
{
    static int g_hint_enabled = -1;
    if (__builtin_expect(g_hint_enabled == -1, 0)) {
        const char* e = getenv("HAKMEM_TINY_SS_LEGACY_HINT");
        g_hint_enabled = (e && *e && *e != '0') ? 1 : 0;
    }
    if (!g_hint_enabled) {
        return NULL;
    }
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }

    SuperSlab* ss = g_ss_legacy_hint_ss[class_idx];
    int slab_idx = (int)g_ss_legacy_hint_slab[class_idx];
    if (!ss) {
        return NULL;
    }

    // Basic sanity: Superslab still alive?
    if (ss->magic != SUPERSLAB_MAGIC) {
        g_ss_legacy_hint_ss[class_idx] = NULL;
        return NULL;
    }
    if (slab_idx < 0 || slab_idx >= ss_slabs_capacity(ss)) {
        g_ss_legacy_hint_ss[class_idx] = NULL;
        return NULL;
    }

    TinySlabMeta* meta = &ss->slabs[slab_idx];
    if (meta->capacity == 0 || meta->used >= meta->capacity) {
        // Hint slab exhausted; clear and fall back.
        g_ss_legacy_hint_ss[class_idx] = NULL;
        return NULL;
    }
    if (meta->class_idx != (uint8_t)class_idx && meta->class_idx != 255) {
        // Different class bound; hint no longer valid.
        g_ss_legacy_hint_ss[class_idx] = NULL;
        return NULL;
    }
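
    // Bump-pointer carve from the hinted slab. The address of the next free
    // block follows directly from the layout used throughout this file:
    //
    //   block = ss                                    // SuperSlab base
    //         + SUPERSLAB_SLAB0_DATA_OFFSET           // skip SuperSlab metadata
    //         + slab_idx * SUPERSLAB_SLAB_USABLE_SIZE // selected slab
    //         + used * stride                         // blocks already carved
    //
    // (Illustrative restatement of the computation below; the constants are
    // defined in the SuperSlab headers.)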
    size_t stride = tiny_block_stride_for_class(class_idx);
    size_t offset = (size_t)meta->used * stride;
    size_t slab_base_off = SUPERSLAB_SLAB0_DATA_OFFSET
                         + (size_t)slab_idx * SUPERSLAB_SLAB_USABLE_SIZE;
    uint8_t* base = (uint8_t*)ss + slab_base_off + offset;

    meta->used++;
    atomic_fetch_add_explicit(&ss->total_active_blocks, 1, memory_order_relaxed);

    // Keep hint as long as there is remaining capacity.
    if (meta->used >= meta->capacity) {
        g_ss_legacy_hint_ss[class_idx] = NULL;
    }
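
    // When HAKMEM_TINY_HEADER_CLASSIDX is enabled, tiny_region_id_write_header()
    // records the class index in the block's header and yields the pointer that
    // is returned to the caller; the header layout itself is defined in
    // ../tiny_region_id.h. Without the header scheme the raw block base is
    // returned as-is.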
#if HAKMEM_TINY_HEADER_CLASSIDX
    return tiny_region_id_write_header(base, class_idx);
#else
    return (void*)base;
#endif
}

// ============================================================================
// Legacy Backend Implementation
// ============================================================================

/*
 * Legacy backend for hak_tiny_alloc_superslab_box().
 *
 * Phase 12 Stage A/B:
 * - Uses per-class SuperSlabHead (g_superslab_heads) as the implementation.
 * - Callers MUST use hak_tiny_alloc_superslab_box() and never touch this directly.
 * - Later Stage C: this function will be replaced by a shared_pool backend.
 */
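
// Intended call shape (illustrative sketch only; hak_tiny_alloc_superslab_box()
// itself is defined elsewhere):
//
//   void* p = hak_tiny_alloc_superslab_box(class_idx);   // the Box front door
//
// The front door may consult hak_tiny_alloc_superslab_backend_hint() first and
// fall back to hak_tiny_alloc_superslab_backend_legacy() on a miss; the two
// backends below are implementation details and must not be called directly.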

void* hak_tiny_alloc_superslab_backend_legacy(int class_idx)
{
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }

    SuperSlabHead* head = g_superslab_heads[class_idx];
    if (!head) {
        head = init_superslab_head(class_idx);
        if (!head) {
            return NULL;
        }
        g_superslab_heads[class_idx] = head;
    }

    SuperSlab* chunk = head->current_chunk ? head->current_chunk : head->first_chunk;

    while (chunk) {
        int cap = ss_slabs_capacity(chunk);
        for (int slab_idx = 0; slab_idx < cap; slab_idx++) {
            TinySlabMeta* meta = &chunk->slabs[slab_idx];

            // Skip slabs already bound to a different class.
            // (class_idx == 255 means "unassigned": such slabs are claimed below.)
            if (meta->class_idx != (uint8_t)class_idx && meta->class_idx != 255) {
                continue;
            }

            // P1.2 FIX: Initialize slab on first use (like shared backend does)
            // This ensures class_map is populated for all slabs, not just slab 0
            if (meta->capacity == 0) {
                size_t block_size = g_tiny_class_sizes[class_idx];
                uint32_t owner_tid = (uint32_t)(uintptr_t)pthread_self();
                superslab_init_slab(chunk, slab_idx, block_size, owner_tid);
                meta = &chunk->slabs[slab_idx]; // Refresh pointer after init
                meta->class_idx = (uint8_t)class_idx;
                // P1.2: Update class_map for dynamic slab initialization
                chunk->class_map[slab_idx] = (uint8_t)class_idx;
            }

            if (meta->used < meta->capacity) {
                // NOTE: Geometry validation removed (redundant).
                // Stride table is now correct in tiny_block_stride_for_class(),
                // and shared_pool validates geometry at acquisition time.
                size_t stride = tiny_block_stride_for_class(class_idx);
                size_t offset = (size_t)meta->used * stride;
                uint8_t* base = (uint8_t*)chunk
                              + SUPERSLAB_SLAB0_DATA_OFFSET
                              + (size_t)slab_idx * SUPERSLAB_SLAB_USABLE_SIZE
                              + offset;

                hak_tiny_ss_hint_record(class_idx, chunk, slab_idx);
                meta->used++;
                atomic_fetch_add_explicit(&chunk->total_active_blocks, 1, memory_order_relaxed);
#if HAKMEM_TINY_HEADER_CLASSIDX
                return tiny_region_id_write_header(base, class_idx);
#else
                return (void*)base;
#endif
            }
        }
        chunk = chunk->next_chunk;
    }
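
    // No existing chunk had a free block for this class: grow the list by one
    // chunk and carve from it. expand_superslab_head() links the new chunk and
    // makes it head->current_chunk, so the retry below starts there.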
    if (expand_superslab_head(head) < 0) {
        return NULL;
    }

    SuperSlab* new_chunk = head->current_chunk;
    if (!new_chunk) {
        return NULL;
    }

    int cap2 = ss_slabs_capacity(new_chunk);
    for (int slab_idx = 0; slab_idx < cap2; slab_idx++) {
        TinySlabMeta* meta = &new_chunk->slabs[slab_idx];

        // P1.2 FIX: Initialize slab on first use (like shared backend does)
        if (meta->capacity == 0) {
            size_t block_size = g_tiny_class_sizes[class_idx];
            uint32_t owner_tid = (uint32_t)(uintptr_t)pthread_self();
            superslab_init_slab(new_chunk, slab_idx, block_size, owner_tid);
            meta = &new_chunk->slabs[slab_idx]; // Refresh pointer after init
            meta->class_idx = (uint8_t)class_idx;
            // P1.2: Update class_map for dynamic slab initialization
            new_chunk->class_map[slab_idx] = (uint8_t)class_idx;
        }

        if (meta->used < meta->capacity) {
            size_t stride = tiny_block_stride_for_class(class_idx);
            size_t offset = (size_t)meta->used * stride;
            uint8_t* base = (uint8_t*)new_chunk
                          + SUPERSLAB_SLAB0_DATA_OFFSET
                          + (size_t)slab_idx * SUPERSLAB_SLAB_USABLE_SIZE
                          + offset;

            hak_tiny_ss_hint_record(class_idx, new_chunk, slab_idx);
            meta->used++;
            atomic_fetch_add_explicit(&new_chunk->total_active_blocks, 1, memory_order_relaxed);
#if HAKMEM_TINY_HEADER_CLASSIDX
            return tiny_region_id_write_header(base, class_idx);
#else
            return (void*)base;
#endif
        }
    }

    return NULL;
}

// ============================================================================
// SuperSlabHead Management
// ============================================================================

// Initialize SuperSlabHead for a class
SuperSlabHead* init_superslab_head(int class_idx) {
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }

    // Allocate SuperSlabHead structure
    SuperSlabHead* head = (SuperSlabHead*)calloc(1, sizeof(SuperSlabHead));
    if (!head) {
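        // Note: the g_hakmem_lock_depth increments around fprintf() here and
        // below appear to guard against re-entry (stdio may allocate); the
        // counter itself is defined and consulted elsewhere in the allocator.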
        extern __thread int g_hakmem_lock_depth;
        g_hakmem_lock_depth++;
        fprintf(stderr, "[HAKMEM] CRITICAL: Failed to allocate SuperSlabHead for class %d\n", class_idx);
        g_hakmem_lock_depth--;
        return NULL;
    }

    head->class_idx = (uint8_t)class_idx;
    atomic_store_explicit(&head->total_chunks, 0, memory_order_relaxed);
    head->first_chunk = NULL;
    head->current_chunk = NULL;
    pthread_mutex_init(&head->expansion_lock, NULL);

    // Allocate initial chunk(s).
    // Phase 2a: Start with 1 chunk for all classes (expansion will handle growth).
    // This reduces startup memory overhead while still allowing unlimited growth.
    // (An earlier variant gave hot classes 1, 4, 6 two initial chunks to reduce
    // contention; that special case has been dropped.)
    int initial_chunks = 1;

    for (int i = 0; i < initial_chunks; i++) {
        if (expand_superslab_head(head) < 0) {
            extern __thread int g_hakmem_lock_depth;
            g_hakmem_lock_depth++;
            fprintf(stderr, "[HAKMEM] CRITICAL: Failed to allocate initial chunk %d for class %d\n",
                    i, class_idx);
            g_hakmem_lock_depth--;

            // Cleanup on failure
            SuperSlab* chunk = head->first_chunk;
            while (chunk) {
                SuperSlab* next = chunk->next_chunk;
                superslab_free(chunk);
                chunk = next;
            }
            pthread_mutex_destroy(&head->expansion_lock);
            free(head);
            return NULL;
        }
    }

    extern __thread int g_hakmem_lock_depth;
    g_hakmem_lock_depth++;
#if !HAKMEM_BUILD_RELEASE
    fprintf(stderr, "[HAKMEM] Initialized SuperSlabHead for class %d: %zu initial chunks\n",
            class_idx, atomic_load_explicit(&head->total_chunks, memory_order_relaxed));
#endif
    g_hakmem_lock_depth--;

    return head;
}

// Expand SuperSlabHead by allocating and linking a new chunk
int expand_superslab_head(SuperSlabHead* head) {
    if (!head) {
        return -1;
    }

    // Allocate new chunk via existing superslab_allocate
    SuperSlab* new_chunk = superslab_allocate(head->class_idx);
    if (!new_chunk) {
#if !defined(NDEBUG) || defined(HAKMEM_SUPERSLAB_VERBOSE)
        extern __thread int g_hakmem_lock_depth;
        g_hakmem_lock_depth++;
        fprintf(stderr, "[HAKMEM] CRITICAL: Failed to allocate new chunk for class %d (system OOM)\n",
                head->class_idx);
        g_hakmem_lock_depth--;
#endif
        return -1; // True OOM (system out of memory)
    }

    // CRITICAL FIX: Initialize slab 0 so bitmap != 0x00000000.
    // Phase 2a chunks must have at least one usable slab after allocation.
    size_t block_size = g_tiny_class_sizes[head->class_idx];
    // Use pthread_self() directly since tiny_self_u32() is static inline in hakmem_tiny.c
    uint32_t owner_tid = (uint32_t)(uintptr_t)pthread_self();

    superslab_init_slab(new_chunk, 0, block_size, owner_tid);

    // CRITICAL FIX: Explicitly set class_idx to avoid C0/C7 confusion.
    // New SuperSlabs start with meta->class_idx=0 (mmap zero-init).
    new_chunk->slabs[0].class_idx = (uint8_t)head->class_idx;
    // P1.1: Update class_map for legacy backend
    new_chunk->class_map[0] = (uint8_t)head->class_idx;

    // Initialize the next_chunk link to NULL
    new_chunk->next_chunk = NULL;

    // Thread-safe linking
    pthread_mutex_lock(&head->expansion_lock);

    if (head->current_chunk) {
        // Find the tail of the list (optimization: could cache tail pointer)
        SuperSlab* tail = head->current_chunk;
        while (tail->next_chunk) {
            tail = tail->next_chunk;
        }
        tail->next_chunk = new_chunk;
    } else {
        // First chunk
        head->first_chunk = new_chunk;
    }
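
    // Note: current_chunk is updated to the newest chunk below, so in practice
    // the walk above ends almost immediately. A cached tail (e.g. a
    // hypothetical head->last_chunk field, not present today) would make the
    // append strictly O(1), as the comment above suggests:
    //
    //   head->last_chunk->next_chunk = new_chunk;
    //   head->last_chunk             = new_chunk;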

    // Update current chunk to new chunk (for fast allocation)
    head->current_chunk = new_chunk;

    // Increment total chunks atomically
    size_t old_count = atomic_fetch_add_explicit(&head->total_chunks, 1, memory_order_relaxed);
    size_t new_count = old_count + 1;

    pthread_mutex_unlock(&head->expansion_lock);

#if !defined(NDEBUG) || defined(HAKMEM_SUPERSLAB_VERBOSE)
    extern __thread int g_hakmem_lock_depth;
    g_hakmem_lock_depth++;
    fprintf(stderr, "[HAKMEM] Expanded SuperSlabHead for class %d: %zu chunks now (bitmap=0x%08x)\n",
            head->class_idx, new_count, new_chunk->slab_bitmap);
    g_hakmem_lock_depth--;
#endif

    return 0;
}

// Find which chunk a pointer belongs to
SuperSlab* find_chunk_for_ptr(void* ptr, int class_idx) {
    if (!ptr || class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }

    SuperSlabHead* head = g_superslab_heads[class_idx];
    if (!head) {
        return NULL;
    }

    uintptr_t ptr_addr = (uintptr_t)ptr;

    // Walk the chunk list
    SuperSlab* chunk = head->first_chunk;
    while (chunk) {
        // Check if ptr is within this chunk's memory range.
        // Each chunk is aligned to SUPERSLAB_SIZE (1MB or 2MB).
        uintptr_t chunk_start = (uintptr_t)chunk;
        size_t chunk_size = (size_t)1 << chunk->lg_size; // Use actual chunk size
        uintptr_t chunk_end = chunk_start + chunk_size;

        if (ptr_addr >= chunk_start && ptr_addr < chunk_end) {
            // Found the chunk
            return chunk;
        }

        chunk = chunk->next_chunk;
    }

    return NULL; // Not found in any chunk
}
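
// Illustrative use of find_chunk_for_ptr() (sketch only; the actual free path
// is implemented elsewhere). Assuming the block layout used by the allocation
// paths above, a caller holding a user pointer p could recover the owning slab
// and its metadata like this:
//
//   SuperSlab* chunk = find_chunk_for_ptr(p, class_idx);
//   if (chunk) {
//       size_t off      = (uintptr_t)p - (uintptr_t)chunk - SUPERSLAB_SLAB0_DATA_OFFSET;
//       int    slab_idx = (int)(off / SUPERSLAB_SLAB_USABLE_SIZE);
//       TinySlabMeta* m = &chunk->slabs[slab_idx];   // per-slab metadata
//   }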