Summary of Changes:

MOVED TO ARCHIVE:
- core/hakmem_tiny_legacy_slow_box.inc → archive/
  * Slow-path legacy code preserved for reference
  * Superseded by the Gatekeeper Box architecture
- core/superslab_allocate.c → archive/superslab_allocate_legacy.c
  * Legacy SuperSlab allocation implementation
  * Functionality integrated into the new Box system
- core/superslab_head.c → archive/superslab_head_legacy.c
  * Legacy slab-head management
  * Refactored through the Box architecture

REMOVED DEAD CODE:
- Eliminated unused allocation-policy variants from ss_allocation_box.c
  * Reduced 127+ lines of conditional logic to a focused implementation
  * Removed: old policy branches, unused allocation strategies
  * Kept: the current Box-based allocation path

ADDED NEW INFRASTRUCTURE:
- core/superslab_head_stub.c (41 lines)
  * Minimal stub for backward compatibility
  * Delegates to the new architecture (see the sketch below)
- Enhanced core/superslab_cache.c (75 lines added)
  * Added missing API functions for cache management
  * Proper interface for SuperSlab cache integration

REFACTORED CORE SYSTEMS:
- core/hakmem_super_registry.c
  * Moved registration logic out of scattered locations
  * Centralized SuperSlab registry management
- core/hakmem_tiny.c
  * Removed 27 lines of redundant initialization
  * Simplified through the Box architecture
- core/hakmem_tiny_alloc.inc
  * Streamlined the allocation path to go through the Gatekeeper
  * Removed legacy decision logic
- core/box/ss_allocation_box.c/h
  * Dramatically simplified the allocation policy
  * Removed conditional branches for unused strategies
  * Focused on the current Box-based approach

BUILD SYSTEM:
- Updated the Makefile for the archive structure
- Removed obsolete object-file references
- Maintained build compatibility

SAFETY & TESTING:
- All deletions verified: no broken references
- Build verification: RELEASE=0 and RELEASE=1 both pass
- Smoke tests: 100% pass rate
- Functional verification: allocation/free behavior intact

Architecture Consolidation:
- Before: multiple overlapping allocation paths with legacy code branches
- After: a single unified path through Gatekeeper Boxes with a clear architecture

Benefits:
- Reduced code size and complexity
- Improved maintainability
- Single source of truth for allocation logic
- Better diagnostic/observability hooks
- Foundation for future optimizations

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
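For context, a delegation stub of the kind described under ADDED NEW INFRASTRUCTURE might look roughly like the following. This is a sketch only: the legacy entry-point name and the Box-side function `ss_allocation_box_acquire()` are assumed for illustration, not taken from the actual core/superslab_head_stub.c.

```c
// Sketch of a backward-compatibility stub (illustrative; both function
// names below are assumptions, not the real symbols from this commit).
#include "../core/hakmem_tiny_superslab_internal.h"

// Legacy entry point kept so old call sites still compile and link;
// all real work is delegated to the Box-based allocation path.
SuperSlab* superslab_head_acquire_chunk(int class_idx) {
    return ss_allocation_box_acquire(class_idx);  // hypothetical Box API
}
```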
// Archived legacy SuperSlabHead implementation.
// Not compiled by default; kept for reference / A/B rollback.
// Source moved from core/superslab_head.c after shared-pool backend migration.

#include "../core/hakmem_tiny_superslab_internal.h"

// ============================================================================
// Phase 2a: Dynamic Expansion - Global per-class SuperSlabHeads
// ============================================================================

SuperSlabHead* g_superslab_heads[TINY_NUM_CLASSES_SS] = {NULL};
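// Each slot of g_superslab_heads holds the head of one size class's chunk
// list; slots start NULL and are published by callers of
// init_superslab_head() below.
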
// ============================================================================
// SuperSlabHead Management Functions
// ============================================================================

// Initialize SuperSlabHead for a class
SuperSlabHead* init_superslab_head(int class_idx) {
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }

    // Allocate SuperSlabHead structure
    SuperSlabHead* head = (SuperSlabHead*)calloc(1, sizeof(SuperSlabHead));
    if (!head) {
        extern __thread int g_hakmem_lock_depth;
        g_hakmem_lock_depth++;
        fprintf(stderr, "[HAKMEM] CRITICAL: Failed to allocate SuperSlabHead for class %d\n", class_idx);
        g_hakmem_lock_depth--;
        return NULL;
    }

    head->class_idx = (uint8_t)class_idx;
    atomic_store_explicit(&head->total_chunks, 0, memory_order_relaxed);
    head->first_chunk = NULL;
    head->current_chunk = NULL;
    pthread_mutex_init(&head->expansion_lock, NULL);

    // Allocate initial chunk(s)
    int initial_chunks = 1;

    for (int i = 0; i < initial_chunks; i++) {
        if (expand_superslab_head(head) < 0) {
            extern __thread int g_hakmem_lock_depth;
            g_hakmem_lock_depth++;
            fprintf(stderr, "[HAKMEM] CRITICAL: Failed to allocate initial chunk %d for class %d\n",
                    i, class_idx);
            g_hakmem_lock_depth--;

            // Cleanup on failure
            SuperSlab* chunk = head->first_chunk;
            while (chunk) {
                SuperSlab* next = chunk->next_chunk;
                superslab_free(chunk);
                chunk = next;
            }
            pthread_mutex_destroy(&head->expansion_lock);
            free(head);
            return NULL;
        }
    }

    extern __thread int g_hakmem_lock_depth;
    g_hakmem_lock_depth++;
#if !HAKMEM_BUILD_RELEASE
    fprintf(stderr, "[HAKMEM] Initialized SuperSlabHead for class %d: %zu initial chunks\n",
            class_idx, atomic_load_explicit(&head->total_chunks, memory_order_relaxed));
#endif
    g_hakmem_lock_depth--;

    return head;
}

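// Illustrative helper (an assumption, not part of the original file): legacy
// call sites obtained the per-class head lazily, roughly like this. Note the
// publish into g_superslab_heads is unsynchronized; a real call site would
// need its own synchronization or single-threaded startup.
static inline SuperSlabHead* get_or_init_superslab_head(int class_idx) {
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) return NULL;
    SuperSlabHead* head = g_superslab_heads[class_idx];
    if (!head) {
        head = init_superslab_head(class_idx);
        if (head) g_superslab_heads[class_idx] = head;  // publish for later lookups
    }
    return head;
}
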
// Expand SuperSlabHead by allocating and linking a new chunk
int expand_superslab_head(SuperSlabHead* head) {
    if (!head) {
        return -1;
    }

    // Allocate new chunk via existing superslab_allocate
    SuperSlab* new_chunk = superslab_allocate(head->class_idx);
    if (!new_chunk) {
#if !defined(NDEBUG) || defined(HAKMEM_SUPERSLAB_VERBOSE)
        extern __thread int g_hakmem_lock_depth;
        g_hakmem_lock_depth++;
        fprintf(stderr, "[HAKMEM] CRITICAL: Failed to allocate new chunk for class %d (system OOM)\n",
                head->class_idx);
        g_hakmem_lock_depth--;
#endif
        return -1; // True OOM (system out of memory)
    }

    // Initialize slab 0 so bitmap != 0x00000000
    size_t block_size = g_tiny_class_sizes[head->class_idx];
    uint32_t owner_tid = (uint32_t)(uintptr_t)pthread_self();

    superslab_init_slab(new_chunk, 0, block_size, owner_tid);

    // Initialize the next_chunk link to NULL
    new_chunk->next_chunk = NULL;

    // Thread-safe linking
    pthread_mutex_lock(&head->expansion_lock);

    if (head->current_chunk) {
        SuperSlab* tail = head->current_chunk;
        while (tail->next_chunk) {
            tail = tail->next_chunk;
        }
        tail->next_chunk = new_chunk;
    } else {
        head->first_chunk = new_chunk;
    }

    head->current_chunk = new_chunk;

    size_t old_count = atomic_fetch_add_explicit(&head->total_chunks, 1, memory_order_relaxed);
    size_t new_count = old_count + 1;

    pthread_mutex_unlock(&head->expansion_lock);

#if !defined(NDEBUG) || defined(HAKMEM_SUPERSLAB_VERBOSE)
    extern __thread int g_hakmem_lock_depth;
    g_hakmem_lock_depth++;
    fprintf(stderr, "[HAKMEM] Expanded SuperSlabHead for class %d: %zu chunks now (bitmap=0x%08x)\n",
            head->class_idx, new_count, new_chunk->slab_bitmap);
    g_hakmem_lock_depth--;
#endif

    return 0;
}

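// Note on the linking above: the tail walk starts from head->current_chunk,
// which is advanced to the newest chunk on every expansion while
// expansion_lock is held, so linking is O(1) in the common case and only
// scans further when a removal has moved current_chunk back up the list.
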
// Find which chunk a pointer belongs to
SuperSlab* find_chunk_for_ptr(void* ptr, int class_idx) {
    if (!ptr || class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }

    SuperSlabHead* head = g_superslab_heads[class_idx];
    if (!head) {
        return NULL;
    }

    uintptr_t ptr_addr = (uintptr_t)ptr;

    SuperSlab* chunk = head->first_chunk;
    while (chunk) {
        uintptr_t chunk_start = (uintptr_t)chunk;
        size_t chunk_size = (size_t)1 << chunk->lg_size;
        uintptr_t chunk_end = chunk_start + chunk_size;

        if (ptr_addr >= chunk_start && ptr_addr < chunk_end) {
            return chunk;
        }

        chunk = chunk->next_chunk;
    }

    return NULL;
}

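// Note: the lookup above is a linear scan over the per-class chunk list, so
// its cost grows with the number of chunks. Per the change summary, registry
// management was later centralized in core/hakmem_super_registry.c.
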
// Remove SuperSlab from Legacy Backend list (for safe deallocation)
void remove_superslab_from_legacy_head(SuperSlab* ss) {
    if (!ss) return;

    for (int i = 0; i < TINY_NUM_CLASSES_SS; i++) {
        SuperSlabHead* head = g_superslab_heads[i];
        if (!head) continue;

        pthread_mutex_lock(&head->expansion_lock);

        if (head->first_chunk == ss) {
            head->first_chunk = ss->next_chunk;
            if (head->current_chunk == ss) head->current_chunk = head->first_chunk;
            atomic_fetch_sub_explicit(&head->total_chunks, 1, memory_order_relaxed);
            pthread_mutex_unlock(&head->expansion_lock);
            return;
        }

        SuperSlab* prev = head->first_chunk;
        while (prev && prev->next_chunk) {
            if (prev->next_chunk == ss) {
                prev->next_chunk = ss->next_chunk;
                if (head->current_chunk == ss) head->current_chunk = prev;
                atomic_fetch_sub_explicit(&head->total_chunks, 1, memory_order_relaxed);
                pthread_mutex_unlock(&head->expansion_lock);
                return;
            }
            prev = prev->next_chunk;
        }

        pthread_mutex_unlock(&head->expansion_lock);
    }
}