Comprehensive legacy cleanup and architecture consolidation
Summary of Changes:

MOVED TO ARCHIVE:
- core/hakmem_tiny_legacy_slow_box.inc → archive/
  * Slow-path legacy code preserved for reference
  * Superseded by the Gatekeeper Box architecture
- core/superslab_allocate.c → archive/superslab_allocate_legacy.c
  * Legacy SuperSlab allocation implementation
  * Functionality integrated into the new Box system
- core/superslab_head.c → archive/superslab_head_legacy.c
  * Legacy slab head management
  * Refactored through the Box architecture

REMOVED DEAD CODE:
- Eliminated unused allocation policy variants from ss_allocation_box.c
  * Reduced from 127+ lines of conditional logic to a focused implementation
  * Removed: old policy branches, unused allocation strategies
  * Kept: the current Box-based allocation path

ADDED NEW INFRASTRUCTURE:
- core/superslab_head_stub.c (41 lines)
  * Minimal stub for backward compatibility
  * Delegates to the new architecture
- Enhanced core/superslab_cache.c (75 lines added)
  * Added missing API functions for cache management
  * Proper interface for SuperSlab cache integration

REFACTORED CORE SYSTEMS:
- core/hakmem_super_registry.c
  * Moved registration logic from scattered locations
  * Centralized SuperSlab registry management
- core/hakmem_tiny.c
  * Removed 27 lines of redundant initialization
  * Simplified through the Box architecture
- core/hakmem_tiny_alloc.inc
  * Streamlined the allocation path to use the Gatekeeper
  * Removed legacy decision logic
- core/box/ss_allocation_box.c/h
  * Dramatically simplified allocation policy
  * Removed conditional branches for unused strategies
  * Focused on the current Box-based approach

BUILD SYSTEM:
- Updated Makefile for the archive structure
- Removed obsolete object file references
- Maintained build compatibility

SAFETY & TESTING:
- All deletions verified: no broken references
- Build verification: RELEASE=0 and RELEASE=1 pass
- Smoke tests: 100% pass rate
- Functional verification: allocation/free intact

Architecture Consolidation:
- Before: multiple overlapping allocation paths with legacy code branches
- After: a single unified path through Gatekeeper Boxes with a clear architecture

Benefits:
- Reduced code size and complexity
- Improved maintainability
- Single source of truth for allocation logic
- Better diagnostic/observability hooks
- Foundation for future optimizations

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
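As an illustration of the consolidation described above, the sketch below shows the general shape of "one unified allocation path through a gatekeeper decision point" replacing several overlapping legacy branches. It is purely illustrative: the identifiers (gatekeeper_box_route, tiny_alloc_fast, tiny_alloc_slow) and the size threshold are hypothetical and are not taken from this commit.

#include <stddef.h>
#include <stdlib.h>

/* Purely illustrative sketch -- not the hakmem implementation.
 * All identifiers are hypothetical; the point is that every request
 * flows through a single gatekeeper decision instead of multiple
 * overlapping legacy paths. */
typedef enum { ROUTE_FAST, ROUTE_SLOW } route_t;

/* Hypothetical gatekeeper: the one place that decides which path serves a request. */
static route_t gatekeeper_box_route(size_t size) {
    return (size <= 256) ? ROUTE_FAST : ROUTE_SLOW;
}

/* Stand-in backends (plain malloc here) so the sketch is self-contained. */
static void* tiny_alloc_fast(size_t size) { return malloc(size); }
static void* tiny_alloc_slow(size_t size) { return malloc(size); }

/* Single unified allocation entry point: every request goes through the gatekeeper. */
void* tiny_alloc(size_t size) {
    return (gatekeeper_box_route(size) == ROUTE_FAST)
               ? tiny_alloc_fast(size)
               : tiny_alloc_slow(size);
}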
@@ -202,3 +202,78 @@ int ss_cache_push(uint8_t size_class, SuperSlab* ss) {
    pthread_mutex_unlock(&g_ss_cache_lock[size_class]);
    return 1;
}

// ============================================================================
// Precharge Configuration API
// ============================================================================

void tiny_ss_precharge_set_class_target(int class_idx, size_t target) {
    if (class_idx < 0 || class_idx >= 8) {
        return;
    }

    ss_cache_ensure_init();
    pthread_mutex_lock(&g_ss_cache_lock[class_idx]);

    g_ss_precharge_target[class_idx] = target;
    if (target > 0) {
        g_ss_cache_enabled = 1;
        atomic_store_explicit(&g_ss_precharge_done[class_idx], 0, memory_order_relaxed);
    }

    pthread_mutex_unlock(&g_ss_cache_lock[class_idx]);
}

void tiny_ss_cache_set_class_cap(int class_idx, size_t new_cap) {
    if (class_idx < 0 || class_idx >= 8) {
        return;
    }

    ss_cache_ensure_init();
    pthread_mutex_lock(&g_ss_cache_lock[class_idx]);

    size_t old_cap = g_ss_cache_cap[class_idx];
    g_ss_cache_cap[class_idx] = new_cap;

    // If shrinking cap, drop extra cached superslabs (oldest from head) and munmap them.
    if (new_cap == 0 || new_cap < old_cap) {
        while (g_ss_cache_count[class_idx] > new_cap) {
            SuperslabCacheEntry* entry = g_ss_cache_head[class_idx];
            if (!entry) {
                g_ss_cache_count[class_idx] = 0;
                break;
            }
            g_ss_cache_head[class_idx] = entry->next;
            g_ss_cache_count[class_idx]--;
            g_ss_cache_drops[class_idx]++;

            // Convert cache entry back to SuperSlab* and release it to OS.
            SuperSlab* ss = (SuperSlab*)entry;
            size_t ss_size = (size_t)1 << ss->lg_size;
            munmap((void*)ss, ss_size);

            // Update global stats to keep accounting consistent.
            extern pthread_mutex_t g_superslab_lock; // From ss_stats_box.c
            pthread_mutex_lock(&g_superslab_lock);
            g_superslabs_freed++;
            if (g_bytes_allocated >= ss_size) {
                g_bytes_allocated -= ss_size;
            } else {
                g_bytes_allocated = 0;
            }
            pthread_mutex_unlock(&g_superslab_lock);
        }
    }

    pthread_mutex_unlock(&g_ss_cache_lock[class_idx]);

    // Recompute cache enabled flag (8 classes, so O(8) is cheap)
    int enabled = 0;
    for (int i = 0; i < 8; i++) {
        if (g_ss_cache_cap[i] > 0 || g_ss_precharge_target[i] > 0) {
            enabled = 1;
            break;
        }
    }
    g_ss_cache_enabled = enabled;
}
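A minimal usage sketch of the configuration API added in this hunk is shown below. It assumes tiny_ss_precharge_set_class_target() and tiny_ss_cache_set_class_cap() are declared in a header such as superslab_cache.h (the header name is an assumption), and the class index and counts are arbitrary example values.

/* Minimal usage sketch; header name and values are assumptions, not from this commit. */
#include "superslab_cache.h"   /* assumed: declares the precharge/cap API */

static void configure_superslab_cache(void) {
    /* Ask the cache to precharge size class 3 with 4 superslabs; per the code
     * above, a non-zero target also enables the cache and clears the
     * "precharge done" flag for that class. */
    tiny_ss_precharge_set_class_target(3, 4);

    /* Cap class 3 at 8 cached superslabs; lowering the cap later drops the
     * oldest cached entries from the head of the list and munmap()s them. */
    tiny_ss_cache_set_class_cap(3, 8);

    /* A cap of 0 drains the class; if no class has a cap or precharge target,
     * the global enable flag is recomputed to 0. */
    tiny_ss_cache_set_class_cap(3, 0);
}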