Files
hakmem/archive/hakmem_tiny_legacy_slow_box.inc
Moe Charm (CI) 25cb7164c7 Comprehensive legacy cleanup and architecture consolidation
Summary of Changes:

MOVED TO ARCHIVE:
- core/hakmem_tiny_legacy_slow_box.inc → archive/
  * Slow path legacy code preserved for reference
  * Superseded by Gatekeeper Box architecture

- core/superslab_allocate.c → archive/superslab_allocate_legacy.c
  * Legacy SuperSlab allocation implementation
  * Functionality integrated into new Box system

- core/superslab_head.c → archive/superslab_head_legacy.c
  * Legacy slab head management
  * Refactored through Box architecture

REMOVED DEAD CODE:
- Eliminated unused allocation policy variants from ss_allocation_box.c
  * Reduced 127+ lines of conditional logic to a focused implementation
  * Removed: old policy branches, unused allocation strategies
  * Kept: current Box-based allocation path

ADDED NEW INFRASTRUCTURE:
- core/superslab_head_stub.c (41 lines)
  * Minimal stub for backward compatibility
  * Delegates to new architecture (see the sketch after this list)

- Enhanced core/superslab_cache.c (75 lines added)
  * Added missing API functions for cache management
  * Proper interface for SuperSlab cache integration
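
As a rough illustration of what "delegates to new architecture" means here, a minimal sketch of such a compatibility stub follows; the function and header names are hypothetical and not taken from the actual superslab_head_stub.c:

    /* Illustrative sketch only -- identifiers are hypothetical. The legacy
     * entry point survives as a thin forwarder so existing callers keep
     * linking, while the real work happens in the Box-based implementation. */
    #include "superslab_box.h"   /* hypothetical Box API header */

    void* superslab_head_acquire(int class_idx) {
        /* Forward straight into the consolidated Box path. */
        return ss_allocation_box_acquire(class_idx);
    }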

REFACTORED CORE SYSTEMS:
- core/hakmem_super_registry.c
  * Moved registration logic from scattered locations
  * Centralized SuperSlab registry management

- core/hakmem_tiny.c
  * Removed 27 lines of redundant initialization
  * Simplified through Box architecture

- core/hakmem_tiny_alloc.inc
  * Streamlined allocation path to use Gatekeeper
  * Removed legacy decision logic

- core/box/ss_allocation_box.c/h
  * Dramatically simplified allocation policy
  * Removed conditional branches for unused strategies
  * Focused on current Box-based approach
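
To make the shape of the ss_allocation_box.c change concrete, a hedged before/after sketch follows; the policy switch and function names are illustrative, not the project's real identifiers:

    /* Before (sketch): allocation branched across policy variants, most of
     * which were no longer reachable. */
    void* ss_allocate(int class_idx) {
        switch (g_alloc_policy) {                /* hypothetical policy knob */
        case SS_POLICY_LEGACY_SLOW:  return legacy_slow_alloc(class_idx);
        case SS_POLICY_EXPERIMENTAL: return experimental_alloc(class_idx);
        default:                     return box_alloc(class_idx);
        }
    }

    /* After (sketch): a single Box-based path, no policy dispatch. */
    void* ss_allocate(int class_idx) {
        return box_alloc(class_idx);
    }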

BUILD SYSTEM:
- Updated Makefile for archive structure
- Removed obsolete object file references
- Maintained build compatibility

SAFETY & TESTING:
- All deletions verified: no broken references
- Build verification: RELEASE=0 and RELEASE=1 pass
- Smoke tests: 100% pass rate
- Functional verification: allocation/free intact

Architecture Consolidation:
Before: Multiple overlapping allocation paths with legacy code branches
After:  Single unified path through Gatekeeper Boxes with clear architecture

Benefits:
- Reduced code size and complexity
- Improved maintainability
- Single source of truth for allocation logic
- Better diagnostic/observability hooks
- Foundation for future optimizations

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-04 14:22:48 +09:00

// Archived legacy slow allocation path for Tiny pool.
// Not compiled by default; kept for reference / A/B rollback.
// Source moved from core/hakmem_tiny_legacy_slow_box.inc after Box refactor cleanup (2025-12-04).
static __attribute__((cold, noinline, unused)) void* tiny_slow_alloc_fast(int class_idx) {
    int tls_enabled = g_tls_list_enable;
    TinyTLSList* tls = &g_tls_lists[class_idx];
    pthread_mutex_t* lock = &g_tiny_class_locks[class_idx].m;

    pthread_mutex_lock(lock);

    /* Take a slab from the per-class free list, or allocate a fresh one. */
    TinySlab* slab = g_tiny_pool.free_slabs[class_idx];
    if (slab) {
        g_tiny_pool.free_slabs[class_idx] = slab->next;
    } else {
        slab = allocate_new_slab(class_idx);
        if (!slab) {
            pthread_mutex_unlock(lock);
            return NULL;
        }
    }
    slab->next = NULL;

    /* Drain any blocks freed remotely by other threads before scanning. */
    if (atomic_load_explicit(&slab->remote_head, memory_order_acquire)) {
        tiny_remote_drain_locked(slab);
    }

    /* Carve the block that will be returned to the caller. */
    int block_idx = hak_tiny_find_free_block(slab);
    if (block_idx < 0) {
        slab->next = g_tiny_pool.free_slabs[class_idx];
        g_tiny_pool.free_slabs[class_idx] = slab;
        pthread_mutex_unlock(lock);
        return NULL;
    }
    hak_tiny_set_used(slab, block_idx);
    slab->free_count--;

    size_t block_size = g_tiny_class_sizes[class_idx];
    uint8_t* base = (uint8_t*)slab->base;
    void* ret = (void*)(base + ((size_t)block_idx * block_size));
    g_tiny_pool.alloc_count[class_idx]++;

    /* Work out how many extra blocks to pre-carve for the per-class fast
     * cache and, if enabled, the thread-local list. */
    uint16_t cap = g_fast_cap_defaults[class_idx];
    uint16_t count = g_fast_count[class_idx];
    uint16_t fast_need = (cap > count) ? (uint16_t)(cap - count) : 0;
    if (fast_need > slab->free_count) fast_need = (uint16_t)slab->free_count;

    uint32_t tls_need = 0;
    if (tls_enabled && tls_list_needs_refill(tls)) {
        uint32_t target = tls_list_refill_threshold(tls);
        if (tls->count < target) {
            tls_need = target - tls->count;
        }
    }

    uint32_t remaining = slab->free_count;
    if (fast_need > remaining) fast_need = (uint16_t)remaining;
    remaining -= fast_need;
    if (tls_need > remaining) tls_need = remaining;

    /* Refill the fast cache; blocks it cannot accept spill into the TLS list. */
    while (fast_need > 0) {
        int extra_idx = hak_tiny_find_free_block(slab);
        if (extra_idx < 0) break;
        hak_tiny_set_used(slab, extra_idx);
        slab->free_count--;
        void* extra = (void*)(base + ((size_t)extra_idx * block_size));
        int pushed = 0;
        if (__builtin_expect(g_fastcache_enable && class_idx <= 3, 1)) {
            pushed = fastcache_push(class_idx, HAK_BASE_FROM_RAW(extra));
        } else {
            pushed = tiny_fast_push(class_idx, HAK_BASE_FROM_RAW(extra));
        }
        if (!pushed) {
            if (tls_enabled) {
                tiny_tls_list_guard_push(class_idx, tls, extra);
                tls_list_push(tls, extra, class_idx);
            }
        }
        fast_need--;
    }

    /* Refill the thread-local free list. */
    while (tls_enabled && tls_need > 0) {
        int extra_idx = hak_tiny_find_free_block(slab);
        if (extra_idx < 0) break;
        hak_tiny_set_used(slab, extra_idx);
        slab->free_count--;
        void* extra = (void*)(base + ((size_t)extra_idx * block_size));
        tiny_tls_list_guard_push(class_idx, tls, extra);
        tls_list_push(tls, extra, class_idx);
        tls_need--;
    }

    /* Park the slab on the full list if exhausted, else back on the free list. */
    if (slab->free_count == 0) {
        move_to_full_list(class_idx, slab);
    } else {
        slab->next = g_tiny_pool.free_slabs[class_idx];
        g_tiny_pool.free_slabs[class_idx] = slab;
    }

    pthread_mutex_unlock(lock);
    return ret;
}
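
Design note on the archived path: everything above runs under the per-class mutex. The function pulls a slab from the free list (or allocates a fresh one), drains any pending remote frees, carves the block it returns, and then opportunistically carves extra blocks from the same slab to top up the per-class fast cache and, when enabled, the thread-local list, before parking the slab on the free or full list. Batching the refills under a single lock acquisition reduces how often callers re-enter this slow path, at the cost of a longer critical section; this is the behavior the Gatekeeper Box architecture supersedes.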