This commit introduces a comprehensive tracing mechanism for allocation failures within the Adaptive Cache Engine (ACE) component. The feature allows precise identification of the root cause of Out-Of-Memory (OOM) issues related to ACE allocations.

Key changes include:

- **ACE Tracing Implementation**:
  - Added an environment variable to enable/disable detailed logging of allocation failures.
  - Instrumented , , and  to distinguish between "Threshold" (size class mismatch), "Exhaustion" (pool depletion), and "MapFail" (OS memory allocation failure). A minimal sketch of this failure-category idea appears after this summary.
- **Build System Fixes**:
  - Corrected  to ensure  is properly linked into , resolving an  error.
- **LD_PRELOAD Wrapper Adjustments**:
  - Investigated the wrapper's behavior under , particularly its interaction with  and  checks.
  - Enabled debugging flags for the  environment to prevent unintended fallbacks to 's  for non-tiny allocations, allowing comprehensive testing of the allocator. A generic interposer sketch also follows below.
- **Debugging & Verification**:
  - Introduced temporary verbose logging to pinpoint execution-flow issues within interception and routing; these temporary logs have since been removed.
  - Created  to facilitate testing of the tracing features.

This feature will significantly aid in diagnosing and resolving allocation-related OOM issues in  by providing clear insights into the failure pathways.
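To make the failure categories concrete, here is a minimal sketch of what an environment-variable-gated failure trace could look like. All identifiers in it (`HAK_ACE_TRACE`, `AceAllocFailKind`, `ace_trace_alloc_fail`) are hypothetical stand-ins, not the project's actual API; only the three category names come from the change description above.

```c
/* Sketch only: hypothetical names, not the real ACE tracing API. */
#include <stdio.h>
#include <stdlib.h>

typedef enum {
    ACE_FAIL_THRESHOLD,   /* size class mismatch */
    ACE_FAIL_EXHAUSTION,  /* pool depletion */
    ACE_FAIL_MAPFAIL      /* OS memory allocation (e.g. mmap) failure */
} AceAllocFailKind;

static int ace_trace_enabled(void)
{
    /* Cache the environment check so hot paths pay for getenv() only once. */
    static int cached = -1;
    if (cached < 0) {
        const char* v = getenv("HAK_ACE_TRACE");   /* hypothetical variable name */
        cached = (v && *v && *v != '0') ? 1 : 0;
    }
    return cached;
}

static void ace_trace_alloc_fail(AceAllocFailKind kind, size_t size, int class_idx)
{
    if (!ace_trace_enabled()) return;
    static const char* names[] = { "Threshold", "Exhaustion", "MapFail" };
    fprintf(stderr, "[ACE] alloc fail: %s (size=%zu, class=%d)\n",
            names[kind], size, class_idx);
}
```

A failure path would then call, e.g., `ace_trace_alloc_fail(ACE_FAIL_EXHAUSTION, size, class_idx)` just before returning NULL, so an OOM report can say which of the three pathways was hit.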
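For the LD_PRELOAD side, the sketch below shows the generic interposition pattern being described: tiny requests are routed to the custom allocator and everything else falls through to the next `malloc` in the lookup chain (normally glibc's). `hak_tiny_alloc` and `TINY_MAX_SIZE` are assumed stand-ins for the real entry point and cutoff; this is not the project's actual wrapper.

```c
/* Generic LD_PRELOAD interposer sketch, not the project's real wrapper. */
#define _GNU_SOURCE
#include <dlfcn.h>
#include <stddef.h>

#define TINY_MAX_SIZE 256   /* assumed tiny-class cutoff */

/* Placeholder body so the sketch links on its own; the real project would
 * provide this entry point from its allocator core. */
static void* hak_tiny_alloc(size_t size) { (void)size; return NULL; }

static void* (*real_malloc)(size_t) = NULL;

void* malloc(size_t size)
{
    if (!real_malloc) {
        /* Resolve the next malloc in the symbol lookup chain exactly once. */
        real_malloc = (void* (*)(size_t))dlsym(RTLD_NEXT, "malloc");
    }
    if (size <= TINY_MAX_SIZE) {
        void* p = hak_tiny_alloc(size);
        if (p) return p;                  /* tiny path succeeded */
        /* A debugging build could trace or abort here instead of silently
         * falling back, which is useful when testing the tiny allocator. */
    }
    return real_malloc(size);             /* fall back to the next allocator */
}
```

A production wrapper additionally has to guard against re-entrancy while `dlsym` resolves the next `malloc`, and against being called before its own allocator is initialized.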
// Archived legacy backend for hak_tiny_alloc_superslab_box().
// Not compiled by default; kept for reference/A-B restore.
// Source moved from core/superslab_backend.c after legacy path removal.

#include "../core/hakmem_tiny_superslab_internal.h"

void* hak_tiny_alloc_superslab_backend_legacy(int class_idx)
{
    if (class_idx < 0 || class_idx >= TINY_NUM_CLASSES_SS) {
        return NULL;
    }

    SuperSlabHead* head = g_superslab_heads[class_idx];
    if (!head) {
        head = init_superslab_head(class_idx);
        if (!head) {
            return NULL;
        }
        g_superslab_heads[class_idx] = head;
    }

    // LOCK expansion_lock to protect list traversal (vs remove_superslab_from_legacy_head)
    pthread_mutex_lock(&head->expansion_lock);

    SuperSlab* chunk = head->current_chunk ? head->current_chunk : head->first_chunk;

    while (chunk) {
        int cap = ss_slabs_capacity(chunk);
        for (int slab_idx = 0; slab_idx < cap; slab_idx++) {
            TinySlabMeta* meta = &chunk->slabs[slab_idx];

            // Skip slabs that belong to a different class (or are uninitialized).
            if (meta->class_idx != (uint8_t)class_idx && meta->class_idx != 255) {
                continue;
            }

            // Initialize slab on first use to populate class_map.
            if (meta->capacity == 0) {
                size_t block_size = g_tiny_class_sizes[class_idx];
                uint32_t owner_tid = (uint32_t)(uintptr_t)pthread_self();
                superslab_init_slab(chunk, slab_idx, block_size, owner_tid);
                meta = &chunk->slabs[slab_idx];
                meta->class_idx = (uint8_t)class_idx;
                chunk->class_map[slab_idx] = (uint8_t)class_idx;
            }

            if (meta->used < meta->capacity) {
                size_t stride = tiny_block_stride_for_class(class_idx);
                size_t offset = (size_t)meta->used * stride;
                uint8_t* base = (uint8_t*)chunk
                                + SUPERSLAB_SLAB0_DATA_OFFSET
                                + (size_t)slab_idx * SUPERSLAB_SLAB_USABLE_SIZE
                                + offset;

                meta->used++;
                atomic_fetch_add_explicit(&chunk->total_active_blocks, 1, memory_order_relaxed);

                // UNLOCK before return
                pthread_mutex_unlock(&head->expansion_lock);

                HAK_RET_ALLOC_BLOCK_TRACED(class_idx, base, ALLOC_PATH_BACKEND);
            }
        }
        chunk = chunk->next_chunk;
    }

    // UNLOCK before expansion (which takes lock internally)
    pthread_mutex_unlock(&head->expansion_lock);

    if (expand_superslab_head(head) < 0) {
        return NULL;
    }

    SuperSlab* new_chunk = head->current_chunk;
    if (!new_chunk) {
        return NULL;
    }

    int cap2 = ss_slabs_capacity(new_chunk);
    for (int slab_idx = 0; slab_idx < cap2; slab_idx++) {
        TinySlabMeta* meta = &new_chunk->slabs[slab_idx];

        // Initialize slab on first use to populate class_map.
        if (meta->capacity == 0) {
            size_t block_size = g_tiny_class_sizes[class_idx];
            uint32_t owner_tid = (uint32_t)(uintptr_t)pthread_self();
            superslab_init_slab(new_chunk, slab_idx, block_size, owner_tid);
            meta = &new_chunk->slabs[slab_idx];
            meta->class_idx = (uint8_t)class_idx;
            new_chunk->class_map[slab_idx] = (uint8_t)class_idx;
        }

        if (meta->used < meta->capacity) {
            size_t stride = tiny_block_stride_for_class(class_idx);
            size_t offset = (size_t)meta->used * stride;
            uint8_t* base = (uint8_t*)new_chunk
                            + SUPERSLAB_SLAB0_DATA_OFFSET
                            + (size_t)slab_idx * SUPERSLAB_SLAB_USABLE_SIZE
                            + offset;

            meta->used++;
            atomic_fetch_add_explicit(&new_chunk->total_active_blocks, 1, memory_order_relaxed);
            HAK_RET_ALLOC_BLOCK_TRACED(class_idx, base, ALLOC_PATH_BACKEND);
        }
    }

    return NULL;
}