This commit introduces a comprehensive tracing mechanism for allocation failures within the Adaptive Cache Engine (ACE) component, enabling precise identification of the root cause of Out-Of-Memory (OOM) issues related to ACE allocations. Key changes include: - **ACE Tracing Implementation**: Added an environment variable to enable/disable detailed logging of allocation failures, and instrumented the ACE allocation paths to distinguish between "Threshold" (size class mismatch), "Exhaustion" (pool depletion), and "MapFail" (OS memory allocation failure) causes. [NOTE: the original message named the specific functions instrumented; those identifiers were lost in extraction.] - **Build System Fixes**: Corrected the build configuration so the tracing module is properly linked, resolving a link error. - **LD_PRELOAD Wrapper Adjustments**: Investigated the wrapper's behavior under LD_PRELOAD, particularly its interaction with size-routing checks, and enabled debugging flags for the test environment to prevent unintended fallbacks to the system allocator for non-tiny allocations, allowing comprehensive testing of the allocator. - **Debugging & Verification**: Introduced temporary verbose logging to pinpoint execution-flow issues within interception and routing (these temporary logs have since been removed), and created a test script to exercise the tracing features. This feature will significantly aid in diagnosing and resolving allocation-related OOM issues by providing clear insight into the failure pathways.
// hakmem_tiny_superslab_internal.h - Internal declarations for superslab refactor
// Purpose: Shared declarations between superslab implementation files
//          (superslab_stats.c, superslab_head.c, superslab_cache.c,
//           superslab_ace.c, superslab_slab.c, superslab_backend.c).
// License: MIT
// Date: 2025-11-28

#ifndef HAKMEM_TINY_SUPERSLAB_INTERNAL_H
#define HAKMEM_TINY_SUPERSLAB_INTERNAL_H

#include "hakmem_build_flags.h" // CRITICAL: Ensure HAKMEM_TINY_USE_SUPERSLAB is defined
#include "hakmem_tiny_superslab.h"
#include "box/ss_hot_cold_box.h"
#include "box/ss_allocation_box.h" // CRITICAL: For superslab_allocate() declaration (fixes implicit int assumption)
#include "hakmem_super_registry.h"
#include "hakmem_debug_master.h" // For unified debug level control
#include "hakmem_tiny.h"
#include "hakmem_tiny_config.h"
#include "hakmem_shared_pool.h"
#include "hakmem_internal.h"
#include "tiny_region_id.h"
#include "hakmem_tiny_integrity.h"
#include "box/tiny_next_ptr_box.h"
#include "box/slab_freelist_atomic.h"

#include <sys/mman.h>
#include <sys/resource.h>
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h> // For _Atomic qualifiers and atomic_fetch_add() used below
#include <stdint.h>    // For uint8_t/uint32_t/uint64_t used in declarations below
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

// ============================================================================
// Allocation Path Tracking
// ============================================================================

// Allocation path identifiers for debugging. Passed as `path_id` to
// HAK_RET_ALLOC_BLOCK_TRACED so traces can attribute an allocation to the
// fast/slow path that produced it.
#define ALLOC_PATH_UNKNOWN   0
#define ALLOC_PATH_BACKEND   1 // SuperSlab backend (legacy/shared)
#define ALLOC_PATH_TLS_POP   2 // TLS SLL pop
#define ALLOC_PATH_CARVE     3 // Linear carve from slab
#define ALLOC_PATH_FREELIST  4 // Freelist pop
#define ALLOC_PATH_HOTMAG    5 // Hot magazine
#define ALLOC_PATH_FASTCACHE 6 // Fast cache
#define ALLOC_PATH_BUMP      7 // Bump allocator
#define ALLOC_PATH_REFILL    8 // Refill/adoption

// ============================================================================
// Unified Return Macros
// ============================================================================

// HAK_RET_ALLOC_BLOCK - Single exit point for SuperSlab allocations
// Purpose: Ensures consistent header writing across all SuperSlab allocation paths
// Usage: HAK_RET_ALLOC_BLOCK(class_idx, base_ptr);
// Note: Must be used in function context (macro contains a return statement)
#if HAKMEM_TINY_HEADER_CLASSIDX
#define HAK_RET_ALLOC_BLOCK(cls, base_ptr) \
    return tiny_region_id_write_header((base_ptr), (cls))
#else
#define HAK_RET_ALLOC_BLOCK(cls, base_ptr) \
    return (void*)(base_ptr)
#endif

// HAK_RET_ALLOC_BLOCK_TRACED - Same as above but with path tracking
// Enabled via HAKMEM_DEBUG_LEVEL >= 3 (INFO level)
// Legacy: HAKMEM_ALLOC_PATH_TRACE=1 still works for compatibility
// Notes:
// - g_trace_enabled is a per-call-site lazy init (-1 = unchecked); the check
//   is branch-hinted cold so the fast path costs one predictable branch.
// - Output is capped at the first 20 traced allocations per call site to
//   avoid flooding stderr.
// - When headers are disabled (#else), path_id is intentionally unused.
#if HAKMEM_TINY_HEADER_CLASSIDX
#define HAK_RET_ALLOC_BLOCK_TRACED(cls, base_ptr, path_id) do { \
        static int g_trace_enabled = -1; \
        if (__builtin_expect(g_trace_enabled == -1, 0)) { \
            g_trace_enabled = hak_debug_check_level("HAKMEM_ALLOC_PATH_TRACE", 3); \
        } \
        if (__builtin_expect(g_trace_enabled, 0)) { \
            static _Atomic uint32_t g_trace_count = 0; \
            uint32_t n = atomic_fetch_add(&g_trace_count, 1); \
            if (n < 20) { \
                fprintf(stderr, "[ALLOC_PATH] cls=%d path=%d base=%p\n", \
                        (cls), (path_id), (void*)(base_ptr)); \
            } \
        } \
        return tiny_region_id_write_header((base_ptr), (cls)); \
    } while(0)
#else
#define HAK_RET_ALLOC_BLOCK_TRACED(cls, base_ptr, path_id) \
    return (void*)(base_ptr)
#endif

// ============================================================================
// Global Variables (defined in superslab_stats.c)
// ============================================================================

extern pthread_mutex_t g_superslab_lock;
extern uint64_t g_superslabs_allocated;
extern uint64_t g_superslabs_freed;
extern uint64_t g_bytes_allocated;
extern _Atomic uint64_t g_ss_active_dec_calls;
extern _Atomic uint64_t g_hak_tiny_free_calls;
extern _Atomic uint64_t g_ss_remote_push_calls;
extern _Atomic uint64_t g_free_ss_enter;
extern _Atomic uint64_t g_free_local_box_calls;
extern _Atomic uint64_t g_free_remote_box_calls;
extern uint64_t g_ss_alloc_by_class[8];
extern uint64_t g_ss_freed_by_class[8];
extern _Atomic uint64_t g_ss_mmap_count;
extern _Atomic uint64_t g_final_fallback_mmap_count;

// ============================================================================
// SuperSlabHead Management (defined in superslab_head.c)
// ============================================================================

extern SuperSlabHead* g_superslab_heads[TINY_NUM_CLASSES_SS];

// ============================================================================
// Cache System (defined in superslab_cache.c)
// ============================================================================

// Intrusive singly-linked free list node; an entry is the cached superslab
// memory itself reinterpreted as a node.
typedef struct SuperslabCacheEntry {
    struct SuperslabCacheEntry* next;
} SuperslabCacheEntry;

extern SuperslabCacheEntry* g_ss_cache_head[8];
extern size_t g_ss_cache_count[8];
extern size_t g_ss_cache_cap[8];
extern size_t g_ss_precharge_target[8];
extern _Atomic int g_ss_precharge_done[8];
extern int g_ss_cache_enabled;
extern pthread_once_t g_ss_cache_once;
extern pthread_mutex_t g_ss_cache_lock[8];
extern uint64_t g_ss_cache_hits[8];
extern uint64_t g_ss_cache_misses[8];
extern uint64_t g_ss_cache_puts[8];
extern uint64_t g_ss_cache_drops[8];
extern uint64_t g_ss_cache_precharged[8];
extern uint64_t g_superslabs_reused;
extern uint64_t g_superslabs_cached;

// Cache functions (defined in superslab_cache.c)
void ss_cache_global_init(void);
void ss_cache_ensure_init(void);
void* ss_os_acquire(uint8_t size_class, size_t ss_size, uintptr_t ss_mask, int populate);
void ss_cache_precharge(uint8_t size_class, size_t ss_size, uintptr_t ss_mask);
SuperslabCacheEntry* ss_cache_pop(uint8_t size_class);
int ss_cache_push(uint8_t size_class, SuperSlab* ss);

// ============================================================================
// ACE (Adaptive Cache Engine) - defined in superslab_ace.c
// ============================================================================

extern SuperSlabACEState g_ss_ace[TINY_NUM_CLASSES_SS];
extern int g_ss_force_lg;
extern _Atomic int g_ss_populate_once;

uint8_t hak_tiny_superslab_next_lg(int class_idx);
void ace_observe_and_decide(int k);

// ============================================================================
// Statistics (defined in superslab_stats.c)
// ============================================================================

void ss_stats_os_alloc(uint8_t size_class, size_t ss_size);
void ss_stats_cache_reuse(void);
void ss_stats_cache_store(void);
void log_superslab_oom_once(size_t ss_size, size_t alloc_size, int err);

// ============================================================================
// Slab Management (defined in superslab_slab.c)
// ============================================================================

// Drain remote MPSC stack into freelist (ownership already verified by caller)
void _ss_remote_drain_to_freelist_unsafe(SuperSlab* ss, int slab_idx, TinySlabMeta* meta);

// ============================================================================
// Backend Allocation (defined in superslab_backend.c)
// ============================================================================

void* hak_tiny_alloc_superslab_backend_shared(int class_idx);

// ============================================================================
// SuperSlabHead Management (defined in superslab_head.c)
// ============================================================================

SuperSlabHead* init_superslab_head(int class_idx);
int expand_superslab_head(SuperSlabHead* head);
SuperSlab* find_chunk_for_ptr(void* ptr, int class_idx);
void remove_superslab_from_legacy_head(SuperSlab* ss);

#endif // HAKMEM_TINY_SUPERSLAB_INTERNAL_H