Add: Allocation path tracking for debugging

Added HAK_RET_ALLOC_BLOCK_TRACED macro with path identifiers:
- ALLOC_PATH_BACKEND (1): SuperSlab backend allocation
- ALLOC_PATH_TLS_POP (2): TLS SLL pop
- ALLOC_PATH_CARVE (3): Linear carve
- ALLOC_PATH_FREELIST (4): Freelist pop
- ALLOC_PATH_HOTMAG (5): Hot magazine
- ALLOC_PATH_FASTCACHE (6): Fast cache
- ALLOC_PATH_BUMP (7): Bump allocator
- ALLOC_PATH_REFILL (8): Refill/adoption

Usage:
  HAKMEM_ALLOC_PATH_TRACE=1 ./larson_hakmem ...

When tracing is enabled, the first 20 allocations are logged to stderr with their class index and path ID.
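The gate logic, sketched as a plain function for reference (illustrative only; the actual change is the HAK_RET_ALLOC_BLOCK_TRACED macro in the diff below, and alloc_path_trace is a hypothetical name):

  #include <stdatomic.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>

  /* Read HAKMEM_ALLOC_PATH_TRACE once, then log the first 20
   * allocations to stderr with their class index and path ID. */
  static void alloc_path_trace(int cls, int path_id, void* base) {
      static int enabled = -1;  /* -1 = env var not checked yet */
      if (enabled == -1) {
          const char* env = getenv("HAKMEM_ALLOC_PATH_TRACE");
          enabled = (env && *env && *env != '0') ? 1 : 0;
      }
      if (enabled) {
          static _Atomic uint32_t count = 0;
          if (atomic_fetch_add(&count, 1) < 20)  /* prior value caps at 20 lines */
              fprintf(stderr, "[ALLOC_PATH] cls=%d path=%d base=%p\n",
                      cls, path_id, base);
      }
  }

Expected output shape (values illustrative; path=1 would mean ALLOC_PATH_BACKEND):

  [ALLOC_PATH] cls=3 path=1 base=0x7f2a1c401040

Caching the getenv() result in a function-local static keeps the hot path to a single predictable branch when tracing is off.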

Updated the SuperSlab backend (legacy and shared paths) to use the traced version.
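Each backend return site swaps the plain macro for the traced one, tagged with its path constant (as shown in the diff below):

  - HAK_RET_ALLOC_BLOCK(class_idx, base);
  + HAK_RET_ALLOC_BLOCK_TRACED(class_idx, base, ALLOC_PATH_BACKEND);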

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: Moe Charm (CI)
Date:   2025-11-29 05:38:30 +09:00
Parent: 5582cbc22c
Commit: d5645ec42d

2 changed files with 42 additions and 3 deletions


@@ -26,6 +26,21 @@
#include <pthread.h>
#include <unistd.h>
// ============================================================================
// Allocation Path Tracking
// ============================================================================
// Allocation path identifiers for debugging
#define ALLOC_PATH_UNKNOWN 0
#define ALLOC_PATH_BACKEND 1 // SuperSlab backend (legacy/shared)
#define ALLOC_PATH_TLS_POP 2 // TLS SLL pop
#define ALLOC_PATH_CARVE 3 // Linear carve from slab
#define ALLOC_PATH_FREELIST 4 // Freelist pop
#define ALLOC_PATH_HOTMAG 5 // Hot magazine
#define ALLOC_PATH_FASTCACHE 6 // Fast cache
#define ALLOC_PATH_BUMP 7 // Bump allocator
#define ALLOC_PATH_REFILL 8 // Refill/adoption
// ============================================================================
// Unified Return Macros
// ============================================================================
@@ -42,6 +57,30 @@
return (void*)(base_ptr)
#endif
// HAK_RET_ALLOC_BLOCK_TRACED - Same as above but with path tracking
// Enabled via HAKMEM_ALLOC_PATH_TRACE=1
#if HAKMEM_TINY_HEADER_CLASSIDX
#define HAK_RET_ALLOC_BLOCK_TRACED(cls, base_ptr, path_id) do { \
static int g_trace_enabled = -1; \
if (__builtin_expect(g_trace_enabled == -1, 0)) { \
const char* env = getenv("HAKMEM_ALLOC_PATH_TRACE"); \
g_trace_enabled = (env && *env && *env != '0') ? 1 : 0; \
} \
if (__builtin_expect(g_trace_enabled, 0)) { \
static _Atomic uint32_t g_trace_count = 0; \
uint32_t n = atomic_fetch_add(&g_trace_count, 1); \
if (n < 20) { \
fprintf(stderr, "[ALLOC_PATH] cls=%d path=%d base=%p\n", \
(cls), (path_id), (void*)(base_ptr)); \
} \
} \
return tiny_region_id_write_header((base_ptr), (cls)); \
} while(0)
#else
#define HAK_RET_ALLOC_BLOCK_TRACED(cls, base_ptr, path_id) \
return (void*)(base_ptr)
#endif
// ============================================================================
// Global Variables (defined in superslab_stats.c)
// ============================================================================


@@ -62,7 +62,7 @@ void* hak_tiny_alloc_superslab_backend_legacy(int class_idx)
meta->used++;
atomic_fetch_add_explicit(&chunk->total_active_blocks, 1, memory_order_relaxed);
- HAK_RET_ALLOC_BLOCK(class_idx, base);
+ HAK_RET_ALLOC_BLOCK_TRACED(class_idx, base, ALLOC_PATH_BACKEND);
}
}
chunk = chunk->next_chunk;
@@ -102,7 +102,7 @@
meta->used++;
atomic_fetch_add_explicit(&new_chunk->total_active_blocks, 1, memory_order_relaxed);
- HAK_RET_ALLOC_BLOCK(class_idx, base);
+ HAK_RET_ALLOC_BLOCK_TRACED(class_idx, base, ALLOC_PATH_BACKEND);
}
}
@@ -205,7 +205,7 @@ void* hak_tiny_alloc_superslab_backend_shared(int class_idx)
meta->used++;
atomic_fetch_add_explicit(&ss->total_active_blocks, 1, memory_order_relaxed);
- HAK_RET_ALLOC_BLOCK(class_idx, base);
+ HAK_RET_ALLOC_BLOCK_TRACED(class_idx, base, ALLOC_PATH_BACKEND);
}
/*