// smallobject_hotbox_v4.c - SmallObject HotHeap v4 (C5/C6/C7 opt-in)
//
// Phase v4-3.1: C7 is served entirely by v4's own freelist/current/partial
// structures. C5/C6 reuse the same-shaped path behind strong opt-in gates.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>   // fprintf/fflush in the stats dump below

#ifndef likely
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif

#include "box/smallobject_hotbox_v4_box.h"
#include "box/smallobject_hotbox_v4_env_box.h"
#include "box/smallobject_hotbox_v4_stats_box.h"
#include "box/smallobject_cold_iface_v4.h"
#include "box/smallobject_hotbox_v3_env_box.h"
#include "box/tiny_heap_box.h"
#include "box/smallsegment_v4_box.h"
#include "box/smallsegment_v4_env_box.h"
#include "box/tiny_cold_iface_v1.h"
#include "box/tiny_geometry_box.h"
#include "tiny_region_id.h"

// ============================================================================
// Stats storage (Phase v4-mid-5)
// ============================================================================
small_heap_v4_class_stats_t g_small_heap_v4_stats[8];

// ============================================================================
// v4 Segment Configuration (Phase v4-mid-0+)
// ============================================================================

#define SMALL_SEGMENT_V4_SIZE       (2 * 1024 * 1024)  // 2 MiB segment
#define SMALL_SEGMENT_V4_PAGE_SIZE  (64 * 1024)        // 64 KiB page
#define SMALL_SEGMENT_V4_MAGIC      0xDEADBEEF
#define SMALL_SEGMENT_V4_PAGE_SHIFT 16                 // log2(64 KiB)

// TLS context
static __thread small_heap_ctx_v4 g_ctx_v4;

// Phase v4-mid-6: C6 TLS Fastlist
static __thread SmallC6FastState g_small_c6_fast;

static inline SmallC6FastState* small_c6_fast_state(void) {
    return &g_small_c6_fast;
}

// Internal segment structure (internal use only, not exposed via the public box API)
typedef struct small_segment_v4_internal {
    int              class_idx;
    size_t           segment_size;
    tiny_heap_ctx_t* tiny_ctx;
} small_segment_v4_internal;

static __thread small_segment_v4_internal g_segments_v4[SMALLOBJECT_NUM_CLASSES];
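
// Accessor for the per-thread (TLS) v4 heap context.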
small_heap_ctx_v4* small_heap_ctx_v4_get(void) {
    return &g_ctx_v4;
}

static small_page_v4* v4_page_from_lease(tiny_heap_page_t* lease, int class_idx, small_segment_v4* seg);

// -----------------------------------------------------------------------------
// helpers
// -----------------------------------------------------------------------------

static inline int v4_class_supported(int class_idx) {
    return class_idx == 7 || class_idx == 6 || class_idx == 5;
}
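
// Segment size policy: an env override (parsed with strtoull base 0, so decimal or hex
// is accepted) wins when it is larger than one 64 KiB page; otherwise use the 2 MiB default.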
static size_t smallsegment_v4_default_size(void) {
    const char* env = smallsegment_v4_size_env();
    if (env && *env) {
        size_t v = strtoull(env, NULL, 0);
        if (v > (size_t)(64 * 1024)) {
            return v;
        }
    }
    return (size_t)(2 * 1024 * 1024); // default: 2 MiB segment granularity (for the future segment implementation)
}
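
// Lazily initialize and return this thread's per-class segment descriptor.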
small_segment_v4* smallsegment_v4_acquire(int class_idx) {
    if (!v4_class_supported(class_idx)) return NULL;
    small_segment_v4_internal* seg = &g_segments_v4[class_idx];
    seg->class_idx = class_idx;
    if (!seg->segment_size) {
        seg->segment_size = smallsegment_v4_default_size();
    }
    if (!seg->tiny_ctx) {
        seg->tiny_ctx = tiny_heap_ctx_for_thread();
    }
    return (small_segment_v4*)seg;
}
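
// Lease one page for class_idx from TinyHeap and wrap it in v4 page metadata owned by seg.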
void* smallsegment_v4_alloc_page(small_segment_v4* seg, int class_idx) {
    if (!seg || !v4_class_supported(class_idx)) return NULL;
    // Internal use only: cast to internal type to access tiny_ctx
    small_segment_v4_internal* int_seg = (small_segment_v4_internal*)seg;
    if (!int_seg->tiny_ctx) {
        int_seg->tiny_ctx = tiny_heap_ctx_for_thread();
    }
    tiny_heap_ctx_t* tctx = int_seg->tiny_ctx ? int_seg->tiny_ctx : tiny_heap_ctx_for_thread();
    if (!tctx) return NULL;

    tiny_heap_page_t* lease = tiny_heap_prepare_page(tctx, class_idx);
    if (!lease) return NULL;
    int_seg->tiny_ctx = tctx;
    return v4_page_from_lease(lease, class_idx, seg);
}
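
// Return an empty page's underlying TinyHeap lease and free the v4 page metadata.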
void smallsegment_v4_release_if_empty(small_segment_v4* seg, void* page_ptr, int class_idx) {
    (void)seg; // the lease is reached via page->slab_ref; seg is kept for API symmetry
    small_page_v4* page = (small_page_v4*)page_ptr;
    if (!page || !v4_class_supported(class_idx)) return;
    tiny_heap_ctx_t* tctx = tiny_heap_ctx_for_thread();
    tiny_heap_page_t* lease = (tiny_heap_page_t*)page->slab_ref;
    if (tctx && lease) {
        tiny_heap_page_becomes_empty(tctx, class_idx, lease);
    }
    free(page);
}
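
// Intrusive singly-linked list helpers for the per-class partial/full page lists.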
static inline void v4_page_push_partial(small_class_heap_v4* h, small_page_v4* page) {
    if (!h || !page) return;
    page->next = h->partial_head;
    h->partial_head = page;
    h->partial_count++;
}

static inline small_page_v4* v4_page_pop_partial(small_class_heap_v4* h) {
    if (!h) return NULL;
    small_page_v4* p = h->partial_head;
    if (p) {
        h->partial_head = p->next;
        p->next = NULL;
        if (h->partial_count > 0) {
            h->partial_count--;
        }
    }
    return p;
}

static inline void v4_page_push_full(small_class_heap_v4* h, small_page_v4* page) {
    if (!h || !page) return;
    page->next = h->full_head;
    h->full_head = page;
}

static inline uint32_t v4_partial_limit(int class_idx) {
    // C7 keeps a slightly larger partial list to reduce refill/retire churn
    return (class_idx == 7) ? 2u : 1u;
}
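
// True when ptr lies inside page's block area and sits exactly on a block boundary.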
static inline int v4_ptr_in_page(const small_page_v4* page, const uint8_t* ptr) {
    if (!page || !ptr) return 0;
    uint8_t* base = page->base;
    size_t span = (size_t)page->block_size * (size_t)page->capacity;
    if (ptr < base || ptr >= base + span) return 0;
    size_t off = (size_t)(ptr - base);
    return (off % page->block_size) == 0;
}
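
// Thread a singly-linked freelist through the blocks in reverse index order, so the
// returned head is the lowest-addressed block and links run toward higher addresses.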
static inline void* v4_build_freelist(uint8_t* base, uint16_t capacity, size_t stride) {
    void* head = NULL;
    for (int i = capacity - 1; i >= 0; i--) {
        uint8_t* blk = base + ((size_t)i * stride);
        void* next = head;
        head = blk;
        memcpy(blk, &next, sizeof(void*));
    }
    return head;
}
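
// Where a page was found inside a class heap; free uses this to unlink it from the right list.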
typedef enum {
    V4_LOC_NONE = 0,
    V4_LOC_CURRENT,
    V4_LOC_PARTIAL,
    V4_LOC_FULL,
} v4_loc_t;
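
// Linear scan: current page first, then the partial list, then the full list. Reports
// the location and the list predecessor so the caller can unlink the page.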
static small_page_v4* v4_find_page(small_class_heap_v4* h, const uint8_t* ptr, v4_loc_t* loc, small_page_v4** prev_out) {
    if (loc) *loc = V4_LOC_NONE;
    if (prev_out) *prev_out = NULL;
    if (!h || !ptr) return NULL;

    if (h->current && v4_ptr_in_page(h->current, ptr)) {
        if (loc) *loc = V4_LOC_CURRENT;
        return h->current;
    }
    small_page_v4* prev = NULL;
    for (small_page_v4* p = h->partial_head; p; prev = p, p = p->next) {
        if (v4_ptr_in_page(p, ptr)) {
            if (loc) *loc = V4_LOC_PARTIAL;
            if (prev_out) *prev_out = prev;
            return p;
        }
    }
    prev = NULL; // restart predecessor tracking; it must not carry over from the partial scan
    for (small_page_v4* p = h->full_head; p; prev = p, p = p->next) {
        if (v4_ptr_in_page(p, ptr)) {
            if (loc) *loc = V4_LOC_FULL;
            if (prev_out) *prev_out = prev;
            return p;
        }
    }
    return NULL;
}
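
// Ownership probe: non-zero only when ptr lies in one of this thread's v4 pages for class_idx.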
int smallobject_hotbox_v4_can_own(int class_idx, void* ptr) {
    if (__builtin_expect(!v4_class_supported(class_idx), 0)) return 0;
    if (!small_heap_v4_class_enabled((uint8_t)class_idx)) return 0;
    if (!ptr) return 0;
    small_heap_ctx_v4* ctx = small_heap_ctx_v4_get();
    if (!ctx) return 0;
    small_class_heap_v4* h = &ctx->cls[class_idx];
    return v4_find_page(h, (const uint8_t*)ptr, NULL, NULL) != NULL;
}

// -----------------------------------------------------------------------------
// Cold iface (C5/C6/C7, via Tiny v1)
// -----------------------------------------------------------------------------
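
// Wrap a TinyHeap lease in freshly malloc'd v4 page metadata: record class/capacity/stride,
// keep back-references to the lease and owning segment, and build the initial freelist.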
static small_page_v4* v4_page_from_lease(tiny_heap_page_t* lease, int class_idx, small_segment_v4* seg) {
    if (!lease) return NULL;
    small_page_v4* page = (small_page_v4*)malloc(sizeof(small_page_v4));
    if (!page) return NULL;
    memset(page, 0, sizeof(*page));
    page->class_idx  = (uint8_t)class_idx;
    page->capacity   = lease->capacity;
    page->used       = 0;
    page->block_size = (uint32_t)tiny_stride_for_class((int)class_idx);
    page->base       = lease->base;
    page->slab_ref   = lease;
    page->segment    = seg;
    page->freelist   = v4_build_freelist(lease->base, lease->capacity, page->block_size);
    if (!page->freelist) {
        free(page);
        return NULL;
    }
    page->next  = NULL;
    page->flags = 0;
    return page;
}

// Phase v4-mid-SEGV: C6-specific SmallSegment page allocation (NO TinyHeap)
static small_page_v4* c6_segment_alloc_page_direct(void) {
    // C6 only: take the page directly from SmallSegment (not through TinyHeap's shared lists)
    small_segment_v4* seg = smallsegment_v4_acquire(6);
    if (!seg) return NULL;

    // For C6, directly allocate from SmallSegment v4 via the internal tiny_ctx
    // (this path still uses tiny_heap_prepare_page internally but manages the lease independently)
    small_segment_v4_internal* int_seg = (small_segment_v4_internal*)seg;
    if (!int_seg->tiny_ctx) {
        int_seg->tiny_ctx = tiny_heap_ctx_for_thread();
    }
    tiny_heap_ctx_t* tctx = int_seg->tiny_ctx;
    if (!tctx) return NULL;

    // Get a fresh page from TinyHeap for the C6 segment
    tiny_heap_page_t* lease = tiny_heap_prepare_page(tctx, 6);
    if (!lease) return NULL;

    // Unlink from TinyHeap's class list to ensure C6 owns it exclusively
    tiny_heap_class_t* hcls = tiny_heap_class(tctx, 6);
    if (hcls) {
        tiny_heap_class_unlink(hcls, lease);
    }

    // Build v4 page metadata with segment ownership
    return v4_page_from_lease(lease, 6, seg);
}

// Phase v4-mid-SEGV: C6-specific SmallSegment page release (NO TinyHeap return)
static void c6_segment_release_page_direct(small_page_v4* page) {
    if (!page) return;

    // C6 only: return the page via its SmallSegment (not back to TinyHeap's shared lists)
    small_segment_v4* seg = (small_segment_v4*)page->segment;
    if (!seg) {
        // Fallback: no segment, just free the metadata
        free(page);
        return;
    }

    // Release the page back to the segment (via TinyHeap's empty path, but segment-owned)
    small_segment_v4_internal* int_seg = (small_segment_v4_internal*)seg;
    if (int_seg->tiny_ctx) {
        tiny_heap_page_t* lease = (tiny_heap_page_t*)page->slab_ref;
        if (lease) {
            tiny_heap_page_becomes_empty(int_seg->tiny_ctx, 6, lease);
        }
    }
    free(page);
}
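
// Cold refill: produce a fresh page for the hot path. C6 uses its dedicated segment path;
// C5/C7 go through SmallSegment when enabled, otherwise lease a page straight from TinyHeap.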
static small_page_v4* cold_refill_page_v4(small_heap_ctx_v4* hot_ctx, uint32_t class_idx) {
    if (__builtin_expect(!v4_class_supported((int)class_idx), 0)) return NULL;
    (void)hot_ctx;

    // Phase v4-mid-SEGV: C6-only path (removes TinyHeap sharing)
    if (class_idx == 6) {
        return c6_segment_alloc_page_direct();
    }

    // Other classes (C5/C7): keep the existing path
    if (smallsegment_v4_enabled()) {
        small_segment_v4* seg = smallsegment_v4_acquire((int)class_idx);
        return (small_page_v4*)smallsegment_v4_alloc_page(seg, (int)class_idx);
    }

    tiny_heap_ctx_t* tctx = tiny_heap_ctx_for_thread();
    if (!tctx) return NULL;

    // Phase v4-mid-6: get a fresh page from TinyHeap
    tiny_heap_page_t* lease = tiny_heap_prepare_page(tctx, (int)class_idx);
    if (!lease) return NULL;

    // Clear TinyHeap's current so the next call gets a fresh page
    tiny_heap_class_t* hcls = tiny_heap_class(tctx, (int)class_idx);
    if (hcls) {
        tiny_heap_class_unlink(hcls, lease);
    }

    return v4_page_from_lease(lease, (int)class_idx, NULL);
}
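
// Cold retire: take back an empty page. C6 segment-owned pages use the dedicated release
// path; otherwise the lease goes back via SmallSegment/TinyHeap and the metadata is freed.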
static void cold_retire_page_v4(small_heap_ctx_v4* hot_ctx, uint32_t class_idx, small_page_v4* page) {
    (void)hot_ctx;
    if (!page) return;

    // Phase v4-mid-SEGV: C6-only path (do not return the page to TinyHeap)
    if (class_idx == 6 && page->segment) {
        c6_segment_release_page_direct(page);
        return;
    }

    // Other classes (C5/C7): keep the existing path
    if (smallsegment_v4_enabled()) {
        small_segment_v4* seg = (small_segment_v4*)page->segment;
        smallsegment_v4_release_if_empty(seg, page, (int)class_idx);
        return;
    }
    tiny_heap_ctx_t* tctx = tiny_heap_ctx_for_thread();
    tiny_heap_page_t* lease = (tiny_heap_page_t*)page->slab_ref;
    if (tctx && lease) {
        tiny_heap_page_becomes_empty(tctx, (int)class_idx, lease);
    }
    free(page);
}

// Direct function implementations (phase v4-mid-0: call cold_refill/retire directly)
small_page_v4* small_cold_v4_refill_page(small_heap_ctx_v4* ctx, uint32_t class_idx) {
    return cold_refill_page_v4(ctx, class_idx);
}

void small_cold_v4_retire_page(small_heap_ctx_v4* ctx, small_page_v4* page) {
    if (!page) return;
    cold_retire_page_v4(ctx, (uint32_t)page->class_idx, page);
}

bool small_cold_v4_remote_push(small_page_v4* page, void* ptr, uint32_t tid) {
    (void)page; (void)ptr; (void)tid;
    return false; // stub: not yet implemented
}

void small_cold_v4_remote_drain(small_heap_ctx_v4* ctx) {
    (void)ctx;
    // stub: not yet implemented
}

// ============================================================================
// smallsegment_v4_page_meta_of: Pointer → Page metadata lookup
// ============================================================================
// Phase v4-mid-1: Implement mask+shift O(1) lookup for Fail-Fast validation.
//
// Algorithm:
//   1. Compute segment base: addr & ~(SMALL_SEGMENT_V4_SIZE - 1)
//   2. Verify the magic number
//   3. Compute page_idx: (addr - seg_base) >> SMALL_SEGMENT_V4_PAGE_SHIFT
//   4. Return &seg->page_meta[page_idx] or NULL
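//
// Worked example (2 MiB segments, 64 KiB pages): for addr = 0x7F0000123456,
//   seg_base = addr & ~0x1FFFFF          = 0x7F0000000000
//   page_idx = (addr - seg_base) >> 16   = 0x123456 >> 16 = 0x12 (page 18)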
small_page_v4* smallsegment_v4_page_meta_of(small_segment_v4* seg, void* ptr) {
    if (!seg || !ptr) {
        return NULL;
    }

    uintptr_t addr = (uintptr_t)ptr;
    uintptr_t seg_base = addr & ~((uintptr_t)SMALL_SEGMENT_V4_SIZE - 1);

    // Verify segment pointer and magic
    SmallSegment* s = (SmallSegment*)seg_base;
    if (!s || s->magic != SMALL_SEGMENT_V4_MAGIC) {
        return NULL;
    }

    // Compute page index and bounds check
    size_t page_idx = (addr - seg_base) >> SMALL_SEGMENT_V4_PAGE_SHIFT;
    if (page_idx >= s->num_pages) {
        return NULL;
    }

    // Return page metadata (computed as a flexible-array offset).
    // Note: for now, just return a non-NULL marker; the actual page_meta[] array
    // will be implemented in Phase v4-mid-2.
    return (small_page_v4*)(uintptr_t)1; // non-NULL sentinel for now
}

// -----------------------------------------------------------------------------
// alloc/free
// -----------------------------------------------------------------------------
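
// Slow path: make h->current usable again. Park an exhausted current on partial/full
// (preferring partial), revive one page from the partial list if possible, otherwise
// ask Cold for a refill.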
static small_page_v4* small_alloc_slow_v4(small_heap_ctx_v4* ctx, int class_idx) {
    small_class_heap_v4* h = &ctx->cls[class_idx];
    const uint32_t partial_limit = v4_partial_limit(class_idx);
    small_page_v4* cur = h->current;
    if (cur && cur->freelist) {
        return cur; // usable current
    }
    if (cur && !cur->freelist) {
        // Park the exhausted current on partial/full (prefer partial)
        if (h->partial_count < partial_limit) {
            v4_page_push_partial(h, cur);
        } else {
            v4_page_push_full(h, cur);
        }
        h->current = NULL;
    }

    // Bring back exactly one page from the partial list
    small_page_v4* from_partial = v4_page_pop_partial(h);
    if (from_partial) {
        h->current = from_partial;
        return from_partial;
    }

    // Call the direct Cold function (not a vtable)
    small_page_v4* page = small_cold_v4_refill_page(ctx, (uint32_t)class_idx);
    if (!page) return NULL;
    h->current = page;
    return page;
}
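
// Allocation fast path, in order: C6 TLS fastlist → current page freelist → slow path
// (partial list, then Cold refill), with a one-shot fastlist promotion for C6.
//
// Caller sketch (illustrative only; the real call sites live in the tiny/small malloc
// front end, and the exact USER↔BASE header handling is owned by tiny_region_id):
//
//   small_heap_ctx_v4* ctx = small_heap_ctx_v4_get();
//   void* user = small_heap_alloc_fast_v4(ctx, /*class_idx=*/7);   // USER pointer or NULL
//   if (user) {
//       /* ... use the block ... */
//       small_heap_free_fast_v4(ctx, 7, (uint8_t*)user - 1);       // free expects the BASE pointer
//   }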
void* small_heap_alloc_fast_v4(small_heap_ctx_v4* ctx, int class_idx) {
    // Phase v4-mid-5: stats instrumentation
    small_heap_v4_stat_alloc_call(class_idx);

    // Phase v4-mid-6: C6 Fastlist path
    if (class_idx == 6 && small_heap_v4_fastlist_enabled()) {
        SmallC6FastState* s = &g_small_c6_fast;
        if (likely(s->freelist)) {
            void* b = s->freelist;
            s->freelist = *(void**)b;
            s->used++;
            small_heap_v4_stat_alloc_success(class_idx);
            return tiny_region_id_write_header(b, class_idx);
        }
        // Fastlist empty: sync used back to meta before the slow path, then reset
        if (s->meta) {
            s->meta->used = (uint16_t)s->used;
            // Reset fastlist state to avoid stale pointer issues
            s->meta = NULL;
            s->page_base = NULL;
            s->capacity = 0;
            s->used = 0;
        }
    }

    // Phase v4-mid-2: C6-only full SmallHeapCtx v4 implementation
    if (__builtin_expect(!v4_class_supported(class_idx), 0)) {
        small_heap_v4_stat_alloc_fallback_pool(class_idx);
        return NULL; // classes other than C5/C6/C7 are not handled here
    }
    if (!small_heap_v4_class_enabled((uint8_t)class_idx)) {
        small_heap_v4_stat_alloc_fallback_pool(class_idx);
        return NULL;
    }

    small_class_heap_v4* h = &ctx->cls[class_idx];
    small_page_v4* page = h->current;

    // Try the current page's freelist
    if (page && page->freelist) {
        void* blk = page->freelist;
        void* next = NULL;
        memcpy(&next, blk, sizeof(void*));
        page->freelist = next;
        page->used++;
        small_heap_v4_stat_alloc_success(class_idx);
        return tiny_region_id_write_header(blk, class_idx);
    }

    // Current exhausted or NULL: take the slow path (partial/refill)
    page = small_alloc_slow_v4(ctx, class_idx);
    if (!page || !page->freelist) {
        small_heap_v4_stat_alloc_null_page(class_idx);
        small_heap_v4_stat_alloc_fallback_pool(class_idx);
        return NULL;
    }

    // Phase v4-mid-6: promote the new page to the C6 fastlist
    if (class_idx == 6 && small_heap_v4_fastlist_enabled()) {
        if (!page) {
            // Should not happen (checked above)
        } else if (!page->freelist) {
            return NULL;
        } else {
            SmallC6FastState* s = &g_small_c6_fast;
            s->meta = page;
            s->page_base = page->base;
            s->capacity = page->capacity;
            s->used = page->used;
            s->freelist = page->freelist;
            page->freelist = NULL; // steal freelist ownership

            // Retry the fast path
            if (likely(s->freelist)) {
                void* b = s->freelist;
                s->freelist = *(void**)b;
                s->used++;
                small_heap_v4_stat_alloc_success(class_idx);
                return tiny_region_id_write_header(b, class_idx);
            }
        }
    }

    // Allocate from the newly acquired/promoted page
    void* blk = page->freelist;
    void* next = NULL;
    memcpy(&next, blk, sizeof(void*));
    page->freelist = next;
    page->used++;

    small_heap_v4_stat_alloc_success(class_idx);
    return tiny_region_id_write_header(blk, class_idx);
}
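
// Remove page from whichever list loc says it is on, fixing up the predecessor link
// (or the list head) and the partial counter.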
static void v4_unlink_from_list(small_class_heap_v4* h, v4_loc_t loc, small_page_v4* prev, small_page_v4* page) {
    if (!h || !page) return;
    switch (loc) {
        case V4_LOC_CURRENT:
            h->current = NULL;
            break;
        case V4_LOC_PARTIAL:
            if (prev) prev->next = page->next;
            else      h->partial_head = page->next;
            if (h->partial_count > 0) {
                h->partial_count--;
            }
            break;
        case V4_LOC_FULL:
            if (prev) prev->next = page->next;
            else      h->full_head = page->next;
            break;
        default:
            break;
    }
    page->next = NULL;
}

extern void hak_pool_free(void* ptr, size_t size, uintptr_t site_id);
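
// Free fast path. ptr is a BASE pointer. Order: C6 fastlist hit → lookup in this thread's
// v4 pages (current/partial/full) → otherwise hand off to pool v1 via hak_pool_free.
// Pages that become empty are kept as current, parked on partial, or retired to Cold.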
void small_heap_free_fast_v4(small_heap_ctx_v4* ctx, int class_idx, void* ptr) {
    // Phase v4-mid-5: stats instrumentation
    small_heap_v4_stat_free_call(class_idx);

    // Phase v4-mid-6: C6 Fastlist path
    if (class_idx == 6 && small_heap_v4_fastlist_enabled()) {
        SmallC6FastState* s = &g_small_c6_fast;
        if (s->page_base && (uintptr_t)ptr >= (uintptr_t)s->page_base) {
            // Use the actual block size from meta when available; fall back to 512 (C6)
            uint32_t bsize = (s->meta) ? s->meta->block_size : 512;
            size_t span = (size_t)s->capacity * bsize;
            if ((uintptr_t)ptr < (uintptr_t)s->page_base + span) {
                *(void**)ptr = s->freelist;
                s->freelist = ptr;
                s->used--;
                small_heap_v4_stat_free_page_found(class_idx);
                return;
            }
        }
    }

    // Phase v4-mid-2: C6-only full SmallHeapCtx v4 implementation
    if (__builtin_expect(!v4_class_supported(class_idx), 0)) {
        return;
    }
    if (!small_heap_v4_class_enabled((uint8_t)class_idx)) return;
    if (!ptr) return;

    // Phase v4-mid-6: ptr is already BASE (the caller converts USER→BASE before calling us)
    // See malloc_tiny_fast.h L254: base = ptr - 1, then L354/L282 passes base
    void* base_ptr = ptr;

    small_class_heap_v4* h = &ctx->cls[class_idx];
    small_page_v4* prev = NULL;
    v4_loc_t loc = V4_LOC_NONE;

    // Try to find the page in the current/partial/full lists (using the BASE pointer)
    small_page_v4* page = v4_find_page(h, (const uint8_t*)base_ptr, &loc, &prev);

    // Phase v4-mid-2: if the page is not in the v4 heap, a page_meta_of() segment lookup
    // (mask+shift, requires an initialized segment) is the eventual plan.
    if (!page) {
        small_heap_v4_stat_free_page_not_found(class_idx);
        // For now, fall back to pool v1 (avoids recursion via free())
        hak_pool_free(base_ptr, 0, 0);
        return;
    }

    small_heap_v4_stat_free_page_found(class_idx);

    const uint32_t partial_limit = v4_partial_limit(class_idx);

    // Phase v4-mid-SEGV: sync C6 fastlist state back to the page before any list manipulation
    if (class_idx == 6 && small_heap_v4_fastlist_enabled()) {
        SmallC6FastState* s = &g_small_c6_fast;
        if (s->meta == page) {
            // Sync fastlist state back to the page metadata
            page->freelist = s->freelist;
            page->used = (uint16_t)s->used;
            // Invalidate the fastlist state (the slow path takes over)
            s->meta = NULL;
            s->page_base = NULL;
            s->freelist = NULL;
            s->capacity = 0;
            s->used = 0;
        }
    }

    // freelist push (use the BASE pointer, not the USER pointer)
    void* head = page->freelist;
    memcpy(base_ptr, &head, sizeof(void*));
    page->freelist = base_ptr;
    if (page->used > 0) {
        page->used--;
    }

    if (page->used == 0) {
        if (loc != V4_LOC_CURRENT) {
            v4_unlink_from_list(h, loc, prev, page);
        }
        if (!h->current) {
            h->current = page;
            page->next = NULL;
            return;
        }
        if (h->current == page) {
            page->next = NULL;
            return;
        }
        if (h->partial_count < partial_limit) {
            v4_page_push_partial(h, page);
            return;
        }
        // Call the direct Cold function (not a vtable)
        small_cold_v4_retire_page(ctx, page);
        return;
    }

    if (!h->current) {
        // Make this page the current page
        if (loc != V4_LOC_CURRENT) {
            v4_unlink_from_list(h, loc, prev, page);
        }
        h->current = page;
        page->next = NULL;
    } else if (loc == V4_LOC_FULL && page->freelist) {
        // Move full → partial (even with a current page, restore up to the partial limit)
        v4_unlink_from_list(h, loc, prev, page);
        if (h->partial_count < partial_limit) {
            v4_page_push_partial(h, page);
        } else {
            v4_page_push_full(h, page); // over the limit: put it back on the full list
        }
    }
}

// ============================================================================
// Stats dump (Phase v4-mid-5)
// ============================================================================
void small_heap_v4_stats_dump(void) {
    if (!small_heap_v4_stats_enabled()) {
        return;
    }

    fprintf(stderr, "\n========================================\n");
    fprintf(stderr, "[SMALL_HEAP_V4_STATS] Summary\n");
    fprintf(stderr, "========================================\n");

    for (int c = 0; c < 8; c++) {
        uint64_t alloc_calls     = atomic_load_explicit(&g_small_heap_v4_stats[c].alloc_calls, memory_order_relaxed);
        uint64_t alloc_success   = atomic_load_explicit(&g_small_heap_v4_stats[c].alloc_success, memory_order_relaxed);
        uint64_t alloc_null_page = atomic_load_explicit(&g_small_heap_v4_stats[c].alloc_null_page, memory_order_relaxed);
        uint64_t alloc_fallback  = atomic_load_explicit(&g_small_heap_v4_stats[c].alloc_fallback_pool, memory_order_relaxed);
        uint64_t free_calls      = atomic_load_explicit(&g_small_heap_v4_stats[c].free_calls, memory_order_relaxed);
        uint64_t free_found      = atomic_load_explicit(&g_small_heap_v4_stats[c].free_page_found, memory_order_relaxed);
        uint64_t free_not_found  = atomic_load_explicit(&g_small_heap_v4_stats[c].free_page_not_found, memory_order_relaxed);

        if (alloc_calls > 0 || free_calls > 0) {
            fprintf(stderr, "\nClass C%d:\n", c);
            fprintf(stderr, "  Alloc: calls=%lu success=%lu null_page=%lu fallback_pool=%lu\n",
                    (unsigned long)alloc_calls, (unsigned long)alloc_success,
                    (unsigned long)alloc_null_page, (unsigned long)alloc_fallback);
            fprintf(stderr, "  Free:  calls=%lu page_found=%lu page_not_found=%lu\n",
                    (unsigned long)free_calls, (unsigned long)free_found,
                    (unsigned long)free_not_found);
        }
    }

    fprintf(stderr, "========================================\n\n");
    fflush(stderr);
}

// Automatic dump at program exit
static void small_heap_v4_stats_atexit(void) __attribute__((destructor));
static void small_heap_v4_stats_atexit(void) {
    small_heap_v4_stats_dump();
}