Guard madvise ENOMEM and stabilize pool/tiny front v3

Moe Charm (CI)
2025-12-09 21:50:15 +09:00
parent e274d5f6a9
commit a905e0ffdd
45 changed files with 3154 additions and 242 deletions


@@ -46,7 +46,9 @@
#include "hakmem_pool.h"
#include "hakmem_config.h"
#include "hakmem_internal.h" // For AllocHeader and HAKMEM_MAGIC
#include "box/pool_hotbox_v2_header_box.h"
#include "hakmem_syscall.h" // Box 3 syscall layer (bypasses LD_PRELOAD)
#include "box/pool_hotbox_v2_box.h"
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
@@ -58,6 +60,11 @@
#include "hakmem_policy.h" // FrozenPolicy caps (Soft CAP gating)
#include "hakmem_debug.h"
#define POOL_HOTBOX_V2_HEADER_BYTES ((size_t)sizeof(void*))
// Use an over-sized mapping to guarantee POOL_PAGE_SIZE alignment for the
// v2 page base. This keeps page_of() O(1) without relying on mmap alignment.
#define POOL_HOTBOX_V2_MAP_LEN (POOL_PAGE_SIZE * 2)
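// A minimal sketch of why the 2x mapping is enough, assuming POOL_PAGE_SIZE is
// a power of two (mmap_result below is just an illustrative name): rounding the
// raw mmap address up to the next POOL_PAGE_SIZE multiple moves it forward by
// less than POOL_PAGE_SIZE, so a full aligned page still fits inside the
// over-sized mapping.
//
//   uintptr_t raw     = (uintptr_t)mmap_result;                 // arbitrary alignment
//   uintptr_t aligned = (raw + POOL_PAGE_SIZE - 1)
//                       & ~((uintptr_t)POOL_PAGE_SIZE - 1);     // round up
//   // aligned - raw < POOL_PAGE_SIZE, hence
//   // aligned + POOL_PAGE_SIZE <= raw + POOL_HOTBOX_V2_MAP_LEN.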
// False sharing mitigation: padded mutex type (64B)
typedef struct { pthread_mutex_t m; char _pad[64 - (sizeof(pthread_mutex_t) % 64)]; } PaddedMutex;
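// Hedged illustration of the padding math (the assert is an example, not part
// of this allocator, and assumes 64-byte cache lines): the pad member rounds
// sizeof(PaddedMutex) up to a multiple of 64, so, provided an array of these
// starts 64-byte aligned, two elements never share a cache line.
//
//   _Static_assert(sizeof(PaddedMutex) % 64 == 0,
//                  "PaddedMutex should occupy whole cache lines");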
@@ -808,6 +815,513 @@ static int g_pool_min_bundle = 2; // env: HAKMEM_POOL_MIN_BUNDLE (default 2)
static int g_count_sample_exp = 10; // env: HAKMEM_POOL_COUNT_SAMPLE (0..16)
static __thread uint32_t t_pool_rng = 0x243f6a88u; // per-thread RNG for sampling
// ---------------------------------------------------------------------------
// PoolHotBox v2 scaffolding (research-only; defaults to v1)
// ---------------------------------------------------------------------------
PoolHotBoxV2Stats g_pool_hotbox_v2_stats[POOL_NUM_CLASSES];
static __thread pool_ctx_v2* g_pool_ctx_v2 = NULL;
// Forward decls for helpers used in HotBox v2.
static inline uint32_t pool_hotbox_v2_block_size(int ci);
static inline uint32_t pool_block_size_for_class(int ci);
static inline void mid_set_header(AllocHeader* hdr, size_t class_sz, uintptr_t site_id);
static inline void mid_page_inuse_inc(void* raw);
static void* pool_cold_refill_page_v1(void* cold_ctx, uint32_t ci, uint32_t* out_block_size, uint32_t* out_capacity, void** out_slab_ref);
static void pool_cold_retire_page_v1(void* cold_ctx, uint32_t ci, void* slab_ref, void* base);
static int pool_hotbox_v2_global_enabled(void) {
static int g = -1;
if (__builtin_expect(g == -1, 0)) {
const char* e = getenv("HAKMEM_POOL_V2_ENABLED");
g = (e && *e && *e != '0') ? 1 : 0;
}
return g;
}
static unsigned pool_hotbox_v2_class_mask(void) {
static int parsed = 0;
static unsigned mask = 0;
if (__builtin_expect(!parsed, 0)) {
const char* e = getenv("HAKMEM_POOL_V2_CLASSES");
if (e && *e) {
mask = (unsigned)strtoul(e, NULL, 0);
} else {
mask = 0; // default: all OFF (opt-in only)
}
parsed = 1;
}
return mask;
}
int pool_hotbox_v2_class_enabled(int class_idx) {
if (!pool_hotbox_v2_global_enabled()) return 0;
if (class_idx < 0 || class_idx >= POOL_NUM_CLASSES) return 0;
unsigned mask = pool_hotbox_v2_class_mask();
static int logged = 0;
if (__builtin_expect(!logged && pool_hotbox_v2_stats_enabled(), 0)) {
fprintf(stderr, "[POOL_V2_MASK] enabled=0x%x\n", mask);
logged = 1;
}
return (mask & (1u << class_idx)) != 0;
}
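// Opt-in example (assuming a POSIX shell; the variable names are the env vars
// parsed above, "./app" stands in for any program using the allocator):
//
//   HAKMEM_POOL_V2_ENABLED=1 HAKMEM_POOL_V2_CLASSES=0x5 HAKMEM_POOL_V2_STATS=1 ./app
//
// Bit i of the mask gates class i, so 0x5 routes classes 0 and 2 through HotBox
// v2 and leaves the rest on v1; with HAKMEM_POOL_V2_CLASSES unset the mask stays
// 0 and every class keeps the v1 path. HAKMEM_POOL_V2_STATS=1 additionally
// prints the [POOL_V2_STATS] summary at exit.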
int pool_hotbox_v2_stats_enabled(void) {
static int g = -1;
if (__builtin_expect(g == -1, 0)) {
const char* e = getenv("HAKMEM_POOL_V2_STATS");
g = (e && *e && *e != '0') ? 1 : 0;
}
return g;
}
pool_ctx_v2* pool_v2_tls_get(void) {
pool_ctx_v2* ctx = g_pool_ctx_v2;
if (__builtin_expect(ctx == NULL, 0)) {
ctx = (pool_ctx_v2*)calloc(1, sizeof(pool_ctx_v2));
if (!ctx) abort();
for (int i = 0; i < POOL_NUM_CLASSES; i++) {
uint32_t user_sz = pool_block_size_for_class(i);
ctx->cls[i].block_size = user_sz ? (user_sz + HEADER_SIZE) : 0;
ctx->cls[i].max_partial_pages = 2;
}
g_pool_ctx_v2 = ctx;
}
return ctx;
}
static inline uint32_t pool_hotbox_v2_block_size(int ci) {
switch (ci) {
case 0: return POOL_CLASS_2KB;
case 1: return POOL_CLASS_4KB;
case 2: return POOL_CLASS_8KB;
case 3: return POOL_CLASS_16KB;
case 4: return POOL_CLASS_32KB;
case 5: return POOL_CLASS_40KB;
case 6: return POOL_CLASS_52KB;
default: return 0;
}
}
static inline uint32_t pool_block_size_for_class(int ci) {
return pool_hotbox_v2_block_size(ci);
}
static inline void pool_hotbox_v2_record_alloc(uint32_t ci) {
if ((int)ci >= POOL_NUM_CLASSES) return;
atomic_fetch_add_explicit(&g_pool_hotbox_v2_stats[ci].alloc_calls, 1, memory_order_relaxed);
}
static inline void pool_hotbox_v2_record_alloc_refill(uint32_t ci) {
if ((int)ci >= POOL_NUM_CLASSES) return;
atomic_fetch_add_explicit(&g_pool_hotbox_v2_stats[ci].alloc_refill, 1, memory_order_relaxed);
}
static inline void pool_hotbox_v2_record_alloc_refill_fail(uint32_t ci) {
if ((int)ci >= POOL_NUM_CLASSES) return;
atomic_fetch_add_explicit(&g_pool_hotbox_v2_stats[ci].alloc_refill_fail, 1, memory_order_relaxed);
}
void pool_hotbox_v2_record_alloc_fallback(uint32_t ci) {
if ((int)ci >= POOL_NUM_CLASSES) return;
atomic_fetch_add_explicit(&g_pool_hotbox_v2_stats[ci].alloc_fallback_v1, 1, memory_order_relaxed);
}
static inline void pool_hotbox_v2_record_free(uint32_t ci) {
if ((int)ci >= POOL_NUM_CLASSES) return;
atomic_fetch_add_explicit(&g_pool_hotbox_v2_stats[ci].free_calls, 1, memory_order_relaxed);
}
void pool_hotbox_v2_record_free_call(uint32_t ci) {
pool_hotbox_v2_record_free(ci);
}
void pool_hotbox_v2_record_free_fallback(uint32_t ci) {
if ((int)ci >= POOL_NUM_CLASSES) return;
atomic_fetch_add_explicit(&g_pool_hotbox_v2_stats[ci].free_fallback_v1, 1, memory_order_relaxed);
}
enum pool_v2_pageof_fail {
POOL_V2_PAGEOF_NONE = 0,
POOL_V2_PAGEOF_OUT_OF_RANGE = 1,
POOL_V2_PAGEOF_MISALIGNED = 2,
POOL_V2_PAGEOF_HEADER_MISSING = 3,
POOL_V2_PAGEOF_UNKNOWN = 4,
};
static inline void pool_hotbox_v2_record_pageof_fail(uint32_t ci, int reason) {
if ((int)ci >= POOL_NUM_CLASSES) return;
switch (reason) {
case POOL_V2_PAGEOF_HEADER_MISSING:
atomic_fetch_add_explicit(&g_pool_hotbox_v2_stats[ci].page_of_fail_header_missing, 1, memory_order_relaxed);
break;
case POOL_V2_PAGEOF_OUT_OF_RANGE:
atomic_fetch_add_explicit(&g_pool_hotbox_v2_stats[ci].page_of_fail_out_of_range, 1, memory_order_relaxed);
break;
case POOL_V2_PAGEOF_MISALIGNED:
atomic_fetch_add_explicit(&g_pool_hotbox_v2_stats[ci].page_of_fail_misaligned, 1, memory_order_relaxed);
break;
case POOL_V2_PAGEOF_UNKNOWN:
default:
atomic_fetch_add_explicit(&g_pool_hotbox_v2_stats[ci].page_of_fail_unknown, 1, memory_order_relaxed);
break;
}
}
static pool_page_v2* pool_hotbox_v2_page_acquire(void) {
pool_page_v2* p = (pool_page_v2*)calloc(1, sizeof(pool_page_v2));
return p;
}
static void pool_hotbox_v2_page_release(pool_page_v2* p) {
free(p);
}
static void* pool_hotbox_v2_build_freelist(pool_page_v2* p) {
if (!p || !p->base || p->block_size == 0 || p->capacity == 0) return NULL;
uint8_t* base = (uint8_t*)p->base + POOL_HOTBOX_V2_HEADER_BYTES;
void* head = NULL;
for (uint32_t i = 0; i < p->capacity; i++) {
void* blk = base + ((size_t)i * p->block_size);
*(void**)blk = head;
head = blk;
}
return head;
}
static PoolColdIface pool_cold_iface_v1(void);
static pool_page_v2* pool_hotbox_v2_page_of(pool_ctx_v2* ctx, uint32_t ci, void* ptr, int* out_reason) {
if (out_reason) *out_reason = POOL_V2_PAGEOF_UNKNOWN;
if (!ctx || ci >= POOL_NUM_CLASSES || !ptr) return NULL;
// Compute page base by mask (POOL_PAGE_SIZE is a power of two).
void* page_base = pool_hotbox_v2_page_base(ptr, POOL_PAGE_SIZE);
pool_page_v2* p = (pool_page_v2*)pool_hotbox_v2_header_load(page_base);
if (!p) {
if (out_reason) *out_reason = POOL_V2_PAGEOF_HEADER_MISSING;
return NULL;
}
if (p->class_idx != ci || !p->base) {
if (out_reason) *out_reason = POOL_V2_PAGEOF_UNKNOWN;
return NULL;
}
uint8_t* data_base = (uint8_t*)p->base + POOL_HOTBOX_V2_HEADER_BYTES;
size_t span = (size_t)p->block_size * (size_t)p->capacity;
uintptr_t off = (uintptr_t)((uint8_t*)ptr - data_base);
if (off >= span) {
if (out_reason) *out_reason = POOL_V2_PAGEOF_OUT_OF_RANGE;
return NULL;
}
if (off % p->block_size != 0) {
if (out_reason) *out_reason = POOL_V2_PAGEOF_MISALIGNED;
return NULL;
}
if (out_reason) *out_reason = POOL_V2_PAGEOF_NONE;
return p;
}
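// Worked example of the O(1) reverse lookup above, assuming POOL_PAGE_SIZE is
// 64 KiB and a page whose aligned base is 0x7f0000010000: every block of that
// page lives in [base, base + 64 KiB), so masking any block pointer with
// ~(POOL_PAGE_SIZE - 1) yields 0x7f0000010000 again, and the pool_page_v2*
// stored there by pool_hotbox_v2_page_init() comes back with a single load;
// the offset and modulo checks then reject pointers that sit inside the
// mapping but not on a block boundary.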
static void pool_hotbox_v2_page_retire_slow(pool_ctx_v2* ctx, uint32_t ci, pool_page_v2* p) {
(void)ctx;
if (!p) return;
// Clear reverse header to avoid stale page_of hits.
pool_hotbox_v2_header_clear(p->base);
PoolColdIface cold = pool_cold_iface_v1();
if (cold.retire_page) {
void* cold_ctx = NULL;
cold.retire_page(cold_ctx, ci, p->slab_ref, p->base);
}
pool_hotbox_v2_page_release(p);
}
static void pool_hotbox_v2_push_partial(pool_class_v2* hc, pool_page_v2* p) {
if (!hc || !p) return;
p->next = hc->partial;
hc->partial = p;
if (hc->partial_count < UINT16_MAX) hc->partial_count++;
}
static pool_page_v2* pool_hotbox_v2_pop_partial(pool_class_v2* hc) {
if (!hc || !hc->partial) return NULL;
pool_page_v2* p = hc->partial;
hc->partial = p->next;
p->next = NULL;
if (hc->partial_count > 0) hc->partial_count--;
return p;
}
static pool_page_v2* pool_hotbox_v2_take_usable_partial(pool_class_v2* hc) {
if (!hc) return NULL;
pool_page_v2* prev = NULL;
pool_page_v2* p = hc->partial;
while (p) {
if (p->freelist && p->used < p->capacity) {
if (prev) {
prev->next = p->next;
} else {
hc->partial = p->next;
}
p->next = NULL;
if (hc->partial_count > 0) hc->partial_count--;
return p;
}
prev = p;
p = p->next;
}
return NULL;
}
static int pool_hotbox_v2_unlink_partial(pool_class_v2* hc, pool_page_v2* target) {
if (!hc || !target) return 0;
pool_page_v2* prev = NULL;
pool_page_v2* p = hc->partial;
while (p) {
if (p == target) {
if (prev) {
prev->next = p->next;
} else {
hc->partial = p->next;
}
p->next = NULL;
if (hc->partial_count > 0) hc->partial_count--;
return 1;
}
prev = p;
p = p->next;
}
return 0;
}
static void pool_hotbox_v2_record_alloc_fast(uint32_t ci) {
if ((int)ci >= POOL_NUM_CLASSES) return;
atomic_fetch_add_explicit(&g_pool_hotbox_v2_stats[ci].alloc_fast, 1, memory_order_relaxed);
}
static void pool_hotbox_v2_record_free_fast(uint32_t ci) {
if ((int)ci >= POOL_NUM_CLASSES) return;
atomic_fetch_add_explicit(&g_pool_hotbox_v2_stats[ci].free_fast, 1, memory_order_relaxed);
}
static inline void* pool_hotbox_v2_alloc_fast(pool_ctx_v2* ctx, uint32_t ci, uintptr_t site_id) {
pool_class_v2* hc = &ctx->cls[ci];
pool_page_v2* p = hc->current;
if (p && p->freelist && p->used < p->capacity) {
void* blk = p->freelist;
p->freelist = *(void**)blk;
p->used++;
pool_hotbox_v2_record_alloc_fast(ci);
AllocHeader* hdr = (AllocHeader*)blk;
size_t class_sz = pool_hotbox_v2_block_size((int)ci);
mid_set_header(hdr, class_sz, site_id);
mid_page_inuse_inc(blk);
return (char*)blk + HEADER_SIZE;
}
if (p) {
// Keep exhausted current reachable for free()
pool_hotbox_v2_push_partial(hc, p);
hc->current = NULL;
}
p = pool_hotbox_v2_take_usable_partial(hc);
if (p) {
hc->current = p;
void* blk = p->freelist;
p->freelist = *(void**)blk;
p->used++;
pool_hotbox_v2_record_alloc_fast(ci);
AllocHeader* hdr = (AllocHeader*)blk;
size_t class_sz = pool_hotbox_v2_block_size((int)ci);
mid_set_header(hdr, class_sz, site_id);
mid_page_inuse_inc(blk);
return (char*)blk + HEADER_SIZE;
}
return NULL;
}
static void pool_hotbox_v2_page_init(pool_page_v2* p, uint32_t ci, void* base, uint32_t block_size, uint32_t capacity, void* slab_ref) {
if (!p) return;
// Adjust capacity if caller did not account for header reservation.
size_t avail = (POOL_PAGE_SIZE > POOL_HOTBOX_V2_HEADER_BYTES) ? (POOL_PAGE_SIZE - POOL_HOTBOX_V2_HEADER_BYTES) : 0;
if (block_size > 0) {
uint32_t max_cap = (uint32_t)(avail / (size_t)block_size);
if (capacity == 0 || capacity > max_cap) capacity = max_cap;
}
p->freelist = NULL;
p->used = 0;
p->capacity = capacity;
p->block_size = block_size;
p->class_idx = ci;
p->base = base;
p->slab_ref = slab_ref;
p->next = NULL;
pool_hotbox_v2_header_store(p->base, p);
}
static PoolColdIface pool_cold_iface_v1(void) {
PoolColdIface iface = {pool_cold_refill_page_v1, pool_cold_retire_page_v1};
return iface;
}
static void* pool_cold_refill_page_v1(void* cold_ctx, uint32_t ci, uint32_t* out_block_size, uint32_t* out_capacity, void** out_slab_ref) {
(void)cold_ctx;
uint32_t user_sz = pool_hotbox_v2_block_size((int)ci);
if (user_sz == 0) return NULL;
uint32_t bs = user_sz + HEADER_SIZE;
if (bs == 0) return NULL;
uint32_t cap = 0;
if (POOL_PAGE_SIZE > POOL_HOTBOX_V2_HEADER_BYTES) {
cap = (uint32_t)((POOL_PAGE_SIZE - POOL_HOTBOX_V2_HEADER_BYTES) / bs);
}
if (cap == 0) return NULL;
// Over-allocate so we can align to POOL_PAGE_SIZE (64KiB) for O(1) page_of.
void* raw = mmap(NULL, POOL_HOTBOX_V2_MAP_LEN, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (raw == MAP_FAILED || !raw) {
return NULL;
}
uintptr_t aligned = ((uintptr_t)raw + (POOL_PAGE_SIZE - 1)) & ~((uintptr_t)POOL_PAGE_SIZE - 1);
void* base = (void*)aligned;
// Register page ownership for same-thread fast free consistency.
mid_desc_register(base, (int)ci, (uint64_t)(uintptr_t)pthread_self());
g_pool.refills[ci]++;
g_pool.total_pages_allocated++;
g_pool.pages_by_class[ci]++;
g_pool.total_bytes_allocated += POOL_HOTBOX_V2_MAP_LEN;
if (out_block_size) *out_block_size = bs;
if (out_capacity) *out_capacity = cap;
// slab_ref keeps the raw mapping pointer for unmap.
if (out_slab_ref) *out_slab_ref = raw;
return base;
}
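// Rough capacity arithmetic for the refill above, with illustrative numbers
// only (assumes POOL_PAGE_SIZE = 64 KiB and an 8-byte reverse header): for the
// 2 KiB class, bs = 2048 + HEADER_SIZE, so cap = (65536 - 8) / bs, i.e. about
// 31 blocks per page as long as HEADER_SIZE is small relative to the class size.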
static void pool_cold_retire_page_v1(void* cold_ctx, uint32_t ci, void* slab_ref, void* base) {
(void)cold_ctx;
(void)ci;
void* addr = slab_ref ? slab_ref : base;
if (!addr) return;
if (ci < POOL_NUM_CLASSES) {
if (g_pool.pages_by_class[ci] > 0) g_pool.pages_by_class[ci]--;
}
if (g_pool.total_pages_allocated > 0) g_pool.total_pages_allocated--;
if (g_pool.total_bytes_allocated >= POOL_HOTBOX_V2_MAP_LEN) g_pool.total_bytes_allocated -= POOL_HOTBOX_V2_MAP_LEN;
munmap(addr, POOL_HOTBOX_V2_MAP_LEN);
}
void* pool_hotbox_v2_alloc(uint32_t class_idx, size_t size, uintptr_t site_id) {
(void)size;
(void)site_id;
if ((int)class_idx < 0 || class_idx >= POOL_NUM_CLASSES) return NULL;
pool_hotbox_v2_record_alloc(class_idx);
pool_ctx_v2* ctx = pool_v2_tls_get();
void* blk = pool_hotbox_v2_alloc_fast(ctx, class_idx, site_id);
if (blk) return blk;
// slow: refill via Cold IF
PoolColdIface cold = pool_cold_iface_v1();
uint32_t bs = 0, cap = 0;
void* slab_ref = NULL;
void* base = cold.refill_page ? cold.refill_page(NULL, class_idx, &bs, &cap, &slab_ref) : NULL;
if (!base || !bs || !cap) {
pool_hotbox_v2_record_alloc_refill_fail(class_idx);
return NULL;
}
pool_class_v2* hc = &ctx->cls[class_idx];
pool_page_v2* page = pool_hotbox_v2_page_acquire();
if (!page) {
if (cold.retire_page) cold.retire_page(NULL, class_idx, slab_ref, base);
pool_hotbox_v2_record_alloc_refill_fail(class_idx);
return NULL;
}
pool_hotbox_v2_page_init(page, class_idx, base, bs, cap, slab_ref);
page->freelist = pool_hotbox_v2_build_freelist(page);
if (!page->freelist) {
pool_hotbox_v2_record_alloc_refill_fail(class_idx);
if (cold.retire_page) cold.retire_page(NULL, class_idx, slab_ref, base);
pool_hotbox_v2_page_release(page);
return NULL;
}
hc->current = page;
pool_hotbox_v2_record_alloc_refill(class_idx);
return pool_hotbox_v2_alloc_fast(ctx, class_idx, site_id);
}
int pool_hotbox_v2_free(uint32_t class_idx, void* raw_block) {
if (!raw_block || (int)class_idx < 0 || class_idx >= POOL_NUM_CLASSES) return 0;
pool_hotbox_v2_record_free(class_idx);
pool_ctx_v2* ctx = pool_v2_tls_get();
int pageof_reason = POOL_V2_PAGEOF_UNKNOWN;
pool_page_v2* p = pool_hotbox_v2_page_of(ctx, class_idx, raw_block, &pageof_reason);
if (!p) {
pool_hotbox_v2_record_pageof_fail(class_idx, pageof_reason);
if (pool_hotbox_v2_stats_enabled()) {
static _Atomic uint32_t dbg = 0;
uint32_t n = atomic_fetch_add_explicit(&dbg, 1, memory_order_relaxed);
if (n < 4) {
pool_class_v2* hc = &ctx->cls[class_idx];
fprintf(stderr,
"[POOL_V2 page_of_fail] cls=%u ptr=%p reason=%d cur=%p cur_base=%p cur_cap=%u cur_bs=%u partial=%p\n",
class_idx, raw_block, pageof_reason,
(void*)hc->current,
hc->current ? hc->current->base : NULL,
hc->current ? hc->current->capacity : 0u,
hc->current ? hc->current->block_size : 0u,
(void*)hc->partial);
}
}
return 0; // let caller fall back to v1
}
*(void**)raw_block = p->freelist;
p->freelist = raw_block;
if (p->used > 0) p->used--;
pool_hotbox_v2_record_free_fast(class_idx);
pool_class_v2* hc = &ctx->cls[class_idx];
if (p->used == 0) {
pool_hotbox_v2_unlink_partial(hc, p);
if (hc->current == p) hc->current = NULL;
if (hc->partial_count < hc->max_partial_pages) {
pool_hotbox_v2_push_partial(hc, p);
} else {
pool_hotbox_v2_page_retire_slow(ctx, class_idx, p);
}
} else {
if (!hc->current) hc->current = p;
}
return 1;
}
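// Hedged usage sketch for the return convention above; the surrounding caller
// shape is an assumption, not code from this change. A front end would try v2
// first and fall back to the v1 free path when v2 does not own the block:
//
//   if (pool_hotbox_v2_class_enabled(ci) && pool_hotbox_v2_free(ci, raw)) {
//       return;                                   // v2 reclaimed the block
//   }
//   pool_hotbox_v2_record_free_fallback(ci);      // count the v1 fallback
//   /* ... existing v1 free path ... */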
__attribute__((destructor)) static void pool_hotbox_v2_dump_stats(void) {
if (!pool_hotbox_v2_stats_enabled()) return;
for (int i = 0; i < POOL_NUM_CLASSES; i++) {
uint64_t ac = atomic_load_explicit(&g_pool_hotbox_v2_stats[i].alloc_calls, memory_order_relaxed);
uint64_t ar = atomic_load_explicit(&g_pool_hotbox_v2_stats[i].alloc_refill, memory_order_relaxed);
uint64_t arf = atomic_load_explicit(&g_pool_hotbox_v2_stats[i].alloc_refill_fail, memory_order_relaxed);
uint64_t afb = atomic_load_explicit(&g_pool_hotbox_v2_stats[i].alloc_fallback_v1, memory_order_relaxed);
uint64_t fc = atomic_load_explicit(&g_pool_hotbox_v2_stats[i].free_calls, memory_order_relaxed);
uint64_t ffb = atomic_load_explicit(&g_pool_hotbox_v2_stats[i].free_fallback_v1, memory_order_relaxed);
uint64_t af = atomic_load_explicit(&g_pool_hotbox_v2_stats[i].alloc_fast, memory_order_relaxed);
uint64_t ff = atomic_load_explicit(&g_pool_hotbox_v2_stats[i].free_fast, memory_order_relaxed);
uint64_t pf_hdr = atomic_load_explicit(&g_pool_hotbox_v2_stats[i].page_of_fail_header_missing, memory_order_relaxed);
uint64_t pf_range = atomic_load_explicit(&g_pool_hotbox_v2_stats[i].page_of_fail_out_of_range, memory_order_relaxed);
uint64_t pf_mis = atomic_load_explicit(&g_pool_hotbox_v2_stats[i].page_of_fail_misaligned, memory_order_relaxed);
uint64_t pf_unknown = atomic_load_explicit(&g_pool_hotbox_v2_stats[i].page_of_fail_unknown, memory_order_relaxed);
if (ac || afb || fc || ffb || ar || arf || af || ff || pf_hdr || pf_range || pf_mis || pf_unknown) {
fprintf(stderr, "[POOL_V2_STATS] cls=%d alloc_calls=%llu alloc_fast=%llu alloc_refill=%llu alloc_refill_fail=%llu alloc_fb_v1=%llu free_calls=%llu free_fast=%llu free_fb_v1=%llu pageof_hdr=%llu pageof_range=%llu pageof_misaligned=%llu pageof_unknown=%llu\n",
i, (unsigned long long)ac, (unsigned long long)af, (unsigned long long)ar,
(unsigned long long)arf, (unsigned long long)afb,
(unsigned long long)fc, (unsigned long long)ff, (unsigned long long)ffb,
(unsigned long long)pf_hdr, (unsigned long long)pf_range, (unsigned long long)pf_mis, (unsigned long long)pf_unknown);
}
}
}
// Size class table for O(1) lookup: 7 classes, where indices 5/6 are Bridge classes (40KB, 52KB) that fill the 32-64KB gap.
static size_t g_class_sizes[POOL_NUM_CLASSES] = {
@@ -893,10 +1407,9 @@ int hak_pool_get_shard_index(uintptr_t site_id) {
return (int)((uint32_t)x & (POOL_NUM_SHARDS - 1));
}
// TLS helpers (non-inline helpers for shard bookkeeping)
#include "box/pool_tls_core.inc.h"
// Refill/ACE (boxed)
#include "box/pool_refill.inc.h"