- Implement tiny_page_box.c/h: per-thread page cache between UC and Shared Pool - Integrate Page Box into Unified Cache refill path - Remove legacy SuperSlab implementation (merged into smallmid) - Add HAKMEM_TINY_PAGE_BOX_CLASSES env var for selective class enabling - Update bench_random_mixed.c with Page Box statistics Current status: Implementation safe, no regressions. Page Box ON/OFF shows minimal difference - pool strategy needs tuning. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
61 lines
2.2 KiB
C
61 lines
2.2 KiB
C
#ifndef HAKMEM_SHARED_POOL_INTERNAL_H
#define HAKMEM_SHARED_POOL_INTERNAL_H

// Internal (non-public) declarations shared by the Shared Pool translation
// units (acquire / release / pool management). Not part of the public API.

#include "hakmem_shared_pool.h"
#include "hakmem_tiny_superslab.h"
#include "hakmem_tiny_superslab_constants.h"
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>   // uint32_t / uint64_t used directly below

// Global Shared Pool Instance
extern SharedSuperSlabPool g_shared_pool;

// Lock Statistics
// Counters are defined always to avoid compilation errors in Release build
// (usage is guarded by g_lock_stats_enabled which is 0 in Release)
extern _Atomic uint64_t g_lock_acquire_count;
extern _Atomic uint64_t g_lock_release_count;
extern _Atomic uint64_t g_lock_acquire_slab_count;
extern _Atomic uint64_t g_lock_release_slab_count;
extern int g_lock_stats_enabled;

#if !HAKMEM_BUILD_RELEASE
void lock_stats_init(void);
#else
static inline void lock_stats_init(void) {
    // No-op for release build
}
#endif

// Stage Statistics
// Per-class hit counters for the three allocation stages; collection is
// gated by g_sp_stage_stats_enabled.
extern _Atomic uint64_t g_sp_stage1_hits[TINY_NUM_CLASSES_SS];
extern _Atomic uint64_t g_sp_stage2_hits[TINY_NUM_CLASSES_SS];
extern _Atomic uint64_t g_sp_stage3_hits[TINY_NUM_CLASSES_SS];
extern int g_sp_stage_stats_enabled;
void sp_stage_stats_init(void);

// Per-class lock acquisition statistics (for observing Stage2/Stage3 alloc_lock)
extern _Atomic uint64_t g_sp_stage2_lock_acquired_by_class[TINY_NUM_CLASSES_SS];
extern _Atomic uint64_t g_sp_stage3_lock_acquired_by_class[TINY_NUM_CLASSES_SS];

// Internal Helpers (Shared between acquire/release/pool)
// NOTE(review): "_unlocked" suffix suggests the caller must already hold the
// pool lock — confirm against the definition site.
void shared_pool_ensure_capacity_unlocked(uint32_t min_capacity);
SuperSlab* sp_internal_allocate_superslab(int class_idx);

// Slot & Meta Helpers
int sp_slot_mark_active(SharedSSMeta* meta, int slot_idx, int class_idx);
int sp_slot_mark_empty(SharedSSMeta* meta, int slot_idx);
int sp_slot_claim_lockfree(SharedSSMeta* meta, int class_idx);
SharedSSMeta* sp_meta_find_or_create(SuperSlab* ss);
void sp_meta_sync_slots_from_ss(SharedSSMeta* meta, SuperSlab* ss);

// Free List Helpers
int sp_freelist_push_lockfree(int class_idx, SharedSSMeta* meta, int slot_idx);
int sp_freelist_pop_lockfree(int class_idx, SharedSSMeta** meta_out, int* slot_idx_out);

// Policy & Geometry Helpers
uint32_t sp_class_active_limit(int class_idx);
void sp_fix_geometry_if_needed(SuperSlab* ss, int slab_idx, int class_idx);

#endif // HAKMEM_SHARED_POOL_INTERNAL_H
|