// hakmem/core/box/pool_mf2_helpers.inc.h
// MF2 pool helpers: page activation, remote-free draining, and new-page
// allocation for the per-thread mid-size page pool.
// Forward declarations
static void* mf2_alloc_slow(int class_idx, size_t size, uintptr_t site_id);
// ===========================================================================
// Helper Functions (Clean & Modular)
// ===========================================================================
// Helper: Make page active (move old active to full_pages)
static inline void mf2_make_page_active(MF2_ThreadPages* tp, int class_idx, MidPage* page) {
    if (!tp || !page) return;
    // Demote the previous active page (if any) onto full_pages (LIFO push).
    MidPage* prev = tp->active_page[class_idx];
    if (prev) {
        prev->next_page = tp->full_pages[class_idx];
        tp->full_pages[class_idx] = prev;
    }
    // Install the new active page; the active slot holds a single page, so
    // its link field carries no tail.
    page->next_page = NULL;
    tp->active_page[class_idx] = page;
}
// Helper: Drain page and add to partial list (LIFO for cache locality)
// Returns true if page has free blocks after drain; on failure the page is
// parked back on full_pages either way, so the caller never owns it afterward.
static inline bool mf2_try_drain_to_partial(MF2_ThreadPages* tp, int class_idx, MidPage* page) {
    if (!tp || !page) return false;
    // Pull any cross-thread frees into the local freelist.
    // Fix: the drain count was stored in an unused local (-Wunused-variable);
    // discard it explicitly instead.
    (void)mf2_drain_remote_frees(page);
    // If the drain produced free blocks, push onto the partial list.
    // LIFO keeps the most recently touched page hottest in cache.
    if (page->freelist) {
        atomic_fetch_add(&g_mf2_page_reuse_count, 1);
        page->next_page = tp->partial_pages[class_idx];
        tp->partial_pages[class_idx] = page;
        return true;
    }
    // Still no free blocks: return the page to full_pages.
    page->next_page = tp->full_pages[class_idx];
    tp->full_pages[class_idx] = page;
    return false;
}
// Helper: Drain page and activate if successful (Direct Handoff - backward compat)
// Returns true if page was activated; otherwise the page is parked back on
// full_pages, so the caller never owns it afterward.
static inline bool mf2_try_drain_and_activate(MF2_ThreadPages* tp, int class_idx, MidPage* page) {
    if (!tp || !page) return false;
    // Pull any cross-thread frees into the local freelist.
    // Fix: the drain count was stored in an unused local (-Wunused-variable);
    // discard it explicitly instead.
    (void)mf2_drain_remote_frees(page);
    // If the drain produced free blocks, make this page active immediately
    // (direct handoff to the allocation fast path).
    if (page->freelist) {
        atomic_fetch_add(&g_mf2_page_reuse_count, 1);
        mf2_make_page_active(tp, class_idx, page);
        return true;
    }
    // Still no free blocks: return the page to full_pages.
    page->next_page = tp->full_pages[class_idx];
    tp->full_pages[class_idx] = page;
    return false;
}
// Helper: Try to reuse pages from own pending queue (must-reuse gate part 1)
// Returns true if a page was successfully drained and activated
static bool mf2_try_reuse_own_pending(MF2_ThreadPages* tp, int class_idx) {
    if (!tp) return false;
    // Bounded scan: never touch more than MF2_PENDING_QUEUE_BUDGET pages so
    // the slow path cannot stall behind a long pending queue.
    int budget = MF2_PENDING_QUEUE_BUDGET;
    while (budget-- > 0) {
        MidPage* page = mf2_dequeue_pending(tp, class_idx);
        if (!page) {
            return false; // Queue exhausted without a reusable page
        }
        atomic_fetch_add(&g_mf2_pending_drained, 1);
        // The page has left the pending queue; clear its membership flag.
        atomic_store_explicit(&page->in_remote_pending, false, memory_order_release);
        // Direct handoff: on a successful drain the page becomes active;
        // otherwise the helper parks it on full_pages for us.
        if (mf2_try_drain_and_activate(tp, class_idx, page)) {
            return true;
        }
    }
    return false; // Budget spent, nothing activated
}
// Helper: Try to drain remotes from active page (must-reuse gate part 2)
// Returns true if active page has freelist after drain
static bool mf2_try_drain_active_remotes(MF2_ThreadPages* tp, int class_idx) {
    if (!tp) return false;
    MidPage* active = tp->active_page[class_idx];
    if (!active) return false;
    atomic_fetch_add(&g_mf2_slow_checked_drain, 1);
    // NOTE(review): seq_cst load looks deliberate (pairs with the remote-free
    // publisher) — confirm against the remote-free path before relaxing.
    if (atomic_load_explicit(&active->remote_count, memory_order_seq_cst) == 0) {
        return false; // No remote frees queued
    }
    atomic_fetch_add(&g_mf2_slow_found_remote, 1);
    int moved = mf2_drain_remote_frees(active);
    if (moved > 0 && active->freelist) {
        atomic_fetch_add(&g_mf2_drain_success, 1);
        return true; // Active page regained free blocks
    }
    return false; // Drain produced nothing usable
}
// Helper: Allocate new page and make it active
// Returns the newly allocated page (or NULL on OOM)
static MidPage* mf2_alloc_and_activate_new_page(MF2_ThreadPages* tp, int class_idx) {
    if (!tp) return NULL;
    atomic_fetch_add(&g_mf2_new_page_count, 1);
    // DEBUG: Log why we're allocating a new page (first N samples only).
    static _Atomic int new_page_samples = 0;
    int sample_idx = atomic_fetch_add_explicit(&new_page_samples, 1, memory_order_relaxed);
    if (sample_idx < MF2_DEBUG_SAMPLE_COUNT) {
        // Count adoptable pages across all size classes for the log line.
        int total_adoptable = 0;
        for (int i = 0; i < POOL_NUM_CLASSES; i++) {
            total_adoptable += atomic_load_explicit(&g_adoptable_count[i], memory_order_relaxed);
        }
        MF2_DEBUG_LOG("NEW_PAGE %d: class=%d, own_pending=%p, adoptable_total=%d, active=%p, full=%p",
                      sample_idx, class_idx,
                      (void*)atomic_load_explicit(&tp->pages_remote_pending[class_idx], memory_order_relaxed),
                      total_adoptable,
                      tp->active_page[class_idx],
                      tp->full_pages[class_idx]);
    }
    MidPage* page = mf2_alloc_new_page(class_idx);
    if (!page) {
        return NULL; // OOM
    }
    // Fix: reuse mf2_make_page_active() instead of duplicating its
    // demote-old-active logic inline. The inline copy also omitted clearing
    // the new active page's next_page link, which the helper performs.
    mf2_make_page_active(tp, class_idx, page);
    tp->page_count[class_idx]++;
    return page;
}
// ===========================================================================
// End of Helper Functions
// ===========================================================================