// pool_refill.inc.h — Box: L2 Pool refill and adaptive bundling

#ifndef POOL_REFILL_INC_H
#define POOL_REFILL_INC_H

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

// Adjust the per-class bundle factor from windowed hit/miss deltas.
// A low hit rate (freelist frequently empty) grows the bundle factor so
// each refill maps more pages; a high hit rate shrinks it back toward 1.
static inline void pool_update_bundle_factor(int class_idx) {
    uint64_t h = g_pool.hits[class_idx];
    uint64_t m = g_pool.misses[class_idx];
    uint64_t dh = h - g_pool.last_hits[class_idx];
    uint64_t dm = m - g_pool.last_misses[class_idx];
    uint64_t dt = dh + dm;
    if (dt < 256) return; // wait for the window to accumulate

    int bf = g_pool.bundle_factor[class_idx];
    if (bf <= 0) bf = 1;

    double hit_rate = (double)dh / (double)dt; // dt >= 256 here, no div-by-zero
    if (hit_rate < 0.60 && dm > (dh + 16)) {
        if (bf < 4) bf++;       // starved: bundle more pages per refill
    } else if (hit_rate > 0.90 && dh > (dm + 32)) {
        if (bf > 1) bf--;       // saturated: back off toward one page
    }

    g_pool.bundle_factor[class_idx] = bf;
    g_pool.last_hits[class_idx] = h;   // start the next window
    g_pool.last_misses[class_idx] = m;
}

// Refill a shard's freelist by mapping one or more 64KiB pages (bundles)
// and splitting each page into fixed-size blocks.
// Returns: 1 on success (at least one page added), 0 on failure.
static inline int refill_freelist(int class_idx, int shard_idx) {
    if (class_idx < 0 || class_idx >= POOL_NUM_CLASSES) return 0;
    if (shard_idx < 0 || shard_idx >= POOL_NUM_SHARDS) return 0;

    size_t user_size = g_class_sizes[class_idx];
    size_t block_size = HEADER_SIZE + user_size;
    int blocks_per_page = (int)(POOL_PAGE_SIZE / block_size);
    if (blocks_per_page == 0) return 0;

    void* page = mmap(NULL, POOL_PAGE_SIZE, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (page == MAP_FAILED) return 0; // mmap signals failure with MAP_FAILED, not NULL

    // Update bundle factor based on windowed stats, then clamp to [1, 4]
    pool_update_bundle_factor(class_idx);
    int bundles = g_pool.bundle_factor[class_idx];
    if (bundles < 1) bundles = 1;
    if (bundles > 4) bundles = 4;

    // Soft CAP guidance from FrozenPolicy: never bundle past the per-class cap
    const FrozenPolicy* pol = hkm_policy_get();
    if (pol) {
        uint16_t cap = 0;
        if (class_idx < 5) cap = pol->mid_cap[class_idx];
        else if (class_idx == 5 && pol->mid_dyn1_bytes != 0) cap = pol->mid_cap_dyn1;
        else if (class_idx == 6 && pol->mid_dyn2_bytes != 0) cap = pol->mid_cap_dyn2;
        if (cap > 0) {
            uint64_t have = g_pool.pages_by_class[class_idx];
            if (have >= cap) {
                bundles = 1; // over cap: refill minimally
            } else {
                uint64_t deficit = cap - have;
                if (deficit < (uint64_t)bundles) bundles = (int)deficit;
                if (bundles < 1) bundles = 1;
                if (bundles > 4) bundles = 4;
                if (deficit >= (uint64_t)g_pool_min_bundle && bundles < g_pool_min_bundle)
                    bundles = g_pool_min_bundle;
            }
        }
    }

    int pages_allocated_this_call = 0;
    for (int b = 0; b < bundles; b++) {
        // Split the page into blocks and link them into a fresh freelist
        PoolBlock* freelist_head = NULL;
        for (int i = 0; i < blocks_per_page; i++) {
            void* raw_block = (char*)page + ((size_t)i * block_size);
            // Hint the next block into cache with write intent
            __builtin_prefetch((char*)raw_block + block_size, 1, 1);
            PoolBlock* block = (PoolBlock*)raw_block;
            block->next = freelist_head;
            freelist_head = block;
        }
        // Append any existing freelist behind the new blocks
        if (g_pool.freelist[class_idx][shard_idx]) {
            PoolBlock* tail = freelist_head;
            while (tail->next) tail = tail->next;
            tail->next = g_pool.freelist[class_idx][shard_idx];
        }
        g_pool.freelist[class_idx][shard_idx] = freelist_head;

        // Register this 64KiB page (shared owner)
        mid_desc_register(page, class_idx, 0);
        // Count the page just split before attempting the next mapping, so a
        // failed follow-up mmap does not undercount pages already linked in
        pages_allocated_this_call++;

        if (b + 1 < bundles) {
            page = mmap(NULL, POOL_PAGE_SIZE, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (page == MAP_FAILED) break; // partial bundle still counts as success
        }
    }

    set_nonempty_bit(class_idx, shard_idx);
    g_pool.refills[class_idx]++;
    g_pool.total_pages_allocated += (uint64_t)pages_allocated_this_call;
    g_pool.pages_by_class[class_idx] += (uint64_t)pages_allocated_this_call;
    g_pool.total_bytes_allocated +=
        (uint64_t)pages_allocated_this_call * (uint64_t)POOL_PAGE_SIZE;
    return 1;
}

#endif // POOL_REFILL_INC_H
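
/*
 * Usage sketch (illustrative, not part of this box): a minimal shard-local
 * allocation path showing where refill_freelist() slots in when a shard's
 * freelist runs dry, and how the hits/misses counters that drive
 * pool_update_bundle_factor() would be fed. The name pool_alloc_from_shard
 * and the lock-free, single-threaded framing are hypothetical assumptions;
 * the real caller lives in another box.
 */
#if 0
static inline void* pool_alloc_from_shard(int class_idx, int shard_idx) {
    PoolBlock* block = g_pool.freelist[class_idx][shard_idx];
    if (block) {
        g_pool.hits[class_idx]++;          // served directly from the freelist
    } else {
        g_pool.misses[class_idx]++;        // empty: trigger an adaptive refill
        if (!refill_freelist(class_idx, shard_idx))
            return NULL;                   // mmap failed; propagate OOM
        block = g_pool.freelist[class_idx][shard_idx];
        if (!block) return NULL;           // defensive: refill linked nothing
    }
    g_pool.freelist[class_idx][shard_idx] = block->next; // pop the head block
    return (char*)block + HEADER_SIZE;     // user data starts past the header
}
#endif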