Files
hakmem/core/mid_hotbox_v3.c
Moe Charm (CI) 710541b69e MID-V3 Phase 3-5: RegionId integration, alloc/free implementation
- MID-V3-3: RegionId integration (page registration at carve)
  - mid_segment_v3_carve_page(): Register with RegionIdBox
  - mid_segment_v3_return_page(): Unregister from RegionIdBox
  - Uses REGION_KIND_MID_V3 for region identification

- MID-V3-4: Allocation fast path implementation
  - mid_hot_v3_alloc_slow(): Slow path for lane miss
  - mid_cold_v3_refill_page(): Segment-based page allocation
  - mid_lane_refill_from_page(): Batch transfer (16 items default)
  - mid_page_build_freelist(): Initial freelist construction

- MID-V3-5: Free/cold path implementation
  - mid_hot_v3_free(): RegionIdBox-lookup-based free (usage sketch below)
  - mid_page_push_free(): Page freelist push
  - Local/remote page detection via lane ownership
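
A minimal usage sketch of the alloc/free pair added in MID-V3-4/5
(illustrative only; class index 6 = C6, error handling elided):

  MidHotBoxV3* hot = mid_hot_box_v3_get();  // TLS context
  void* p = mid_hot_v3_alloc(hot, 6);       // C6-class block (may be NULL)
  if (p) mid_hot_v3_free(p);                // routed back via RegionIdBox lookup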

ENV controls (default OFF; a parsing sketch follows the list):
  HAKMEM_MID_V3_ENABLED=1
  HAKMEM_MID_V3_CLASSES=0xC0 (C6+C7)
  HAKMEM_MID_V3_DEBUG=1
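
For reference, a sketch of how these variables could gate a size class
(hypothetical; the actual parsing lives in box/mid_hotbox_v3_env_box.h):

  static int class_enabled_sketch(uint8_t class_idx) {
      const char* e = getenv("HAKMEM_MID_V3_ENABLED");
      if (!e || e[0] != '1') return 0;                   // default OFF
      const char* s = getenv("HAKMEM_MID_V3_CLASSES");
      unsigned long mask = s ? strtoul(s, NULL, 0) : 0;  // e.g. 0xC0 -> C6|C7
      return (int)((mask >> class_idx) & 1u);
  }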

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2025-12-12 00:53:42 +09:00


// mid_hotbox_v3.c - Mid/Pool HotBox v3 Implementation
//
// Phase MID-V3-1: Stub implementation (skeleton only)
// Phase MID-V3-4/5: Full implementation
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <sys/mman.h>
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
#include "box/mid_hotbox_v3_box.h"
#include "box/mid_hotbox_v3_env_box.h"
#include "box/mid_cold_iface_v3.h"
#include "box/region_id_v6_box.h"
// ============================================================================
// TLS Context
// ============================================================================
static __thread MidHotBoxV3 g_mid_hot_ctx_v3;
static __thread int g_mid_hot_ctx_v3_init = 0;

MidHotBoxV3* mid_hot_box_v3_get(void) {
    if (unlikely(!g_mid_hot_ctx_v3_init)) {
        memset(&g_mid_hot_ctx_v3, 0, sizeof(g_mid_hot_ctx_v3));
        g_mid_hot_ctx_v3.flags = MID_CTX_FLAG_INIT;
        g_mid_hot_ctx_v3_init = 1;
    }
    return &g_mid_hot_ctx_v3;
}
// ============================================================================
// Allocation Fast Path (MID-V3-4)
// ============================================================================
// Forward declarations for slow path
static void* mid_hot_v3_alloc_slow(MidHotBoxV3* hot, int class_idx);

// Pop one block from a lane's intrusive freelist (each free block stores the
// next pointer in its first sizeof(void*) bytes). Returns NULL if empty.
static inline void* mid_lane_pop(MidLaneV3* lane) {
    void* blk = lane->freelist_head;
    if (!blk) return NULL;
    void* next = NULL;
    memcpy(&next, blk, sizeof(void*));
    lane->freelist_head = next;
    lane->freelist_count--;
    lane->alloc_count++;
    return blk;
}

void* mid_hot_v3_alloc(MidHotBoxV3* hot, int class_idx) {
    if (unlikely(!mid_v3_class_enabled((uint8_t)class_idx))) {
        return NULL; // Class not enabled
    }
    if (!hot) {
        hot = mid_hot_box_v3_get();
    }
    MidLaneV3* lane = &hot->lanes[class_idx];

    // L0: TLS freelist cache hit
    if (likely(lane->freelist_head)) {
        return mid_lane_pop(lane);
    }
    // L0 miss: slow path
    return mid_hot_v3_alloc_slow(hot, class_idx);
}

static void* mid_hot_v3_alloc_slow(MidHotBoxV3* hot, int class_idx) {
    MidLaneV3* lane = &hot->lanes[class_idx];

    // Try to refill from the current page
    if (lane->page_idx != 0) {
        MidPageDescV3* page = mid_page_from_idx(lane->page_idx);
        if (page && page->freelist) {
            // Batch transfer from page to lane
            mid_lane_refill_from_page(lane, page, mid_v3_lane_batch_size());
            // Retry fast path
            void* blk = mid_lane_pop(lane);
            if (blk) return blk;
        }
    }

    // Cold path: get a new page
    MidPageDescV3* new_page = mid_cold_v3_refill_page(hot, (uint32_t)class_idx);
    if (!new_page) return NULL;
    lane->page_idx = mid_page_to_idx(new_page);

    // Refill the lane from the new page (its freelist was already built in
    // mid_cold_v3_refill_page), then retry the fast path
    mid_lane_refill_from_page(lane, new_page, mid_v3_lane_batch_size());
    return mid_lane_pop(lane);
}
// ============================================================================
// Free Fast Path (MID-V3-5)
// ============================================================================
void mid_hot_v3_free(void* ptr) {
    if (unlikely(!ptr)) return;
    if (unlikely(!mid_v3_enabled())) return;

    // RegionIdBox lookup (O(1) via TLS cache)
    RegionLookupV6 lk = region_id_lookup_cached_v6(ptr);
    if (lk.kind != REGION_KIND_MID_V3) {
        // Not our allocation, ignore
        return;
    }
    MidPageDescV3* page = (MidPageDescV3*)lk.page_meta;
    if (!page) return;

    // Check if the local thread owns this page
    MidHotBoxV3* hot = mid_hot_box_v3_get();
    MidLaneV3* lane = &hot->lanes[page->class_idx];
    if (lane->page_idx == mid_page_to_idx(page)) {
        // Local page: direct push to lane freelist
        void* next = lane->freelist_head;
        memcpy(ptr, &next, sizeof(void*));
        lane->freelist_head = ptr;
        lane->freelist_count++;
        lane->free_count++;
        if (mid_v3_debug_enabled()) {
            fprintf(stderr, "[MID_V3] Free to local lane: ptr=%p page=%p\n", ptr, page->base);
        }
        return;
    }

    // Remote page or unowned: push to page freelist
    mid_page_push_free(page, ptr);
    if (mid_v3_debug_enabled()) {
        fprintf(stderr, "[MID_V3] Free to remote page: ptr=%p page=%p\n", ptr, page->base);
    }

    // Check if the page should be retired (fully empty)
    if (page->used == 0 && page->capacity > 0) {
        // Page is now empty, consider retiring it
        // For simplicity, we'll keep it around for now
        // Production code would implement a retirement threshold
    }
}
// ============================================================================
// Ownership Check (MID-V3-3+)
// ============================================================================
int mid_hotbox_v3_can_own(int class_idx, void* ptr) {
    if (unlikely(!mid_v3_class_enabled((uint8_t)class_idx))) {
        return 0;
    }
    if (!ptr) return 0;

    // RegionIdBox lookup
    RegionLookupV6 lk = region_id_lookup_v6(ptr);

    // Check that this is a MID v3 region and that the class matches
    if (lk.kind == REGION_KIND_MID_V3 && lk.page_meta) {
        MidPageDescV3* page = (MidPageDescV3*)lk.page_meta;
        return (page->class_idx == (uint8_t)class_idx) ? 1 : 0;
    }
    return 0;
}
// ============================================================================
// Page Index Conversion Helpers
// ============================================================================
// Simple global page table (for stub implementation)
// In production, this would be a more sophisticated mapping
#define MID_MAX_PAGES 256
static MidPageDescV3* g_mid_page_table_v3[MID_MAX_PAGES];
static uint32_t g_mid_page_table_count_v3 = 1; // index 0 is reserved as "no page"

static uint32_t mid_page_to_idx_impl(MidPageDescV3* page) {
    if (!page) return 0;
    // Check if already in table (slot 0 is the reserved sentinel)
    for (uint32_t i = 1; i < g_mid_page_table_count_v3; i++) {
        if (g_mid_page_table_v3[i] == page) {
            return i;
        }
    }
    // Add to table
    if (g_mid_page_table_count_v3 < MID_MAX_PAGES) {
        uint32_t idx = g_mid_page_table_count_v3++;
        g_mid_page_table_v3[idx] = page;
        return idx;
    }
    return 0; // Table full
}

static MidPageDescV3* mid_page_from_idx_impl(uint32_t idx) {
    if (idx == 0 || idx >= g_mid_page_table_count_v3) {
        return NULL;
    }
    return g_mid_page_table_v3[idx];
}
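
// A more sophisticated mapping (sketch, not wired in): because pages are
// carved at seg->base + i * MID_PAGE_V3_SIZE, the index could be computed
// arithmetically instead of scanning a table, e.g.:
//
//   idx = 1 + (uint32_t)(((uintptr_t)page->base - page->segment->base)
//                        / MID_PAGE_V3_SIZE);
//
// with segment identity encoded in the upper bits once multiple segments
// per class exist.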
// ============================================================================
// Cold Interface Implementation (MID-V3-4/5)
// ============================================================================
MidPageDescV3* mid_cold_v3_refill_page(MidHotBoxV3* hot, uint32_t class_idx) {
    (void)hot;
    if (!mid_v3_class_enabled((uint8_t)class_idx)) return NULL;

    // Acquire segment
    MidSegmentV3* seg = mid_segment_v3_acquire((int)class_idx);
    if (!seg) return NULL;

    // Carve a new page
    MidPageDescV3* page = mid_segment_v3_carve_page(seg, (int)class_idx);
    if (!page) return NULL;

    // Build initial freelist
    page->freelist = mid_page_build_freelist(page);

    if (mid_v3_debug_enabled()) {
        fprintf(stderr, "[MID_V3] Refilled page: base=%p capacity=%u class=%u\n",
                page->base, page->capacity, class_idx);
    }
    return page;
}

void mid_cold_v3_retire_page(MidHotBoxV3* hot, MidPageDescV3* page) {
    (void)hot;
    if (!page) return;
    if (mid_v3_debug_enabled()) {
        fprintf(stderr, "[MID_V3] Retiring page: base=%p used=%u/%u\n",
                page->base, page->used, page->capacity);
    }
    // Return page to segment
    MidSegmentV3* seg = page->segment;
    if (seg) {
        mid_segment_v3_return_page(seg, page);
    }
}

bool mid_cold_v3_remote_push(MidPageDescV3* page, void* ptr, uint32_t tid) {
    (void)page; (void)ptr; (void)tid;
    // Stub: Remote free not yet implemented
    return false;
}

void mid_cold_v3_remote_drain(MidHotBoxV3* hot) {
    (void)hot;
    // Stub: Remote drain not yet implemented
}
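
// Sketch (hypothetical, not wired in): when remote frees are implemented,
// mid_cold_v3_remote_push could publish blocks with a lock-free CAS push onto
// page->freelist, along the lines below. This assumes every writer of
// page->freelist goes through atomics, which is not yet true of the plain
// loads/stores elsewhere in this file.
__attribute__((unused))
static bool mid_remote_push_sketch(MidPageDescV3* page, void* ptr) {
    void* old_head = __atomic_load_n(&page->freelist, __ATOMIC_ACQUIRE);
    do {
        // Link the freed block to the current head before publishing it
        memcpy(ptr, &old_head, sizeof(void*));
    } while (!__atomic_compare_exchange_n(&page->freelist, &old_head, ptr,
                                          1 /* weak */,
                                          __ATOMIC_RELEASE, __ATOMIC_ACQUIRE));
    return true;
}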
// ============================================================================
// Segment Operations (MID-V3-3/4/5)
// ============================================================================
// Global segment pool (simple fixed-size array for now)
#define MID_MAX_SEGMENTS 8
static MidSegmentV3* g_mid_segments_v3[MID_MAX_SEGMENTS];
static int g_mid_segments_v3_init = 0;

// Helper: get stride (block size) for a class
static inline size_t mid_stride_for_class_v3(int class_idx) {
    // Reuse tiny geometry if available, or define locally
    static const size_t strides[] = {
        16,   // C0
        32,   // C1
        48,   // C2
        64,   // C3
        96,   // C4
        144,  // C5
        256,  // C6
        1024  // C7
    };
    if (class_idx < 0 || class_idx >= (int)(sizeof(strides) / sizeof(strides[0]))) {
        return 0;
    }
    return strides[class_idx];
}

MidSegmentV3* mid_segment_v3_acquire(int class_idx) {
    if (!mid_v3_class_enabled((uint8_t)class_idx)) return NULL;

    // Simple implementation: allocate one segment per class (TLS-like)
    if (!g_mid_segments_v3_init) {
        memset(g_mid_segments_v3, 0, sizeof(g_mid_segments_v3));
        g_mid_segments_v3_init = 1;
    }
    if (class_idx < 0 || class_idx >= MID_MAX_SEGMENTS) return NULL;

    if (!g_mid_segments_v3[class_idx]) {
        // Allocate new segment via mmap
        size_t seg_size = MID_SEGMENT_V3_SIZE;
        void* base = mmap(NULL, seg_size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (base == MAP_FAILED) {
            return NULL;
        }
        // Allocate segment descriptor (separate from segment data)
        size_t meta_size = sizeof(MidSegmentV3) + MID_PAGES_PER_SEGMENT * sizeof(MidPageDescV3);
        MidSegmentV3* seg = (MidSegmentV3*)malloc(meta_size);
        if (!seg) {
            munmap(base, seg_size);
            return NULL;
        }
        memset(seg, 0, meta_size);
        seg->base = (uintptr_t)base;
        seg->size = seg_size;
        seg->magic = MID_SEGMENT_V3_MAGIC;
        seg->num_pages = MID_PAGES_PER_SEGMENT;
        seg->class_idx = (uint8_t)class_idx;
        seg->flags = 0;
        seg->region_id = 0; // Will be set when pages are carved
        g_mid_segments_v3[class_idx] = seg;
    }
    return g_mid_segments_v3[class_idx];
}
MidPageDescV3* mid_segment_v3_carve_page(MidSegmentV3* seg, int class_idx) {
    if (!seg || seg->magic != MID_SEGMENT_V3_MAGIC) return NULL;
    if (!mid_v3_class_enabled((uint8_t)class_idx)) return NULL;

    // Find the first unused page slot (page descriptors live right after the
    // segment header, matching the malloc layout in mid_segment_v3_acquire)
    MidPageDescV3* page_meta_array = (MidPageDescV3*)((uint8_t*)seg + sizeof(MidSegmentV3));
    for (uint32_t i = 0; i < seg->num_pages; i++) {
        MidPageDescV3* page = &page_meta_array[i];
        if (page->capacity == 0) {
            // This page slot is available
            size_t stride = mid_stride_for_class_v3(class_idx);
            if (stride == 0) return NULL;
            uint8_t* page_base = (uint8_t*)(seg->base + (i * MID_PAGE_V3_SIZE));
            uint32_t capacity = (uint32_t)(MID_PAGE_V3_SIZE / stride);

            // Initialize page descriptor
            page->base = page_base;
            page->capacity = capacity;
            page->used = 0;
            page->block_size = (uint32_t)stride;
            page->class_idx = (uint8_t)class_idx;
            page->flags = MID_PAGE_FLAG_ACTIVE;
            page->freelist = NULL; // Will be built by caller
            page->slab_ref = NULL;
            page->segment = seg;
            page->next = NULL;

            // MID-V3-3: Register with RegionIdBox
            page->region_id = region_id_register_v6(
                page_base,
                capacity * stride,
                REGION_KIND_MID_V3,
                page
            );

            if (mid_v3_debug_enabled()) {
                fprintf(stderr, "[MID_V3] Carved page: base=%p capacity=%u stride=%u region_id=%u\n",
                        page_base, capacity, (uint32_t)stride, page->region_id);
            }
            return page;
        }
    }
    return NULL; // Segment exhausted
}
void mid_segment_v3_return_page(MidSegmentV3* seg, MidPageDescV3* page) {
    if (!seg || !page) return;
    if (seg->magic != MID_SEGMENT_V3_MAGIC) return;

    // MID-V3-3: Unregister from RegionIdBox
    if (page->region_id != 0) {
        region_id_unregister_v6(page->region_id);
        page->region_id = 0;
    }
    if (mid_v3_debug_enabled()) {
        fprintf(stderr, "[MID_V3] Returned page: base=%p capacity=%u\n",
                page->base, page->capacity);
    }
    // Reset page descriptor (capacity == 0 marks the slot as free for carving)
    page->capacity = 0;
    page->used = 0;
    page->flags = 0;
    page->freelist = NULL;
    page->base = NULL;
}
// ============================================================================
// Lane Operations (MID-V3-4/5)
// ============================================================================
void mid_lane_refill_from_page(MidLaneV3* lane, MidPageDescV3* page, uint32_t batch_size) {
    if (!lane || !page) return;

    // Transfer up to batch_size items from the page freelist to the lane
    uint32_t transferred = 0;
    void* head = page->freelist;
    while (head && transferred < batch_size) {
        void* next = NULL;
        memcpy(&next, head, sizeof(void*));
        // Push to lane freelist
        void* lane_next = lane->freelist_head;
        memcpy(head, &lane_next, sizeof(void*));
        lane->freelist_head = head;
        lane->freelist_count++;
        transferred++;
        head = next;
    }
    // Update page freelist
    page->freelist = head;

    if (mid_v3_debug_enabled() && transferred > 0) {
        fprintf(stderr, "[MID_V3] Lane refill: transferred=%u from page=%p\n",
                transferred, page->base);
    }
}

void mid_lane_flush_to_page(MidLaneV3* lane, MidPageDescV3* page) {
    if (!lane || !page) return;

    // Transfer all items from the lane back to the page
    while (lane->freelist_head) {
        void* blk = lane->freelist_head;
        void* next = NULL;
        memcpy(&next, blk, sizeof(void*));
        // Push to page freelist
        void* page_next = page->freelist;
        memcpy(blk, &page_next, sizeof(void*));
        page->freelist = blk;
        lane->freelist_head = next;
        lane->freelist_count--;
    }
}
// ============================================================================
// Page Operations (MID-V3-4/5)
// ============================================================================
void* mid_page_build_freelist(MidPageDescV3* page) {
    if (!page || !page->base) return NULL;

    // Build the freelist by linking all blocks in reverse order, so the
    // returned head is the lowest-addressed block
    void* head = NULL;
    size_t stride = page->block_size;
    for (uint32_t i = page->capacity; i > 0; i--) {
        uint8_t* blk = page->base + ((i - 1) * stride);
        void* next = head;
        memcpy(blk, &next, sizeof(void*));
        head = blk;
    }
    if (mid_v3_debug_enabled()) {
        fprintf(stderr, "[MID_V3] Built freelist: page=%p capacity=%u stride=%zu\n",
                page->base, page->capacity, stride);
    }
    return head;
}

void mid_page_push_free(MidPageDescV3* page, void* ptr) {
    if (!page || !ptr) return;
    // Push to page freelist. Note: this is a plain, unsynchronized push;
    // cross-thread (remote) frees would need an atomic push along the lines
    // of the CAS sketch next to mid_cold_v3_remote_push above.
    void* next = page->freelist;
    memcpy(ptr, &next, sizeof(void*));
    page->freelist = ptr;
    if (page->used > 0) {
        page->used--;
    }
}

void* mid_page_pop_free(MidPageDescV3* page) {
    if (!page || !page->freelist) return NULL;
    void* blk = page->freelist;
    void* next = NULL;
    memcpy(&next, blk, sizeof(void*));
    page->freelist = next;
    page->used++;
    return blk;
}
// ============================================================================
// Debug
// ============================================================================
void mid_hot_v3_dump_stats(void) {
    if (!mid_v3_debug_enabled()) return;
    MidHotBoxV3* hot = mid_hot_box_v3_get();
    fprintf(stderr, "[MID_V3] HotBox stats:\n");
    for (int i = 0; i < MID_V3_NUM_CLASSES; i++) {
        if (!mid_v3_class_enabled((uint8_t)i)) continue;
        MidLaneV3* lane = &hot->lanes[i];
        fprintf(stderr, "  C%d: page_idx=%u freelist_count=%u alloc=%u free=%u\n",
                i, lane->page_idx, lane->freelist_count,
                lane->alloc_count, lane->free_count);
    }
}