// smallobject_segment_mid_v3.c // Phase v11a-2: Multi-class MID v3.5 segment implementation (L2 physical layer) #include #include #include #include #include "box/smallobject_segment_mid_v3_box.h" #include "box/region_id_v6_box.h" #include "box/smallobject_mid_v35_geom_box.h" // Phase MID-V35-HOTPATH-OPT-1: geometry SSOT // SmallPageMeta is now defined in smallobject_segment_mid_v3_box.h // ============================================================================ // Geometry Constants // ============================================================================ #define SMALL_MID_SEGMENT_SIZE (2 * 1024 * 1024) // 2 MiB #define SMALL_MID_PAGE_SIZE (64 * 1024) // 64 KiB #define SMALL_MID_PAGES_PER_SEG (SMALL_MID_SEGMENT_SIZE / SMALL_MID_PAGE_SIZE) // 32 // ============================================================================ // Helper: class_idx to slots mapping // ============================================================================ // Phase MID-V35-HOTPATH-OPT-1: Use geom_box as Single Source of Truth // See: core/box/smallobject_mid_v35_geom_box.h // (Removed local class_idx_to_slots() which had wrong C6 value: 102 instead of 128) // ============================================================================ // Segment Lifecycle // ============================================================================ // Helper: mmap-based internal allocation to avoid recursion // (malloc() would route back through hakmem → MID v3.5 → infinite loop) static inline void* internal_mmap_alloc(size_t size) { void* p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); return (p == MAP_FAILED) ? 
NULL : p; } static inline void internal_mmap_free(void* p, size_t size) { if (p) munmap(p, size); } // Size constants for internal allocations #define SEG_STRUCT_SIZE (sizeof(SmallSegment_MID_v3)) #define PAGES_ARRAY_SIZE (sizeof(SmallPageMeta*) * SMALL_MID_PAGES_PER_SEG) #define PAGE_META_SIZE (sizeof(SmallPageMeta)) SmallSegment_MID_v3* small_segment_mid_v3_create(void) { // 1. Allocate SmallSegment_MID_v3 structure via mmap (avoid malloc recursion) SmallSegment_MID_v3 *seg = internal_mmap_alloc(SEG_STRUCT_SIZE); if (!seg) return NULL; // 2. mmap 2MiB contiguous memory seg->start = mmap(NULL, SMALL_MID_SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (seg->start == MAP_FAILED) { internal_mmap_free(seg, SEG_STRUCT_SIZE); return NULL; } seg->total_size = SMALL_MID_SEGMENT_SIZE; seg->page_size = SMALL_MID_PAGE_SIZE; seg->num_pages = SMALL_MID_PAGES_PER_SEG; // 3. Initialize per-class free page stacks for (int i = 0; i < 8; i++) { seg->free_pages[i] = NULL; seg->free_count[i] = 0; seg->current_page[i] = NULL; seg->page_offset[i] = 0; } // 4. Allocate pages array via mmap seg->pages = internal_mmap_alloc(PAGES_ARRAY_SIZE); if (!seg->pages) { munmap(seg->start, SMALL_MID_SEGMENT_SIZE); internal_mmap_free(seg, SEG_STRUCT_SIZE); return NULL; } // 5. 
Allocate page metadata as single block via mmap // Layout: 32 SmallPageMeta structs contiguously SmallPageMeta *meta_block = internal_mmap_alloc(PAGE_META_SIZE * SMALL_MID_PAGES_PER_SEG); if (!meta_block) { internal_mmap_free(seg->pages, PAGES_ARRAY_SIZE); munmap(seg->start, SMALL_MID_SEGMENT_SIZE); internal_mmap_free(seg, SEG_STRUCT_SIZE); return NULL; } // Initialize individual page metadata structures for (uint32_t i = 0; i < SMALL_MID_PAGES_PER_SEG; i++) { SmallPageMeta *meta = &meta_block[i]; meta->ptr = (char*)seg->start + (i * SMALL_MID_PAGE_SIZE); meta->capacity = 0; meta->class_idx = 0xFF; // Unassigned meta->alloc_count = 0; meta->free_count = 0; meta->segment = seg; meta->next = NULL; seg->pages[i] = meta; } // 6. Register with RegionIdBox seg->region_id = region_id_register_v6(seg->start, seg->total_size, REGION_KIND_MID_V3, seg); seg->total_allocations = 0; seg->total_frees = 0; seg->last_refill_count = 0; return seg; } void small_segment_mid_v3_destroy(SmallSegment_MID_v3 *seg) { if (!seg) return; // Unregister from RegionIdBox region_id_unregister_v6(seg->region_id); // Free page metadata (allocated as single block) if (seg->pages && seg->pages[0]) { // All metadata is in a single contiguous block starting at pages[0] internal_mmap_free(seg->pages[0], PAGE_META_SIZE * SMALL_MID_PAGES_PER_SEG); } // Free pages array if (seg->pages) { internal_mmap_free(seg->pages, PAGES_ARRAY_SIZE); } // Free per-class free page stacks (if any were allocated) for (int i = 0; i < 8; i++) { if (seg->free_pages[i]) { internal_mmap_free(seg->free_pages[i], sizeof(void*) * SMALL_MID_PAGES_PER_SEG); } } // Unmap segment memory if (seg->start && seg->start != MAP_FAILED) { munmap(seg->start, seg->total_size); } internal_mmap_free(seg, SEG_STRUCT_SIZE); } // ============================================================================ // Page Management // ============================================================================ // Take a page from the free stack (LIFO) // 
If class-specific stack is empty, allocate from unassigned pages void* small_segment_mid_v3_take_page(SmallSegment_MID_v3 *seg, uint32_t class_idx) { if (!seg || class_idx >= 8) return NULL; // First: try class-specific free stack if (seg->free_count[class_idx] > 0) { void **stack = seg->free_pages[class_idx]; if (stack && seg->free_count[class_idx] > 0) { void *page = stack[seg->free_count[class_idx] - 1]; seg->free_count[class_idx]--; return page; } } // Second: find an unassigned page from the pages array for (uint32_t i = 0; i < seg->num_pages; i++) { SmallPageMeta *meta = seg->pages[i]; if (meta && meta->class_idx == 0xFF) { // Found unassigned page - assign to this class meta->class_idx = class_idx; return meta->ptr; } } // No free pages available return NULL; } // Release a page back to the free stack (LIFO) void small_segment_mid_v3_release_page(SmallSegment_MID_v3 *seg, void *page, uint32_t class_idx) { if (!seg || !page || class_idx >= 8) return; // Allocate stack if needed (lazy initialization) - use mmap to avoid malloc recursion if (!seg->free_pages[class_idx]) { seg->free_pages[class_idx] = internal_mmap_alloc(sizeof(void*) * SMALL_MID_PAGES_PER_SEG); if (!seg->free_pages[class_idx]) return; } // Push to free stack seg->free_pages[class_idx][seg->free_count[class_idx]] = page; seg->free_count[class_idx]++; } // Get region ID uint32_t small_segment_mid_v3_region_id(SmallSegment_MID_v3 *seg) { return seg ? 
seg->region_id : 0; } // Set region ID (called by RegionIdBox) void small_segment_mid_v3_set_region_id(SmallSegment_MID_v3 *seg, uint32_t region_id) { if (seg) { seg->region_id = region_id; } } // Check if page is within segment bounds bool small_segment_mid_v3_contains_page(SmallSegment_MID_v3 *seg, void *page) { if (!seg || !page) return false; uintptr_t page_addr = (uintptr_t)page; uintptr_t seg_start = (uintptr_t)seg->start; uintptr_t seg_end = seg_start + seg->total_size; return (page_addr >= seg_start && page_addr < seg_end); } // Get page metadata for pointer SmallPageMeta_MID_v3* small_segment_mid_v3_get_page_meta( SmallSegment_MID_v3 *seg, void *page ) { if (!seg || !page || !seg->pages) return NULL; // Calculate page index uintptr_t page_addr = (uintptr_t)page; uintptr_t seg_start = (uintptr_t)seg->start; if (page_addr < seg_start) return NULL; uintptr_t offset = page_addr - seg_start; uint32_t page_idx = offset / SMALL_MID_PAGE_SIZE; if (page_idx >= SMALL_MID_PAGES_PER_SEG) return NULL; return seg->pages[page_idx]; } // ============================================================================ // Statistics // ============================================================================ SmallSegmentStatsMID_v3 small_segment_mid_v3_get_stats(SmallSegment_MID_v3 *seg) { SmallSegmentStatsMID_v3 stats = {0}; if (!seg) return stats; stats.total_allocations = seg->total_allocations; stats.total_frees = seg->total_frees; // Count active pages per class for (uint32_t i = 0; i < SMALL_MID_PAGES_PER_SEG; i++) { SmallPageMeta *meta = seg->pages[i]; if (meta && meta->class_idx < 8 && meta->class_idx != 0xFF) { stats.active_pages[meta->class_idx]++; } } return stats; } void small_segment_mid_v3_reset_stats(SmallSegment_MID_v3 *seg) { if (!seg) return; seg->total_allocations = 0; seg->total_frees = 0; seg->last_refill_count = 0; } // ============================================================================ // Validation & Debugging // 
// ============================================================================

// Sanity-check the segment's geometry invariants.
// Returns false on NULL, an unmapped/failed payload, or any geometry field
// that disagrees with the compile-time constants.
bool small_segment_mid_v3_validate(SmallSegment_MID_v3 *seg) {
    if (!seg) return false;
    if (!seg->start || seg->start == MAP_FAILED) return false;
    if (seg->total_size != SMALL_MID_SEGMENT_SIZE) return false;
    // FIX: page_size is set by create() but was never validated here,
    // unlike the sibling total_size / num_pages checks.
    if (seg->page_size != SMALL_MID_PAGE_SIZE) return false;
    if (seg->num_pages != SMALL_MID_PAGES_PER_SEG) return false;
    if (!seg->pages) return false;
    return true;
}

// Dump segment state to stderr (debugging aid; no side effects on the segment).
void small_segment_mid_v3_debug_print(SmallSegment_MID_v3 *seg) {
    if (!seg) {
        fprintf(stderr, "[MID_v3] Segment: NULL\n");
        return;
    }
    fprintf(stderr, "[MID_v3] Segment %p:\n", (void*)seg);
    fprintf(stderr, " start=%p size=%zu pages=%u region_id=%u\n",
            seg->start, seg->total_size, seg->num_pages, seg->region_id);
    // FIX: original used %lu for the 64-bit counters, which is the wrong
    // specifier on LLP64 targets (long is 32-bit there); cast + %llu is
    // portable regardless of the counters' exact integer type.
    fprintf(stderr, " allocs=%llu frees=%llu\n",
            (unsigned long long)seg->total_allocations,
            (unsigned long long)seg->total_frees);
    for (int i = 0; i < 8; i++) {
        if (seg->free_count[i] > 0) {
            fprintf(stderr, " C%d: free_pages=%u\n", i, seg->free_count[i]);
        }
    }
}