// Source listing: hakmem/core/smallsegment_v7.c (272 lines, 8.0 KiB, C)
// smallsegment_v7.c - SmallSegment v7 implementation (Phase v7-2: C6-only)
//
// Purpose:
// - 2MiB segment allocation with 2MiB alignment
// - Free page stack management
// - TLS segment access for fast path
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <stdint.h>
#include <unistd.h>
#include "box/smallsegment_v7_box.h"
#include "box/region_id_v6_box.h" // For REGION_KIND_SMALL_V7
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
// Invalid page index sentinel
#define SMALL_V7_PAGE_INVALID 0xFFFFFFFFu
// ============================================================================
// TLS Segment Slot
// ============================================================================
// Per-thread segment slot. Each thread owns at most one SmallSegment_v7,
// stored inline here so the fast path needs only a TLS load (no heap walk).
// mmap_base/mmap_size are kept separately from seg.base because the
// alignment-fallback path may map a larger region than the segment itself.
typedef struct {
SmallSegment_v7 seg;
int in_use;
void* mmap_base; // Actual mmap base (for munmap)
size_t mmap_size; // Actual mmap size (for munmap)
} TLSSegmentSlot_v7;
// One slot per thread; zero-initialized, so in_use == 0 until first alloc.
static __thread TLSSegmentSlot_v7 g_tls_segment_v7;
// ============================================================================
// Segment Allocation
// ============================================================================
// Allocate (or return) this thread's 2MiB-aligned small segment.
//
// Fast path: the TLS slot already holds a segment -> return it (note: the
// existing segment is returned regardless of owner_tid in that case).
// Slow path: mmap SMALL_SEGMENT_V7_SIZE; if the kernel did not hand back a
// naturally aligned mapping, retry with 2x over-allocation so an aligned
// window is guaranteed, then trim the unaligned head/tail back to the kernel
// so only the aligned 2MiB stays mapped (the original kept the full 4MiB
// mapped for the segment's lifetime).
//
// owner_tid: recorded in the segment header for ownership checks elsewhere.
// Returns: pointer to the TLS-resident segment header, or NULL on mmap failure.
SmallSegment_v7* small_segment_alloc_v7(uint32_t owner_tid) {
    TLSSegmentSlot_v7* slot = &g_tls_segment_v7;
    if (slot->in_use) {
        return &slot->seg; // Already allocated
    }
    // First attempt: plain mmap; the kernel frequently returns an address
    // that happens to be 2MiB-aligned, making the fallback rare.
    void* mem = mmap(NULL, SMALL_SEGMENT_V7_SIZE,
                     PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (mem == MAP_FAILED || mem == NULL) {
        return NULL;
    }
    uintptr_t addr = (uintptr_t)mem;
    void* mmap_base = mem;
    size_t mmap_size = SMALL_SEGMENT_V7_SIZE;
    if ((addr & (SMALL_SEGMENT_V7_SIZE - 1)) != 0) {
        // Not aligned - release and over-allocate 2x: a 4MiB mapping always
        // contains a 2MiB-aligned 2MiB window.
        munmap(mem, SMALL_SEGMENT_V7_SIZE);
        size_t alloc_size = SMALL_SEGMENT_V7_SIZE * 2;
        mem = mmap(NULL, alloc_size,
                   PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (mem == MAP_FAILED || mem == NULL) {
            return NULL;
        }
        uintptr_t raw_addr = (uintptr_t)mem;
        addr = (raw_addr + SMALL_SEGMENT_V7_SIZE - 1) & ~((uintptr_t)SMALL_SEGMENT_V7_SIZE - 1);
        // Defensive: the aligned window must sit inside our mapping.
        if (addr < raw_addr || addr + SMALL_SEGMENT_V7_SIZE > raw_addr + alloc_size) {
            munmap(mem, alloc_size);
            return NULL;
        }
        // Trim the unaligned head and tail so only the aligned 2MiB stays
        // mapped. Partial munmap of page-aligned subranges is valid per
        // POSIX; both head and tail are multiples of the page size because
        // raw_addr/addr are (at least) page-aligned. If a trim were to fail
        // we fall back to remembering the whole range for the final munmap.
        size_t head = (size_t)(addr - raw_addr);
        size_t tail = alloc_size - head - SMALL_SEGMENT_V7_SIZE;
        if ((head == 0 || munmap(mem, head) == 0) &&
            (tail == 0 || munmap((void*)(addr + SMALL_SEGMENT_V7_SIZE), tail) == 0)) {
            mmap_base = (void*)addr;
            mmap_size = SMALL_SEGMENT_V7_SIZE;
        } else {
            mmap_base = mem;
            mmap_size = alloc_size;
        }
    }
    // Initialize the TLS-resident segment header.
    SmallSegment_v7* seg = &slot->seg;
    slot->in_use = 1;
    slot->mmap_base = mmap_base;
    slot->mmap_size = mmap_size;
    seg->base = addr;
    seg->num_pages = SMALL_PAGES_PER_SEG_V7;
    seg->owner_tid = owner_tid;
    seg->flags = 0;
    seg->magic = SMALL_SEGMENT_V7_MAGIC;
    seg->region_kind = REGION_KIND_SMALL_V7;
    seg->segment_idx = 0; // TODO: RegionIdBox integration
    // All pages start free: head -> page 0 -> 1 -> ... -> N-1 -> INVALID.
    seg->free_page_head = 0;
    seg->free_page_count = seg->num_pages;
    // Single pass over the metadata (the original did two passes, writing
    // reserved0 twice). While a page is free, reserved0 doubles as the
    // intrusive free-stack "next" index; the sentinel truncates to 0xFFFF,
    // which the pop path rejects via the (head >= num_pages) check.
    for (uint32_t i = 0; i < seg->num_pages; i++) {
        SmallPageMeta_v7* m = &seg->page_meta[i];
        // Hot fields
        m->free_list = NULL;
        m->used = 0;
        m->capacity = 0; // 0 = page is free/unused
        m->class_idx = 0;
        m->flags = 0;
        m->page_idx = (uint16_t)i;
        m->reserved0 = (i + 1 < seg->num_pages)
                           ? (uint16_t)(i + 1)
                           : (uint16_t)SMALL_V7_PAGE_INVALID; // free-stack link
        m->segment = seg;
        m->segment_next_partial = NULL;
        // Cold fields (lifetime statistics)
        m->alloc_count = 0;
        m->free_count = 0;
        m->remote_free_count = 0;
        m->live_current = 0;
        m->peak_live = 0;
        m->remote_burst_max = 0;
        m->reserved1 = 0;
        m->epoch_first_alloc = 0;
        m->epoch_last_free = 0;
    }
    return seg;
}
// Release the calling thread's segment back to the OS.
// No-op unless `seg` is non-NULL, carries a valid magic, and is exactly this
// thread's TLS-resident segment (a foreign pointer is silently ignored).
void small_segment_free_v7(SmallSegment_v7* seg) {
    TLSSegmentSlot_v7* slot = &g_tls_segment_v7;
    if (seg == NULL ||
        seg->magic != SMALL_SEGMENT_V7_MAGIC ||
        seg != &slot->seg) {
        return;
    }
    seg->magic = 0; // Invalidate before unmapping the backing memory
    munmap(slot->mmap_base, slot->mmap_size);
    slot->mmap_size = 0;
    slot->mmap_base = NULL;
    slot->in_use = 0;
}
// ============================================================================
// Page Management (Free Page Stack)
// ============================================================================
// Pop a free page off the segment's intrusive free-page stack and prime it
// for the given size class. Returns NULL when the segment is invalid or no
// free page remains (the head may hold the truncated INVALID sentinel, which
// the bounds check rejects).
SmallPageMeta_v7* small_segment_take_page_v7(SmallSegment_v7* seg, uint32_t class_idx) {
    if (unlikely(!seg || !small_segment_v7_valid(seg))) {
        return NULL;
    }
    uint32_t head = seg->free_page_head;
    if (seg->free_page_count == 0 || head >= seg->num_pages) {
        return NULL; // Stack empty
    }
    SmallPageMeta_v7* page = &seg->page_meta[head];
    // Advance the stack: while free, reserved0 holds the next page index.
    seg->free_page_head = page->reserved0;
    seg->free_page_count--;
    // Prime the page for use; capacity and free_list are filled in later by
    // ColdIface when it carves the page.
    page->class_idx = (uint16_t)class_idx;
    page->flags = 0;
    page->used = 0;
    page->capacity = 0;
    page->free_list = NULL;
    page->segment_next_partial = NULL;
    page->reserved0 = 0; // No longer a stack link
    // Fresh statistics for this occupancy cycle.
    page->alloc_count = 0;
    page->free_count = 0;
    page->remote_free_count = 0;
    page->live_current = 0;
    page->peak_live = 0;
    page->remote_burst_max = 0;
    page->epoch_first_alloc = 0;
    page->epoch_last_free = 0;
    return page;
}
// Return a page to the segment's free-page stack.
// Silently ignores NULL arguments, an invalid segment, a page that does not
// belong to this segment, or an out-of-range page index.
void small_segment_release_page_v7(SmallSegment_v7* seg, SmallPageMeta_v7* page) {
    if (unlikely(!seg || !page)) return;
    if (unlikely(!small_segment_v7_valid(seg))) return;
    if (unlikely(page->segment != seg)) return;
    if (unlikely(page->page_idx >= seg->num_pages)) return;
    // Defensive: if every page is already on the free stack, this push would
    // be a double release and would corrupt free_page_count and the stack
    // links. This O(1) check only catches the stack-full case, not every
    // double release, but it is free and prevents count overflow.
    if (unlikely(seg->free_page_count >= seg->num_pages)) return;
    // Reset page state back to "free/unused".
    page->free_list = NULL;
    page->used = 0;
    page->capacity = 0; // 0 marks the page as free
    page->class_idx = 0;
    page->flags = 0;
    page->segment_next_partial = NULL;
    // Push onto the intrusive stack (reserved0 = next link while free).
    page->reserved0 = (uint16_t)seg->free_page_head;
    seg->free_page_head = page->page_idx;
    seg->free_page_count++;
}
// ============================================================================
// TLS Access
// ============================================================================
// Fast-path accessor: this thread's segment, or NULL if the thread has not
// allocated one yet (slot is zero-initialized TLS).
SmallSegment_v7* small_segment_v7_get_tls(void) {
    TLSSegmentSlot_v7* slot = &g_tls_segment_v7;
    return likely(slot->in_use) ? &slot->seg : NULL;
}
// Map a user pointer back to the metadata of the page containing it.
// Returns NULL when: ptr is NULL, this thread has no segment, ptr lies
// outside the segment, the computed page index is out of range, or the page
// fails small_page_v7_valid() (i.e. it is not currently in use).
SmallPageMeta_v7* small_page_meta_v7_of(void* ptr) {
    if (unlikely(!ptr)) return NULL;
    TLSSegmentSlot_v7* slot = &g_tls_segment_v7;
    if (unlikely(!slot->in_use)) return NULL;
    SmallSegment_v7* seg = &slot->seg;
    // Only pointers inside this thread's segment can be resolved here.
    if (unlikely(!small_ptr_in_segment_v7(seg, ptr))) return NULL;
    size_t idx = SMALL_V7_PAGE_IDX(seg, (uintptr_t)ptr);
    if (unlikely(idx >= seg->num_pages)) return NULL;
    SmallPageMeta_v7* page = &seg->page_meta[idx];
    return likely(small_page_v7_valid(page)) ? page : NULL;
}