Fix MID v3.5 activation bugs: policy loop + malloc recursion

Two critical bugs fixed:

1. Policy snapshot infinite loop (smallobject_policy_v7.c):
   - The check `g_policy_v7_version == 0` stayed true, so the snapshot
     was re-initialized on every call
   - Fixed via a CAS that sets the global version to 1 after the first
     init (see the sketch after this list)

2. Malloc recursion (smallobject_segment_mid_v3.c):
   - Internal malloc() routed back through hakmem → MID v3.5 → segment
     creation → malloc → infinite recursion / stack overflow
   - Fixed by using mmap() directly for internal allocations:
     - Segment struct, pages array, page metadata block
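
A minimal sketch of the CAS-guarded one-time init behind fix 1 (only
`g_policy_v7_version` is taken from the real code; the helper name and
the snapshot body are illustrative):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint32_t g_policy_v7_version = 0;

    /* Illustrative helper; the real logic lives in smallobject_policy_v7.c. */
    static void policy_v7_ensure_init(void)
    {
        uint32_t expected = 0;
        /* Only the thread that wins the CAS builds the snapshot; the version
         * then stays at 1, so the old `== 0` check can no longer trigger a
         * re-init on every call. */
        if (atomic_compare_exchange_strong(&g_policy_v7_version, &expected, 1)) {
            /* ... build the policy snapshot exactly once ... */
        }
    }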

Performance results (bench_random_mixed 257-512B):
- Baseline (LEGACY): 34.0M ops/s
- MID_V35 ON (C6):   35.8M ops/s
- Improvement:       +5.1% ✓

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
Moe Charm (CI)
2025-12-12 07:12:24 +09:00
parent 212739607a
commit d5ffb3eeb2
7 changed files with 117 additions and 85 deletions


@@ -8,19 +8,7 @@
#include "box/smallobject_segment_mid_v3_box.h"
#include "box/region_id_v6_box.h"
// ============================================================================
// SmallPageMeta - Page metadata for MID v3
// ============================================================================
typedef struct SmallPageMeta {
void *ptr; // Page base pointer
uint32_t capacity; // Slots per page
uint8_t class_idx; // Size class (C5-C7)
uint32_t alloc_count; // Total allocations on this page
uint32_t free_count; // Total frees on this page
void *segment; // Back-pointer to SmallSegment_MID_v3
struct SmallPageMeta *next; // For free stack linking
} SmallPageMeta;
// SmallPageMeta is now defined in smallobject_segment_mid_v3_box.h
// ============================================================================
// Geometry Constants
@@ -52,9 +40,26 @@ static uint32_t class_idx_to_slots(uint32_t class_idx) {
// Segment Lifecycle
// ============================================================================
// Helper: mmap-based internal allocation to avoid recursion
// (malloc() would route back through hakmem → MID v3.5 → infinite loop)
static inline void* internal_mmap_alloc(size_t size) {
void* p = mmap(NULL, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
return (p == MAP_FAILED) ? NULL : p;
}
static inline void internal_mmap_free(void* p, size_t size) {
if (p) munmap(p, size);
}
// Size constants for internal allocations
#define SEG_STRUCT_SIZE (sizeof(SmallSegment_MID_v3))
#define PAGES_ARRAY_SIZE (sizeof(SmallPageMeta*) * SMALL_MID_PAGES_PER_SEG)
#define PAGE_META_SIZE (sizeof(SmallPageMeta))
SmallSegment_MID_v3* small_segment_mid_v3_create(void) {
// 1. Allocate SmallSegment_MID_v3 structure
SmallSegment_MID_v3 *seg = malloc(sizeof(*seg));
// 1. Allocate SmallSegment_MID_v3 structure via mmap (avoid malloc recursion)
SmallSegment_MID_v3 *seg = internal_mmap_alloc(SEG_STRUCT_SIZE);
if (!seg) return NULL;
// 2. mmap 2MiB contiguous memory
@@ -62,7 +67,7 @@ SmallSegment_MID_v3* small_segment_mid_v3_create(void) {
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (seg->start == MAP_FAILED) {
free(seg);
internal_mmap_free(seg, SEG_STRUCT_SIZE);
return NULL;
}
@@ -78,27 +83,27 @@ SmallSegment_MID_v3* small_segment_mid_v3_create(void) {
seg->page_offset[i] = 0;
}
// 4. Allocate and initialize page metadata array
seg->pages = malloc(sizeof(SmallPageMeta*) * SMALL_MID_PAGES_PER_SEG);
// 4. Allocate pages array via mmap
seg->pages = internal_mmap_alloc(PAGES_ARRAY_SIZE);
if (!seg->pages) {
munmap(seg->start, SMALL_MID_SEGMENT_SIZE);
free(seg);
internal_mmap_free(seg, SEG_STRUCT_SIZE);
return NULL;
}
// Allocate individual page metadata structures
// 5. Allocate page metadata as single block via mmap
// Layout: 32 SmallPageMeta structs contiguously
SmallPageMeta *meta_block = internal_mmap_alloc(PAGE_META_SIZE * SMALL_MID_PAGES_PER_SEG);
if (!meta_block) {
internal_mmap_free(seg->pages, PAGES_ARRAY_SIZE);
munmap(seg->start, SMALL_MID_SEGMENT_SIZE);
internal_mmap_free(seg, SEG_STRUCT_SIZE);
return NULL;
}
// Initialize individual page metadata structures
for (uint32_t i = 0; i < SMALL_MID_PAGES_PER_SEG; i++) {
SmallPageMeta *meta = malloc(sizeof(SmallPageMeta));
if (!meta) {
// Cleanup on failure
for (uint32_t j = 0; j < i; j++) {
free(seg->pages[j]);
}
free(seg->pages);
munmap(seg->start, SMALL_MID_SEGMENT_SIZE);
free(seg);
return NULL;
}
SmallPageMeta *meta = &meta_block[i];
meta->ptr = (char*)seg->start + (i * SMALL_MID_PAGE_SIZE);
meta->capacity = 0;
@@ -111,7 +116,7 @@ SmallSegment_MID_v3* small_segment_mid_v3_create(void) {
seg->pages[i] = meta;
}
// 5. Register with RegionIdBox
// 6. Register with RegionIdBox
seg->region_id = region_id_register_v6(seg->start, seg->total_size,
REGION_KIND_MID_V3, seg);
@@ -128,14 +133,22 @@ void small_segment_mid_v3_destroy(SmallSegment_MID_v3 *seg) {
// Unregister from RegionIdBox
region_id_unregister_v6(seg->region_id);
// Free page metadata
// Free page metadata (allocated as single block)
if (seg->pages && seg->pages[0]) {
// All metadata is in a single contiguous block starting at pages[0]
internal_mmap_free(seg->pages[0], PAGE_META_SIZE * SMALL_MID_PAGES_PER_SEG);
}
// Free pages array
if (seg->pages) {
for (uint32_t i = 0; i < SMALL_MID_PAGES_PER_SEG; i++) {
if (seg->pages[i]) {
free(seg->pages[i]);
}
internal_mmap_free(seg->pages, PAGES_ARRAY_SIZE);
}
// Free per-class free page stacks (if any were allocated)
for (int i = 0; i < 8; i++) {
if (seg->free_pages[i]) {
internal_mmap_free(seg->free_pages[i], sizeof(void*) * SMALL_MID_PAGES_PER_SEG);
}
free(seg->pages);
}
// Unmap segment memory
@@ -143,7 +156,7 @@ void small_segment_mid_v3_destroy(SmallSegment_MID_v3 *seg) {
munmap(seg->start, seg->total_size);
}
free(seg);
internal_mmap_free(seg, SEG_STRUCT_SIZE);
}
// ============================================================================
@@ -151,19 +164,28 @@ void small_segment_mid_v3_destroy(SmallSegment_MID_v3 *seg) {
// ============================================================================
// Take a page from the free stack (LIFO)
// If class-specific stack is empty, allocate from unassigned pages
void* small_segment_mid_v3_take_page(SmallSegment_MID_v3 *seg, uint32_t class_idx) {
if (!seg || class_idx >= 8) return NULL;
// First: try class-specific free stack
if (seg->free_count[class_idx] > 0) {
// Pop from free stack
void **stack = seg->free_pages[class_idx];
if (!stack || seg->free_count[class_idx] == 0) return NULL;
if (stack && seg->free_count[class_idx] > 0) {
void *page = stack[seg->free_count[class_idx] - 1];
seg->free_count[class_idx]--;
return page;
}
}
// Get the top page
void *page = stack[seg->free_count[class_idx] - 1];
seg->free_count[class_idx]--;
return page;
// Second: find an unassigned page from the pages array
for (uint32_t i = 0; i < seg->num_pages; i++) {
SmallPageMeta *meta = seg->pages[i];
if (meta && meta->class_idx == 0xFF) {
// Found unassigned page - assign to this class
meta->class_idx = class_idx;
return meta->ptr;
}
}
// No free pages available
@@ -174,9 +196,9 @@ void* small_segment_mid_v3_take_page(SmallSegment_MID_v3 *seg, uint32_t class_id
void small_segment_mid_v3_release_page(SmallSegment_MID_v3 *seg, void *page, uint32_t class_idx) {
if (!seg || !page || class_idx >= 8) return;
// Allocate stack if needed (lazy initialization)
// Allocate stack if needed (lazy initialization) - use mmap to avoid malloc recursion
if (!seg->free_pages[class_idx]) {
seg->free_pages[class_idx] = malloc(sizeof(void*) * SMALL_MID_PAGES_PER_SEG);
seg->free_pages[class_idx] = internal_mmap_alloc(sizeof(void*) * SMALL_MID_PAGES_PER_SEG);
if (!seg->free_pages[class_idx]) return;
}
@@ -209,7 +231,7 @@ bool small_segment_mid_v3_contains_page(SmallSegment_MID_v3 *seg, void *page) {
}
// Get page metadata for pointer
struct SmallPageMeta* small_segment_mid_v3_get_page_meta(
SmallPageMeta_MID_v3* small_segment_mid_v3_get_page_meta(
SmallSegment_MID_v3 *seg,
void *page
) {
@@ -226,7 +248,7 @@ struct SmallPageMeta* small_segment_mid_v3_get_page_meta(
if (page_idx >= SMALL_MID_PAGES_PER_SEG) return NULL;
return (struct SmallPageMeta*)seg->pages[page_idx];
return seg->pages[page_idx];
}
// ============================================================================