Files
hakmem/core/hakmem_tiny_bg_spill.c
Moe Charm (CI) acc64f2438 Phase ML1: Pool v1 memset 89.73% overhead reduction (+15.34% improvement)
## Summary
- Fixed the setenv segfault in bench_profile.h with ChatGPT's help (switched to going through RTLD_NEXT)
- Added core/box/pool_zero_mode_box.h: ZERO_MODE is now managed in one place via a cached ENV lookup
- core/hakmem_pool.c now gates memset on the zero mode (full/header/off); see the sketch after this list
- A/B test result: +15.34% improvement with ZERO_MODE=header (1M iterations, C6-heavy)
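
To make the zero-mode items above concrete, here is a minimal sketch of an ENV-cached mode getter plus the mode-dependent memset. Only ZERO_MODE, the full/header/off modes, and the pool_zero_mode_box.h / hakmem_pool.c file names come from this commit; every other identifier is hypothetical.

```c
// Sketch only: ZERO_MODE and full/header/off come from the commit,
// all function and type names below are hypothetical.
#include <stdlib.h>
#include <string.h>

typedef enum {
    POOL_ZERO_FULL,    // memset the whole block (baseline behavior)
    POOL_ZERO_HEADER,  // clear only the block header
    POOL_ZERO_OFF      // no zeroing at all
} pool_zero_mode_t;

static pool_zero_mode_t pool_zero_mode(void) {
    static int cached = -1;                       // ENV is read once, then cached
    if (cached < 0) {
        const char* s = getenv("ZERO_MODE");
        if (s && strcmp(s, "header") == 0)   cached = POOL_ZERO_HEADER;
        else if (s && strcmp(s, "off") == 0) cached = POOL_ZERO_OFF;
        else                                 cached = POOL_ZERO_FULL;
    }
    return (pool_zero_mode_t)cached;
}

// Hypothetical call site in the pool allocation path: zero according to the mode.
static void pool_zero_block(void* p, size_t header_size, size_t block_size) {
    switch (pool_zero_mode()) {
    case POOL_ZERO_FULL:   memset(p, 0, block_size);  break;
    case POOL_ZERO_HEADER: memset(p, 0, header_size); break;
    case POOL_ZERO_OFF:    break;
    }
}
```

The intent, as described above, is that header-only zeroing skips the full-block memset that dominated Pool v1 allocation overhead, which is where the measured improvement comes from.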

## Files Modified
- core/box/pool_api.inc.h: include pool_zero_mode_box.h
- core/bench_profile.h: glibc setenv → malloc+putenv (avoids the segfault); see the sketch after this list
- core/hakmem_pool.c: zero-mode lookup and control logic
- core/box/pool_zero_mode_box.h (new): enum/getter
- CURRENT_TASK.md: Phase ML1 results recorded
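
For the bench_profile.h item, a rough sketch of the described workaround follows. Treat the details as assumptions: only "glibc setenv → malloc+putenv" and the switch to RTLD_NEXT are stated in this commit; the function name and exact mechanism are illustrative.

```c
// Hypothetical sketch of the bench_profile.h fix: build the "NAME=value" string
// with the next malloc in link order (RTLD_NEXT) and hand it to putenv, instead
// of calling glibc setenv while the allocator under test is being interposed.
#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int bench_setenv(const char* name, const char* value) {
    void* (*real_malloc)(size_t) = (void* (*)(size_t))dlsym(RTLD_NEXT, "malloc");
    if (!real_malloc) real_malloc = malloc;      // fall back if the lookup fails
    size_t len = strlen(name) + 1 + strlen(value) + 1;
    char* kv = (char*)real_malloc(len);
    if (!kv) return -1;
    snprintf(kv, len, "%s=%s", name, value);
    return putenv(kv);                           // putenv keeps the string, so it is never freed
}
```

Since putenv() does not copy its argument, the "NAME=value" string must stay alive for the lifetime of the process, which is why the sketch intentionally never frees it.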

## Test Results
| Iterations | ZERO_MODE=full | ZERO_MODE=header | Improvement |
|-----------|----------------|-----------------|------------|
| 10K       | 3.06 M ops/s   | 3.17 M ops/s    | +3.65%     |
| 1M        | 23.71 M ops/s  | 27.34 M ops/s   | **+15.34%** |

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Haiku 4.5 <noreply@anthropic.com>
2025-12-10 09:08:18 +09:00


#include "hakmem_tiny_bg_spill.h"
#include "hakmem_tiny_superslab.h" // For SuperSlab, TinySlabMeta, ss_active_dec_one
#include "hakmem_super_registry.h" // For hak_super_registry_lookup
#include "tiny_remote.h"
#include "hakmem_tiny.h"
#include "box/tiny_next_ptr_box.h" // Phase E1-CORRECT: Box API
#include <pthread.h>
#include <stdlib.h>

static inline uint32_t tiny_self_u32_guard(void) {
    return (uint32_t)(uintptr_t)pthread_self();
}
// Global variables
int g_bg_spill_enable = 0; // ENV toggle removed (fixed OFF)
int g_bg_spill_target = 128; // Fixed target
int g_bg_spill_max_batch = 128; // Fixed batch
_Atomic uintptr_t g_bg_spill_head[TINY_NUM_CLASSES];
_Atomic uint32_t g_bg_spill_len[TINY_NUM_CLASSES];
void bg_spill_init(void) {
    // Initialize atomic queues (spill disabled by default)
    for (int k = 0; k < TINY_NUM_CLASSES; k++) {
        atomic_store_explicit(&g_bg_spill_head[k], (uintptr_t)0, memory_order_relaxed);
        atomic_store_explicit(&g_bg_spill_len[k], 0u, memory_order_relaxed);
    }
}
// Drain up to g_bg_spill_max_batch nodes from this class's spill queue back to
// their owning SuperSlab freelists, re-queuing any remainder for a later pass.
void bg_spill_drain_class(int class_idx, pthread_mutex_t* lock) {
    uint32_t approx = atomic_load_explicit(&g_bg_spill_len[class_idx], memory_order_relaxed);
    if (approx == 0) return;
    uintptr_t chain = atomic_exchange_explicit(&g_bg_spill_head[class_idx], (uintptr_t)0, memory_order_acq_rel);
    if (chain == 0) return;
    // Split chain up to max_batch
    int processed = 0;
    void* rest = NULL;
    void* cur = (void*)chain;
    void* prev = NULL;
    // Phase 7: header-aware next pointer (C0-C6: base+1, C7: base)
#if HAKMEM_TINY_HEADER_CLASSIDX
    // Phase E1-CORRECT: ALL classes have 1-byte header, next ptr at offset 1
    const size_t next_off = 1;
#else
    const size_t next_off = 0;
#endif
    (void)next_off;
#include "box/tiny_next_ptr_box.h"
    while (cur && processed < g_bg_spill_max_batch) {
        prev = cur;
        cur = tiny_next_read(class_idx, cur);
        processed++;
    }
    if (cur != NULL) { rest = cur; tiny_next_write(class_idx, prev, NULL); }
    // Return processed nodes to SS freelists
    pthread_mutex_lock(lock);
    uint32_t self_tid = tiny_self_u32_guard();
    void* node = (void*)chain;
    while (node) {
        SuperSlab* owner_ss = hak_super_lookup(node);
        void* next = tiny_next_read(class_idx, node);
        if (owner_ss && owner_ss->magic == SUPERSLAB_MAGIC) {
            int slab_idx = slab_index_for(owner_ss, node);
            if (slab_idx >= 0 && slab_idx < ss_slabs_capacity(owner_ss)) {
                TinySlabMeta* meta = &owner_ss->slabs[slab_idx];
                uint8_t node_class_idx = (meta->class_idx < TINY_NUM_CLASSES)
                                             ? meta->class_idx
                                             : (uint8_t)class_idx;
                if (!tiny_remote_guard_allow_local_push(owner_ss, slab_idx, meta, node, "bg_spill", self_tid)) {
                    (void)ss_remote_push(owner_ss, slab_idx, node);
                    if (meta->used > 0) meta->used--;
                    node = next;
                    continue;
                }
                void* prev_free = meta->freelist;
                // Phase 12: use per-slab class for next pointer
                tiny_next_write(node_class_idx, node, prev_free);
                meta->freelist = node;
                tiny_failfast_log("bg_spill", node_class_idx, owner_ss, meta, node, prev_free);
                meta->used--;
                // Active was decremented at free time
            }
        }
        node = next;
    }
    pthread_mutex_unlock(lock);
    if (processed > 0) {
        atomic_fetch_sub_explicit(&g_bg_spill_len[class_idx], (uint32_t)processed, memory_order_relaxed);
    }
    if (rest) {
        // Prepend remainder back to head
        uintptr_t old_head;
        void* tail = rest;
        while (tiny_next_read(class_idx, tail)) tail = tiny_next_read(class_idx, tail);
        do {
            old_head = atomic_load_explicit(&g_bg_spill_head[class_idx], memory_order_acquire);
            tiny_next_write(class_idx, tail, (void*)old_head);
        } while (!atomic_compare_exchange_weak_explicit(&g_bg_spill_head[class_idx], &old_head,
                                                        (uintptr_t)rest,
                                                        memory_order_release, memory_order_relaxed));
    }
}