Restore C7 Warm/TLS carve for release and add policy scaffolding

This commit is contained in:
Moe Charm (CI)
2025-12-06 01:34:04 +09:00
parent d17ec46628
commit 03538055ae
15 changed files with 588 additions and 164 deletions

View File

@ -12,10 +12,14 @@
#include "front/tiny_warm_pool.h" // Warm Pool: Prefill during registry scans
#include "box/ss_slab_reset_box.h" // Box: Reset slab metadata on reuse (C7 guard)
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdatomic.h>
// Stage3(LRU) 由来の Superslab をトレースするための簡易マジック
_Atomic uintptr_t g_c7_stage3_magic_ss = 0;
static inline void c7_log_meta_state(const char* tag, SuperSlab* ss, int slab_idx) {
if (!ss) return;
#if HAKMEM_BUILD_RELEASE
@ -357,7 +361,8 @@ stage1_retry_after_tension_drain:
if (class_idx == 7) {
TinySlabMeta* meta = &ss_guard->slabs[reuse_slot_idx];
if (!c7_meta_is_pristine(meta)) {
int meta_ok = (meta->used == 0) && (meta->carved == 0) && (meta->freelist == NULL);
if (!meta_ok) {
c7_log_skip_nonempty_acquire(ss_guard, reuse_slot_idx, meta, "SKIP_NONEMPTY_ACQUIRE");
sp_freelist_push_lockfree(class_idx, reuse_meta, reuse_slot_idx);
goto stage2_fallback;
@ -418,6 +423,17 @@ stage1_retry_after_tension_drain:
*ss_out = ss;
*slab_idx_out = reuse_slot_idx;
if (class_idx == 7) {
TinySlabMeta* meta_check = &ss->slabs[reuse_slot_idx];
if (!((meta_check->used == 0) && (meta_check->carved == 0) && (meta_check->freelist == NULL))) {
sp_freelist_push_lockfree(class_idx, reuse_meta, reuse_slot_idx);
if (g_lock_stats_enabled == 1) {
atomic_fetch_add(&g_lock_release_count, 1);
}
pthread_mutex_unlock(&g_shared_pool.alloc_lock);
goto stage2_fallback;
}
}
if (c7_reset_and_log_if_needed(ss, reuse_slot_idx, class_idx) != 0) {
*ss_out = NULL;
*slab_idx_out = -1;
@ -497,7 +513,9 @@ stage2_fallback:
if (class_idx == 7) {
TinySlabMeta* meta = &ss->slabs[claimed_idx];
if (!c7_meta_is_pristine(meta)) {
int meta_ok = (meta->used == 0) && (meta->carved == 0) &&
(meta->freelist == NULL);
if (!meta_ok) {
c7_log_skip_nonempty_acquire(ss, claimed_idx, meta, "SKIP_NONEMPTY_ACQUIRE");
sp_slot_mark_empty(hint_meta, claimed_idx);
if (g_lock_stats_enabled == 1) {
@ -523,6 +541,20 @@ stage2_fallback:
// Hint is still good, no need to update
*ss_out = ss;
*slab_idx_out = claimed_idx;
if (class_idx == 7) {
TinySlabMeta* meta_check = &ss->slabs[claimed_idx];
if (!((meta_check->used == 0) && (meta_check->carved == 0) &&
(meta_check->freelist == NULL))) {
sp_slot_mark_empty(hint_meta, claimed_idx);
*ss_out = NULL;
*slab_idx_out = -1;
if (g_lock_stats_enabled == 1) {
atomic_fetch_add(&g_lock_release_count, 1);
}
pthread_mutex_unlock(&g_shared_pool.alloc_lock);
goto stage2_scan;
}
}
if (c7_reset_and_log_if_needed(ss, claimed_idx, class_idx) != 0) {
*ss_out = NULL;
*slab_idx_out = -1;
@ -613,7 +645,9 @@ stage2_scan:
if (class_idx == 7) {
TinySlabMeta* meta_slab = &ss->slabs[claimed_idx];
if (!c7_meta_is_pristine(meta_slab)) {
int meta_ok = (meta_slab->used == 0) && (meta_slab->carved == 0) &&
(meta_slab->freelist == NULL);
if (!meta_ok) {
c7_log_skip_nonempty_acquire(ss, claimed_idx, meta_slab, "SKIP_NONEMPTY_ACQUIRE");
sp_slot_mark_empty(meta, claimed_idx);
if (g_lock_stats_enabled == 1) {
@ -641,6 +675,20 @@ stage2_scan:
*ss_out = ss;
*slab_idx_out = claimed_idx;
if (class_idx == 7) {
TinySlabMeta* meta_check = &ss->slabs[claimed_idx];
if (!((meta_check->used == 0) && (meta_check->carved == 0) &&
(meta_check->freelist == NULL))) {
sp_slot_mark_empty(meta, claimed_idx);
*ss_out = NULL;
*slab_idx_out = -1;
if (g_lock_stats_enabled == 1) {
atomic_fetch_add(&g_lock_release_count, 1);
}
pthread_mutex_unlock(&g_shared_pool.alloc_lock);
continue;
}
}
if (c7_reset_and_log_if_needed(ss, claimed_idx, class_idx) != 0) {
*ss_out = NULL;
*slab_idx_out = -1;
@ -721,9 +769,14 @@ stage2_scan:
// Stage 3a: Try LRU cache
extern SuperSlab* hak_ss_lru_pop(uint8_t size_class);
new_ss = hak_ss_lru_pop((uint8_t)class_idx);
int from_lru = (new_ss != NULL);
int from_lru = 0;
if (class_idx != 7) {
new_ss = hak_ss_lru_pop((uint8_t)class_idx);
from_lru = (new_ss != NULL);
} else {
// C7: Stage3 LRU 再利用は一旦封じる(再利用が汚染源かを切り分ける)
atomic_store_explicit(&g_c7_stage3_magic_ss, 0, memory_order_relaxed);
}
// Stage 3b: If LRU miss, allocate new SuperSlab
if (!new_ss) {
@ -752,6 +805,10 @@ stage2_scan:
}
new_ss = allocated_ss;
if (class_idx == 7) {
// Stage3 経由の C7 Superslab は新規確保のみ(magic もリセット扱い)
atomic_store_explicit(&g_c7_stage3_magic_ss, 0, memory_order_relaxed);
}
// Add newly allocated SuperSlab to the shared pool's internal array
if (g_shared_pool.total_count >= g_shared_pool.capacity) {
@ -771,6 +828,29 @@ stage2_scan:
g_shared_pool.total_count++;
}
// C7: LRU 再利用・新規確保いずれでも、空スラブに完全リセットしてから返す
if (class_idx == 7 && new_ss) {
int cap = ss_slabs_capacity(new_ss);
new_ss->slab_bitmap = 0;
new_ss->nonempty_mask = 0;
new_ss->freelist_mask = 0;
new_ss->empty_mask = 0;
new_ss->empty_count = 0;
new_ss->active_slabs = 0;
new_ss->hot_count = 0;
new_ss->cold_count = 0;
for (int s = 0; s < cap; s++) {
ss_slab_reset_meta_for_tiny(new_ss, s, class_idx);
}
static _Atomic uint32_t rel_stage3_reset_logs = 0;
uint32_t n = atomic_fetch_add_explicit(&rel_stage3_reset_logs, 1, memory_order_relaxed);
if (n < 4) {
fprintf(stderr,
"[REL_C7_STAGE3_RESET] ss=%p from_lru=%d cap=%d\n",
(void*)new_ss, from_lru, cap);
}
}
#if !HAKMEM_BUILD_RELEASE
if (dbg_acquire == 1 && new_ss) {
fprintf(stderr, "[SP_ACQUIRE_STAGE3] class=%d new SuperSlab (ss=%p from_lru=%d)\n",