Files
hakorune/src/runtime/gc_controller.rs
nyash-codex 6ecd8f7f52 feat(runtime): Phase 103 CoreServices Optional化 - Memory Constraints対応
- Add CoreServicesConfig struct (from_env, minimal, all_enabled)
- Implement with_core_from_registry_optional() for selective initialization
- Update CoreBoxesImpl fields to Option<Arc<dyn XyzService>>
- Maintain backward compatibility (with_core_from_registry calls all_enabled)
- Add NYASH_CORE_DISABLE_* environment variable support
- ConsoleBox remains mandatory (Graceful Degradation principle)
- Add unit tests for optional initialization
- Update console_println! macro to handle Option type
- Fix direct console.println() calls in vm.rs and selfhost.rs
- Create core_optional_design.md documentation

Note: Phase 104 will extend ConsoleService to be optional as well with
graceful fallback in console_println! macro.

Files modified:
- src/runtime/plugin_host.rs (CoreServicesConfig, with_core_from_registry_optional, tests)
- src/runtime/core_services.rs (CoreBoxesImpl fields → Option type)
- src/runtime/mod.rs (console_println! macro updated)
- src/runner/modes/vm.rs (handle Option console)
- src/runner/selfhost.rs (handle Option console)
- docs/development/current/main/core_optional_design.md (new)
- docs/development/current/main/ring0-inventory.md (Phase 103 entry)

Test results:
- Build: Success (0 errors, 7 warnings)
- Unit tests: 3/3 passed (optional_core_tests)
- Runtime tests: 63/63 passed
- Smoke tests: 30/31 passed (1 pre-existing timeout)
2025-12-03 13:59:06 +09:00

215 lines
7.8 KiB
Rust

//! Unified GC controller (skeleton)
//! Implements GcHooks and centralizes mode selection and metrics.
use std::sync::atomic::{AtomicU64, Ordering};
use super::gc::{BarrierKind, GcHooks};
use super::gc_mode::GcMode;
use crate::config::env;
use crate::runtime::gc_trace;
use crate::runtime::get_global_ring0;
use std::collections::{HashSet, VecDeque};
/// Central GC state: the selected mode plus lock-free diagnostic counters.
/// All counters use `Relaxed` ordering — they are metrics, not synchronization.
pub struct GcController {
    // Selected GC mode; fixed for the lifetime of the controller.
    mode: GcMode,
    // Total safepoint polls observed (not counted in Off mode).
    safepoints: AtomicU64,
    // Total read barriers observed (not counted in Off mode).
    barrier_reads: AtomicU64,
    // Total write barriers observed (not counted in Off mode).
    barrier_writes: AtomicU64,
    // Cumulative allocated bytes reported via `alloc()`.
    alloc_bytes: AtomicU64,
    // Cumulative allocation events reported via `alloc()`.
    alloc_count: AtomicU64,
    // Safepoints since the last trial collection (reset by run_trial_collection).
    sp_since_last: AtomicU64,
    // Bytes allocated since the last trial collection (reset by run_trial_collection).
    bytes_since_last: AtomicU64,
    // Trigger: run a collection every N safepoints (None/0 disables) — from env.
    collect_sp_interval: Option<u64>,
    // Trigger: run a collection every N allocated bytes (None/0 disables) — from env.
    collect_alloc_bytes: Option<u64>,
    // Diagnostics: last trial reachability counters
    trial_nodes_last: AtomicU64,
    trial_edges_last: AtomicU64,
    // Diagnostics: collection counters and last duration/flags
    collect_count_total: AtomicU64,
    collect_by_sp: AtomicU64,
    collect_by_alloc: AtomicU64,
    trial_duration_last_ms: AtomicU64,
    trial_reason_last: AtomicU64, // bitflags: 1=sp, 2=alloc
}
impl GcController {
    /// Builds a controller for `mode` with all counters zeroed.
    /// Collection-trigger thresholds are read once from the environment
    /// (`env::gc_collect_sp_interval` / `env::gc_collect_alloc_bytes`).
    pub fn new(mode: GcMode) -> Self {
        // Small helper so each zeroed counter field reads identically.
        let zero = || AtomicU64::new(0);
        Self {
            mode,
            safepoints: zero(),
            barrier_reads: zero(),
            barrier_writes: zero(),
            alloc_bytes: zero(),
            alloc_count: zero(),
            sp_since_last: zero(),
            bytes_since_last: zero(),
            collect_sp_interval: env::gc_collect_sp_interval(),
            collect_alloc_bytes: env::gc_collect_alloc_bytes(),
            trial_nodes_last: zero(),
            trial_edges_last: zero(),
            collect_count_total: zero(),
            collect_by_sp: zero(),
            collect_by_alloc: zero(),
            trial_duration_last_ms: zero(),
            trial_reason_last: zero(),
        }
    }

    /// Returns the mode this controller was created with.
    pub fn mode(&self) -> GcMode {
        self.mode
    }

    /// Snapshot of `(safepoints, barrier_reads, barrier_writes)` totals.
    pub fn snapshot(&self) -> (u64, u64, u64) {
        let sp = self.safepoints.load(Ordering::Relaxed);
        let reads = self.barrier_reads.load(Ordering::Relaxed);
        let writes = self.barrier_writes.load(Ordering::Relaxed);
        (sp, reads, writes)
    }
}
impl GcHooks for GcController {
    /// Safepoint poll. In `Off` mode this is a near-no-op; otherwise it bumps
    /// the counters and opportunistically triggers a trial collection when a
    /// safepoint-interval or allocated-bytes threshold has been crossed.
    fn safepoint(&self) {
        // Off mode: minimal overhead but still callable.
        if self.mode == GcMode::Off {
            return;
        }
        self.safepoints.fetch_add(1, Ordering::Relaxed);
        let sp = self.sp_since_last.fetch_add(1, Ordering::Relaxed) + 1;

        // A threshold of None or 0 disables that trigger entirely.
        let sp_hit = self.collect_sp_interval.map_or(false, |n| n > 0 && sp >= n);
        let window_bytes = self.bytes_since_last.load(Ordering::Relaxed);
        let bytes_hit = self
            .collect_alloc_bytes
            .map_or(false, |n| n > 0 && window_bytes >= n);

        if !(sp_hit || bytes_hit) {
            // Future: per-mode collection/cooperation hooks
            return;
        }

        // Record which trigger(s) fired for diagnostics (1=sp, 2=alloc).
        let mut reason_bits: u64 = 0;
        if sp_hit {
            reason_bits |= 1;
            self.collect_by_sp.fetch_add(1, Ordering::Relaxed);
        }
        if bytes_hit {
            reason_bits |= 2;
            self.collect_by_alloc.fetch_add(1, Ordering::Relaxed);
        }
        self.trial_reason_last.store(reason_bits, Ordering::Relaxed);
        self.run_trial_collection();
    }

    /// Counts read/write barriers; suppressed entirely in `Off` mode.
    fn barrier(&self, kind: BarrierKind) {
        if self.mode == GcMode::Off {
            return;
        }
        let counter = match kind {
            BarrierKind::Read => &self.barrier_reads,
            BarrierKind::Write => &self.barrier_writes,
        };
        counter.fetch_add(1, Ordering::Relaxed);
    }

    /// Exposes `(safepoints, reads, writes)` to generic GC instrumentation.
    fn snapshot_counters(&self) -> Option<(u64, u64, u64)> {
        Some(self.snapshot())
    }

    /// Records one allocation of `bytes`; suppressed entirely in `Off` mode.
    fn alloc(&self, bytes: u64) {
        if self.mode == GcMode::Off {
            return;
        }
        self.alloc_count.fetch_add(1, Ordering::Relaxed);
        self.alloc_bytes.fetch_add(bytes, Ordering::Relaxed);
        self.bytes_since_last.fetch_add(bytes, Ordering::Relaxed);
    }
}
impl GcController {
    /// Returns cumulative allocation stats as `(allocation_count, total_bytes)`.
    pub fn alloc_totals(&self) -> (u64, u64) {
        let count = self.alloc_count.load(Ordering::Relaxed);
        let bytes = self.alloc_bytes.load(Ordering::Relaxed);
        (count, bytes)
    }
}
impl GcController {
    /// Runs a trial (non-reclaiming) reachability scan.
    ///
    /// Resets the safepoint/byte trigger windows, then — for the Rc, RcCycle
    /// and STW modes only — performs a BFS from the current root set
    /// (HostHandle registry + modules registry) counting reachable nodes and
    /// traversed edges. Results are stored in the `trial_*` diagnostic
    /// counters; nothing is freed. Other modes only reset the windows.
    fn run_trial_collection(&self) {
        // Reset trigger windows regardless of mode so we don't re-fire immediately.
        self.sp_since_last.store(0, Ordering::Relaxed);
        self.bytes_since_last.store(0, Ordering::Relaxed);
        // Only run for rc/rc+cycle/stw; rc+cycle is default.
        match self.mode {
            GcMode::Rc | GcMode::RcCycle | GcMode::STW => {
                let started = std::time::Instant::now();
                // Roots: HostHandle registry + modules_registry (Arc<dyn NyashBox>)
                let mut roots: Vec<std::sync::Arc<dyn crate::box_trait::NyashBox>> =
                    crate::runtime::host_handles::snapshot();
                roots.extend(crate::runtime::modules_registry::snapshot_boxes());

                // BFS over the object graph; `visited` is keyed by box_id so
                // each node is counted and expanded at most once.
                let mut visited: HashSet<u64> = HashSet::new();
                let mut q: VecDeque<std::sync::Arc<dyn crate::box_trait::NyashBox>> =
                    VecDeque::new();
                for r in roots.into_iter() {
                    let id = r.box_id();
                    if visited.insert(id) {
                        q.push_back(r);
                    }
                }
                let mut nodes: u64 = visited.len() as u64;
                let mut edges: u64 = 0;
                while let Some(cur) = q.pop_front() {
                    gc_trace::trace_children(&*cur, &mut |child| {
                        edges += 1;
                        let id = child.box_id();
                        if visited.insert(id) {
                            nodes += 1;
                            q.push_back(child);
                        }
                    });
                }
                // Store last diagnostics (available for JSON metrics)
                self.trial_nodes_last.store(nodes, Ordering::Relaxed);
                self.trial_edges_last.store(edges, Ordering::Relaxed);
                if (nodes + edges) > 0 && crate::config::env::gc_metrics() {
                    // Fix: the old message claimed roots=jit_handles, but the
                    // roots above come from host_handles + modules_registry.
                    get_global_ring0().log.info(&format!(
                        "[GC] trial: reachable nodes={} edges={} (roots=host_handles+modules)",
                        nodes, edges
                    ));
                }
                // Update counters
                let dur = started.elapsed();
                let ms = dur.as_millis() as u64; // saturating in practice; trials are short
                self.trial_duration_last_ms.store(ms, Ordering::Relaxed);
                self.collect_count_total.fetch_add(1, Ordering::Relaxed);
                // Reason flags derive from current env thresholds vs last windows reaching triggers
                // Note: we set flags in safepoint() where triggers were decided.
            }
            _ => {}
        }
    }
}
impl GcController {
    /// Last trial-collection reachability result as `(nodes, edges)`.
    pub fn trial_reachability_last(&self) -> (u64, u64) {
        let nodes = self.trial_nodes_last.load(Ordering::Relaxed);
        let edges = self.trial_edges_last.load(Ordering::Relaxed);
        (nodes, edges)
    }

    /// Collection counts as `(total, triggered_by_safepoint, triggered_by_alloc)`.
    pub fn collection_totals(&self) -> (u64, u64, u64) {
        let total = self.collect_count_total.load(Ordering::Relaxed);
        let by_sp = self.collect_by_sp.load(Ordering::Relaxed);
        let by_alloc = self.collect_by_alloc.load(Ordering::Relaxed);
        (total, by_sp, by_alloc)
    }

    /// Duration of the most recent trial collection, in milliseconds.
    pub fn trial_duration_last_ms(&self) -> u64 {
        self.trial_duration_last_ms.load(Ordering::Relaxed)
    }

    /// Trigger bitflags of the most recent trial collection (1=sp, 2=alloc).
    pub fn trial_reason_last_bits(&self) -> u64 {
        self.trial_reason_last.load(Ordering::Relaxed)
    }
}