docs/ci: selfhost bootstrap/exe-first workflows; add ny-llvmc scaffolding + JSON v0 schema validation; plan: unify to Nyash ABI v2 (no backwards compat)

This commit is contained in:
Selfhosting Dev
2025-09-17 20:33:19 +09:00
parent a5054a271b
commit 4ea3ca2685
56 changed files with 2275 additions and 1623 deletions

View File

@ -4,97 +4,74 @@ use once_cell::sync::OnceCell;
use std::sync::{Arc, RwLock};
use super::scheduler::CancellationToken;
use super::{gc::GcHooks, scheduler::Scheduler};
use super::{gc::BarrierKind, gc::GcHooks, scheduler::Scheduler};
// NOTE(review): the statics below are the pre-refactor per-concern cells; this
// diff also introduces a unified GlobalHooksState guarded by a single RwLock,
// which supersedes them. Kept verbatim as rendered.
static GLOBAL_GC: OnceCell<RwLock<Option<Arc<dyn GcHooks>>>> = OnceCell::new();
static GLOBAL_SCHED: OnceCell<RwLock<Option<Arc<dyn Scheduler>>>> = OnceCell::new();
// Phase 2 scaffold: current task group's cancellation token (no-op default)
static GLOBAL_CUR_TOKEN: OnceCell<RwLock<Option<CancellationToken>>> = OnceCell::new();
// Phase 2 scaffold: current group's child futures registry (best-effort)
static GLOBAL_GROUP_FUTURES: OnceCell<RwLock<Vec<crate::boxes::future::FutureWeak>>> =
OnceCell::new();
// Strong ownership list for implicit group (pre-TaskGroup actualization)
static GLOBAL_GROUP_STRONG: OnceCell<RwLock<Vec<crate::boxes::future::FutureBox>>> =
OnceCell::new();
// Simple scope depth counter for implicit group (join-at-scope-exit footing)
static TASK_SCOPE_DEPTH: OnceCell<RwLock<usize>> = OnceCell::new();
// TaskGroup scope stack (explicit group ownership per function scope)
static TASK_GROUP_STACK: OnceCell<
RwLock<Vec<std::sync::Arc<crate::boxes::task_group_box::TaskGroupInner>>>,
> = OnceCell::new();
// Unified global runtime hooks state (single lock for consistency)
struct GlobalHooksState {
// GC hooks for the current runtime; None until set_from_runtime/set_gc is called.
gc: Option<Arc<dyn GcHooks>>,
// Global scheduler, if one was registered by the runtime.
sched: Option<Arc<dyn Scheduler>>,
// Cancellation token of the current task group (a fresh no-op token is used when None).
cur_token: Option<CancellationToken>,
// Weak registry of futures spawned into the implicit global group (best-effort).
futures: Vec<crate::boxes::future::FutureWeak>,
// Strong ownership list for the implicit group (pre-TaskGroup actualization).
strong: Vec<crate::boxes::future::FutureBox>,
// Nesting depth of task scopes; a best-effort join runs when it returns to 0.
scope_depth: usize,
// Stack of explicit TaskGroups, one pushed per active function scope.
group_stack: Vec<std::sync::Arc<crate::boxes::task_group_box::TaskGroupInner>>,
}
fn gc_cell() -> &'static RwLock<Option<Arc<dyn GcHooks>>> {
GLOBAL_GC.get_or_init(|| RwLock::new(None))
impl GlobalHooksState {
fn new() -> Self {
Self {
gc: None,
sched: None,
cur_token: None,
futures: Vec::new(),
strong: Vec::new(),
scope_depth: 0,
group_stack: Vec::new(),
}
}
}
// Legacy accessor: lazily initializes the scheduler cell to None on first use.
// Superseded in this diff by the unified state() lock.
fn sched_cell() -> &'static RwLock<Option<Arc<dyn Scheduler>>> {
GLOBAL_SCHED.get_or_init(|| RwLock::new(None))
}
// Legacy accessor: lazily initializes the current-group token cell to None.
// Superseded in this diff by the unified state() lock.
fn token_cell() -> &'static RwLock<Option<CancellationToken>> {
GLOBAL_CUR_TOKEN.get_or_init(|| RwLock::new(None))
}
// Legacy accessor: lazily initializes the weak futures registry to an empty Vec.
// Superseded in this diff by the unified state() lock.
fn futures_cell() -> &'static RwLock<Vec<crate::boxes::future::FutureWeak>> {
GLOBAL_GROUP_FUTURES.get_or_init(|| RwLock::new(Vec::new()))
}
// Legacy accessor: lazily initializes the strong ownership list to an empty Vec.
// Superseded in this diff by the unified state() lock.
fn strong_cell() -> &'static RwLock<Vec<crate::boxes::future::FutureBox>> {
GLOBAL_GROUP_STRONG.get_or_init(|| RwLock::new(Vec::new()))
}
// Legacy accessor: lazily initializes the scope-depth counter to 0.
// Superseded in this diff by the unified state() lock.
fn scope_depth_cell() -> &'static RwLock<usize> {
TASK_SCOPE_DEPTH.get_or_init(|| RwLock::new(0))
}
fn group_stack_cell(
) -> &'static RwLock<Vec<std::sync::Arc<crate::boxes::task_group_box::TaskGroupInner>>> {
TASK_GROUP_STACK.get_or_init(|| RwLock::new(Vec::new()))
// Process-wide unified runtime-hooks state, created lazily on first access.
static GLOBAL_STATE: OnceCell<RwLock<GlobalHooksState>> = OnceCell::new();
// Accessor for the unified state; initializes GLOBAL_STATE with an empty
// GlobalHooksState the first time it is called.
fn state() -> &'static RwLock<GlobalHooksState> {
GLOBAL_STATE.get_or_init(|| RwLock::new(GlobalHooksState::new()))
}
// NOTE(review): diff rendering — this span interleaves the removed per-cell
// implementation (gc_cell()/sched_cell()/token_cell()/... writes) with its
// replacement (a single state().write() critical section that sets gc/sched,
// seeds cur_token, and resets the registries), so braces do not balance as
// plain Rust. Code kept verbatim; do not edit without the real file.
pub fn set_from_runtime(rt: &crate::runtime::nyash_runtime::NyashRuntime) {
if let Ok(mut g) = gc_cell().write() {
*g = Some(rt.gc.clone());
}
if let Ok(mut s) = sched_cell().write() {
*s = rt.scheduler.as_ref().cloned();
}
// Optional: initialize a fresh token for the runtime's root group (Phase 2 wiring)
if let Ok(mut t) = token_cell().write() {
if t.is_none() {
*t = Some(CancellationToken::new());
if let Ok(mut st) = state().write() {
st.gc = Some(rt.gc.clone());
st.sched = rt.scheduler.as_ref().cloned();
if st.cur_token.is_none() {
st.cur_token = Some(CancellationToken::new());
}
}
// Reset group futures registry on new runtime
if let Ok(mut f) = futures_cell().write() {
f.clear();
}
if let Ok(mut s) = strong_cell().write() {
s.clear();
}
if let Ok(mut d) = scope_depth_cell().write() {
*d = 0;
}
if let Ok(mut st) = group_stack_cell().write() {
st.clear();
st.futures.clear();
st.strong.clear();
st.scope_depth = 0;
st.group_stack.clear();
}
}
// NOTE(review): diff rendering — the old gc_cell() write path and the new
// state() write path are interleaved here; braces do not balance as plain
// Rust. Both versions store the given GC hooks globally. Kept verbatim.
pub fn set_gc(gc: Arc<dyn GcHooks>) {
if let Ok(mut g) = gc_cell().write() {
*g = Some(gc);
if let Ok(mut st) = state().write() {
st.gc = Some(gc);
}
}
// NOTE(review): diff rendering — old sched_cell() write path interleaved with
// the new state() write path; braces do not balance as plain Rust. Both
// versions register the given scheduler globally. Kept verbatim.
pub fn set_scheduler(s: Arc<dyn Scheduler>) {
if let Ok(mut w) = sched_cell().write() {
*w = Some(s);
if let Ok(mut st) = state().write() {
st.sched = Some(s);
}
}
// NOTE(review): diff rendering — old token_cell() write path interleaved with
// the new state() write path; braces do not balance as plain Rust. Both
// versions store the token as the current group's token. Kept verbatim.
/// Set the current task group's cancellation token (scaffold).
pub fn set_current_group_token(tok: CancellationToken) {
if let Ok(mut w) = token_cell().write() {
*w = Some(tok);
if let Ok(mut st) = state().write() {
st.cur_token = Some(tok);
}
}
/// Get the current task group's cancellation token (no-op default).
pub fn current_group_token() -> CancellationToken {
if let Ok(r) = token_cell().read() {
if let Some(t) = r.as_ref() {
if let Ok(st) = state().read() {
if let Some(t) = st.cur_token.as_ref() {
return t.clone();
}
}
@ -103,21 +80,17 @@ pub fn current_group_token() -> CancellationToken {
// NOTE(review): diff rendering — the old implementation (group_stack_cell()
// read + futures_cell()/strong_cell() writes) is interleaved with the new one
// (single state().write()); braces do not balance as plain Rust. In both
// versions: push into the top explicit TaskGroup if present, otherwise fall
// back to the implicit global group's weak+strong lists. Kept verbatim.
/// Register a Future into the current group's registry (best-effort; clones share state)
pub fn register_future_to_current_group(fut: &crate::boxes::future::FutureBox) {
// Prefer explicit current TaskGroup at top of stack
if let Ok(st) = group_stack_cell().read() {
if let Some(inner) = st.last() {
if let Ok(mut st) = state().write() {
// Prefer explicit current TaskGroup at top of stack
if let Some(inner) = st.group_stack.last() {
if let Ok(mut v) = inner.strong.lock() {
v.push(fut.clone());
return;
}
}
}
// Fallback to implicit global group
if let Ok(mut list) = futures_cell().write() {
list.push(fut.downgrade());
}
if let Ok(mut s) = strong_cell().write() {
s.push(fut.clone());
// Fallback to implicit global group
st.futures.push(fut.downgrade());
st.strong.push(fut.clone());
}
}
@ -127,26 +100,15 @@ pub fn join_all_registered_futures(timeout_ms: u64) {
let deadline = Instant::now() + Duration::from_millis(timeout_ms);
loop {
let mut all_ready = true;
// purge list of dropped or completed futures opportunistically
{
// purge weak list: keep only upgradeable futures
if let Ok(mut list) = futures_cell().write() {
list.retain(|fw| fw.is_ready().is_some());
}
// purge strong list: remove completed futures to reduce retention
if let Ok(mut s) = strong_cell().write() {
s.retain(|f| !f.ready());
}
}
// check readiness
{
if let Ok(list) = futures_cell().read() {
for fw in list.iter() {
if let Some(ready) = fw.is_ready() {
if !ready {
all_ready = false;
break;
}
// purge + readiness check under single state lock (short critical sections)
if let Ok(mut st) = state().write() {
st.futures.retain(|fw| fw.is_ready().is_some());
st.strong.retain(|f| !f.ready());
for fw in st.futures.iter() {
if let Some(ready) = fw.is_ready() {
if !ready {
all_ready = false;
break;
}
}
}
@ -161,22 +123,18 @@ pub fn join_all_registered_futures(timeout_ms: u64) {
std::thread::yield_now();
}
// Final sweep
if let Ok(mut s) = strong_cell().write() {
s.retain(|f| !f.ready());
}
if let Ok(mut list) = futures_cell().write() {
list.retain(|fw| matches!(fw.is_ready(), Some(false)));
if let Ok(mut st) = state().write() {
st.strong.retain(|f| !f.ready());
st.futures.retain(|fw| matches!(fw.is_ready(), Some(false)));
}
}
/// Push a task scope (footing). On pop of the outermost scope, perform a best-effort join.
pub fn push_task_scope() {
if let Ok(mut d) = scope_depth_cell().write() {
*d += 1;
}
// Push a new explicit TaskGroup for this scope
if let Ok(mut st) = group_stack_cell().write() {
st.push(std::sync::Arc::new(
if let Ok(mut st) = state().write() {
st.scope_depth += 1;
// Push a new explicit TaskGroup for this scope
st.group_stack.push(std::sync::Arc::new(
crate::boxes::task_group_box::TaskGroupInner {
strong: std::sync::Mutex::new(Vec::new()),
},
@ -190,19 +148,13 @@ pub fn push_task_scope() {
pub fn pop_task_scope() {
let mut do_join = false;
let mut popped: Option<std::sync::Arc<crate::boxes::task_group_box::TaskGroupInner>> = None;
{
if let Ok(mut d) = scope_depth_cell().write() {
if *d > 0 {
*d -= 1;
}
if *d == 0 {
do_join = true;
}
if let Ok(mut st) = state().write() {
if st.scope_depth > 0 {
st.scope_depth -= 1;
}
}
// Pop explicit group for this scope
if let Ok(mut st) = group_stack_cell().write() {
popped = st.pop();
if st.scope_depth == 0 { do_join = true; }
// Pop explicit group for this scope
popped = st.group_stack.pop();
}
if do_join {
let ms: u64 = std::env::var("NYASH_TASK_SCOPE_JOIN_MS")
@ -240,13 +192,11 @@ pub fn pop_task_scope() {
/// Perform a runtime safepoint and poll the scheduler if available.
pub fn safepoint_and_poll() {
if let Ok(g) = gc_cell().read() {
if let Some(gc) = g.as_ref() {
if let Ok(st) = state().read() {
if let Some(gc) = st.gc.as_ref() {
gc.safepoint();
}
}
if let Ok(s) = sched_cell().read() {
if let Some(sched) = s.as_ref() {
if let Some(sched) = st.sched.as_ref() {
sched.poll();
}
}
@ -255,8 +205,8 @@ pub fn safepoint_and_poll() {
/// Try to schedule a task on the global scheduler. Returns true if scheduled.
pub fn spawn_task(name: &str, f: Box<dyn FnOnce() + Send + 'static>) -> bool {
// If a scheduler is registered, enqueue the task; otherwise run inline.
if let Ok(s) = sched_cell().read() {
if let Some(sched) = s.as_ref() {
if let Ok(st) = state().read() {
if let Some(sched) = st.sched.as_ref() {
sched.spawn(name, f);
return true;
}
@ -272,8 +222,8 @@ pub fn spawn_task_with_token(
token: crate::runtime::scheduler::CancellationToken,
f: Box<dyn FnOnce() + Send + 'static>,
) -> bool {
if let Ok(s) = sched_cell().read() {
if let Some(sched) = s.as_ref() {
if let Ok(st) = state().read() {
if let Some(sched) = st.sched.as_ref() {
sched.spawn_with_token(name, token, f);
return true;
}
@ -284,8 +234,8 @@ pub fn spawn_task_with_token(
/// Spawn a delayed task via scheduler if available; returns true if scheduled.
pub fn spawn_task_after(delay_ms: u64, name: &str, f: Box<dyn FnOnce() + Send + 'static>) -> bool {
if let Ok(s) = sched_cell().read() {
if let Some(sched) = s.as_ref() {
if let Ok(st) = state().read() {
if let Some(sched) = st.sched.as_ref() {
sched.spawn_after(delay_ms, name, f);
return true;
}
@ -297,3 +247,21 @@ pub fn spawn_task_after(delay_ms: u64, name: &str, f: Box<dyn FnOnce() + Send +
});
false
}
/// Forward a GC barrier event to the currently registered GC hooks (if any).
///
/// Best-effort: a poisoned state lock or absent GC hooks makes this a no-op.
pub fn gc_barrier(kind: BarrierKind) {
    // Hold the read guard across the hook call, mirroring the other accessors.
    let guard = match state().read() {
        Ok(g) => g,
        Err(_) => return, // poisoned lock: skip the barrier rather than panic
    };
    if let Some(hooks) = guard.gc.as_ref() {
        hooks.barrier(kind);
    }
}
/// Report an allocation to the current GC hooks (best-effort)
///
/// Silently does nothing when no GC is registered or the state lock is poisoned.
pub fn gc_alloc(bytes: u64) {
    // Hold the read guard across the hook call, mirroring the other accessors.
    let guard = match state().read() {
        Ok(g) => g,
        Err(_) => return, // poisoned lock: drop the report rather than panic
    };
    if let Some(hooks) = guard.gc.as_ref() {
        hooks.alloc(bytes);
    }
}