## Documentation updates
- CURRENT_TASK.md: recorded Phase 25.2 completion
- Updated the phase-25.1b/e/q/25.2 READMEs
- Added json_v0_bridge/README.md

## Test file cleanup
- Moved the vtable_* tests to tests/archive/ (6 files)
- Added the json_program_loop.rs test

## Code cleanup
- Minor fixes to the plugins (egui / python-compiler)
- Updated benchmarks.rs and instance_v2.rs
- Minor adjustments to MIR-related files

## Overall results
With Phase 25.2 complete:
- Implemented unified LoopSnapshotMergeBox management
- Fixed the root cause of the ValueId(1283) bug
- Removed ~35 lines of code (16% of the 210-line target)
- All 11 tests PASS; 3 execution test cases PASS

/*!
 * Nyash Performance Benchmarks
 *
 * Originally compared execution performance across backends:
 * - Interpreter (AST direct execution): removed with the legacy interpreter
 * - VM (MIR -> VM execution): removed with vm-legacy
 * - WASM (MIR -> WASM execution): requires the `wasm-backend` feature
 */

#[cfg(feature = "wasm-backend")]
use crate::backend::WasmBackend;
// VM import removed with vm-legacy.
// Interpreter and MirCompiler removed from the default build; the
// wasm-backend path below still needs the parser and the MIR compiler.
// The module paths here are assumed and may need adjusting to the
// crate's actual layout.
#[cfg(feature = "wasm-backend")]
use crate::mir::MirCompiler;
#[cfg(feature = "wasm-backend")]
use crate::parser::NyashParser;
use std::fs;
#[cfg(feature = "wasm-backend")]
use std::time::Instant;
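
// Minimal usage sketch (assumes this module is reachable from the host
// binary; the exact path depends on the crate layout):
//
//     let suite = BenchmarkSuite::new(10);
//     let results = suite.run_all();
//     suite.print_results(&results);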

/// Timing result for one benchmark program on one backend.
#[derive(Debug)]
pub struct BenchmarkResult {
    pub name: String,
    pub backend: String,
    pub duration_ms: f64,
    pub iterations: u32,
    pub avg_duration_ms: f64,
}

/// Runs each benchmark program a fixed number of iterations per backend.
pub struct BenchmarkSuite {
    iterations: u32,
}
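
// Hedged sketch: a BenchmarkResult can also be built by hand, e.g. to feed
// print_results() from an external harness (the field values below are
// illustrative, not measured):
//
//     let r = BenchmarkResult {
//         name: "bench_light".to_string(),
//         backend: "WASM".to_string(),
//         duration_ms: 42.0,
//         iterations: 10,
//         avg_duration_ms: 4.2,
//     };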

impl BenchmarkSuite {
    pub fn new(iterations: u32) -> Self {
        Self { iterations }
    }

    /// Run comprehensive benchmark across all backends
    pub fn run_all(&self) -> Vec<BenchmarkResult> {
        // `results` is only pushed to when the wasm-backend feature is on,
        // so it only needs to be mutable in that configuration.
        #[cfg(feature = "wasm-backend")]
        let mut results = Vec::new();
        #[cfg(not(feature = "wasm-backend"))]
        let results = Vec::new();

        let benchmarks = [
            ("bench_light", "benchmarks/bench_light.hako"),
            ("bench_medium", "benchmarks/bench_medium.hako"),
            ("bench_heavy", "benchmarks/bench_heavy.hako"),
        ];

        for (name, file_path) in &benchmarks {
            println!("🚀 Running benchmark: {}", name);

            // Test if file exists and is readable
            if let Ok(_source) = fs::read_to_string(file_path) {
                // Run on all remaining backends
                // Interpreter benchmark removed with legacy interpreter
                // VM benchmark removed with vm-legacy

                #[cfg(feature = "wasm-backend")]
                if let Ok(wasm_result) = self.run_wasm_benchmark(name, &_source) {
                    results.push(wasm_result);
                }
            } else {
                println!("⚠️ Could not read benchmark file: {}", file_path);
            }
        }

        results
    }
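
    // To add a benchmark, drop a .hako program under benchmarks/ and register
    // it in the `benchmarks` table above; e.g. a hypothetical entry:
    //     ("bench_custom", "benchmarks/bench_custom.hako")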

    // Interpreter benchmark removed with legacy interpreter

    // VM benchmark removed with vm-legacy
    #[allow(dead_code)]
    fn run_vm_benchmark(
        &self,
        _name: &str,
        _source: &str,
    ) -> Result<BenchmarkResult, Box<dyn std::error::Error>> {
        Err("VM benchmark removed with vm-legacy".into())
    }

    /// Run benchmark on WASM backend
    #[cfg(feature = "wasm-backend")]
    fn run_wasm_benchmark(
        &self,
        name: &str,
        source: &str,
    ) -> Result<BenchmarkResult, Box<dyn std::error::Error>> {
        let mut total_duration = 0.0;

        for i in 0..self.iterations {
            let start = Instant::now();

            // Parse -> MIR -> WASM
            let ast = NyashParser::parse_from_string(source)?;
            let mut compiler = MirCompiler::new();
            let compile_result = compiler.compile(ast)?;

            // Generate (but do not execute) WASM
            let mut wasm_backend = WasmBackend::new();
            let _wat_output = wasm_backend.compile_module(compile_result.module)?;
            // Note: for now we only measure compilation time.
            // Full WASM execution would require wasmtime integration.

            let duration = start.elapsed();
            total_duration += duration.as_secs_f64() * 1000.0; // Convert to ms

            if i == 0 {
                println!("  🌐 WASM: First run completed (compilation only)");
            }
        }

        Ok(BenchmarkResult {
            name: name.to_string(),
            backend: "WASM".to_string(),
            duration_ms: total_duration,
            iterations: self.iterations,
            avg_duration_ms: total_duration / (self.iterations as f64),
        })
    }
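
    // Hedged sketch of the wasmtime integration mentioned above (not wired
    // in: it assumes a `wasmtime` dependency, that compile_module returns
    // WAT text as a String, and that the module exports an entry point
    // named "main" returning i32):
    //
    //     let engine = wasmtime::Engine::default();
    //     let module = wasmtime::Module::new(&engine, _wat_output.as_bytes())?;
    //     let mut store = wasmtime::Store::new(&engine, ());
    //     let instance = wasmtime::Instance::new(&mut store, &module, &[])?;
    //     let main = instance.get_typed_func::<(), i32>(&mut store, "main")?;
    //     let _result = main.call(&mut store, ())?;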

    /// Print benchmark results in a nice format
    pub fn print_results(&self, results: &[BenchmarkResult]) {
        println!("\n📊 Nyash Performance Benchmark Results");
        println!("=====================================");
        println!("Iterations per test: {}", self.iterations);
        println!();

        // Group by benchmark name
        let mut benchmarks: std::collections::HashMap<String, Vec<&BenchmarkResult>> =
            std::collections::HashMap::new();
        for result in results {
            benchmarks
                .entry(result.name.clone())
                .or_insert_with(Vec::new)
                .push(result);
        }

        for (bench_name, bench_results) in benchmarks {
            println!("🎯 {}", bench_name);
            println!("   Backend      | Avg Time (ms) | Total Time (ms) | Speed Ratio");
            println!("   -------------|---------------|-----------------|------------");

            // Find fastest for ratio calculation (every group has at least
            // one entry, so the unwraps cannot fail)
            let fastest = bench_results
                .iter()
                .min_by(|a, b| a.avg_duration_ms.partial_cmp(&b.avg_duration_ms).unwrap())
                .unwrap();

            for result in &bench_results {
                // e.g. 6.0 ms against a fastest of 4.0 ms prints "1.50x"
                let ratio = result.avg_duration_ms / fastest.avg_duration_ms;
                println!(
                    "   {:12} | {:11.3} | {:13.1} | {:8.2}x",
                    result.backend, result.avg_duration_ms, result.duration_ms, ratio
                );
            }
            println!();
        }

        // Summary: average per backend, skipping backends with no results.
        // (The old unguarded division produced NaN once the Interpreter and
        // VM backends were removed, because their result counts are zero.)
        println!("💡 Performance Summary:");
        let backend_avg = |backend: &str| -> Option<f64> {
            let times: Vec<f64> = results
                .iter()
                .filter(|r| r.backend == backend)
                .map(|r| r.avg_duration_ms)
                .collect();
            if times.is_empty() {
                None
            } else {
                Some(times.iter().sum::<f64>() / times.len() as f64)
            }
        };

        let interpreter_avg = backend_avg("Interpreter");
        let vm_avg = backend_avg("VM");
        let wasm_avg = backend_avg("WASM");

        println!("  📈 Average across all benchmarks:");
        if let Some(avg) = interpreter_avg {
            println!("  Interpreter: {:.2} ms", avg);
        }
        if let Some(avg) = vm_avg {
            match interpreter_avg {
                Some(base) => println!(
                    "  VM:          {:.2} ms ({:.1}x faster than interpreter)",
                    avg,
                    base / avg
                ),
                None => println!("  VM:          {:.2} ms", avg),
            }
        }
        if let Some(avg) = wasm_avg {
            match interpreter_avg {
                Some(base) => println!(
                    "  WASM:        {:.2} ms ({:.1}x faster than interpreter)",
                    avg,
                    base / avg
                ),
                None => println!("  WASM:        {:.2} ms", avg),
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Legacy benchmark smoke test (used for measurements before Phase 15).
    /// It is not required by the current CI / development flow, so it is
    /// excluded from the default test run.
    #[test]
    #[ignore]
    fn test_benchmark_light() {
        let suite = BenchmarkSuite::new(3); // Only 3 iterations for testing
        let results = suite.run_all();

        // With only the WASM backend left, each readable benchmark file
        // contributes at most one result (3 programs -> up to 3 results).
        assert!(results.len() >= 3);

        suite.print_results(&results);
    }
}