diff --git a/docs/development/current/main/phase-136-context-box-progress.md b/docs/development/current/main/phase-136-context-box-progress.md index be78a6b0..026416d4 100644 --- a/docs/development/current/main/phase-136-context-box-progress.md +++ b/docs/development/current/main/phase-136-context-box-progress.md @@ -4,7 +4,7 @@ builder.rs の 1219 行を責任ごとに Context Box に分割し、保守性・テスト容易性を向上させる段階的リファクタリング。 -## 完了した Context (1/7) +## 完了した Context (2/7) ### ✅ TypeContext (Step 1) - 完了 @@ -33,14 +33,41 @@ builder.rs の 1219 行を責任ごとに Context Box に分割し、保守性 - 16 ファイルで 113 箇所が deprecated フィールドを使用中 - 段階的移行により破壊的変更なし +**コミット**: 076f193f + +### ✅ CoreContext (Step 2) - 完了 + +**実装日**: 2025-12-15 + +**抽出したフィールド** (5個): +- `value_gen: ValueIdGenerator` - SSA 値 ID 生成器 +- `block_gen: BasicBlockIdGenerator` - 基本ブロック ID 生成器 +- `next_binding_id: u32` - BindingId 割り当てカウンタ (Phase 74) +- `temp_slot_counter: u32` - 一時ピンスロットカウンタ +- `debug_join_counter: u32` - デバッグスコープ join ID カウンタ + +**ファイル**: +- `/home/tomoaki/git/hakorune-selfhost/src/mir/builder/core_context.rs` (新規作成) + +**統合方法**: +- `MirBuilder` に `core_ctx: CoreContext` フィールドを追加 +- 既存フィールドは `#[deprecated]` でマーク(後方互換性維持) +- ID 割り当てメソッド (`next_value_id()`, `allocate_binding_id()`, `debug_next_join_id()`) が core_ctx を SSOT として使用し、legacy フィールドを同期 +- 新規ヘルパー `next_block_id()` を追加し、30 箇所の `block_gen.next()` 呼び出しを置換 + +**テスト結果**: +- ✅ `cargo build --release` 成功 (警告のみ、193 warnings) +- ✅ `cargo test --release --lib` - 1004/1004 PASS (7 tests 追加) +- ✅ `phase135_trim_mir_verify.sh` - PASS +- ✅ `phase132_exit_phi_parity.sh` - 3/3 PASS + +**影響範囲**: +- builder 内 30+ ファイルで `block_gen.next()` を `next_block_id()` に自動置換 +- 段階的移行により破壊的変更なし + **コミット**: (次回コミット時に記載) -## 残りの Context (6/7) - -### 2. CoreContext (計画中) -- `value_gen: ValueIdGenerator` -- `block_gen: BasicBlockIdGenerator` -- `next_fn_id: u32` (将来追加予定) +## 残りの Context (5/7) ### 3. ScopeContext (計画中) - `lexical_scope_stack: Vec` @@ -79,10 +106,10 @@ builder.rs の 1219 行を責任ごとに Context Box に分割し、保守性 ## 次のステップ -**優先順位 2**: CoreContext 抽出 -- ValueId/BlockId 生成の中核部分 -- 依存関係が少なく、分離が容易 -- テスト影響範囲が小さい +**優先順位 3**: ScopeContext 抽出 +- スコープスタック管理の集約 +- 制御フロースタック (loop/if/try) の統合 +- 関数コンテキスト管理の整理 ## 参考資料 diff --git a/src/mir/builder.rs b/src/mir/builder.rs index 29d67bc2..9860c869 100644 --- a/src/mir/builder.rs +++ b/src/mir/builder.rs @@ -19,6 +19,7 @@ mod builder_calls; mod call_resolution; // ChatGPT5 Pro: Type-safe call resolution utilities mod calls; // Call system modules (refactored from builder_calls) mod context; // BoxCompilationContext - 箱理論による静的Boxコンパイル時のコンテキスト分離 +mod core_context; // Phase 136 follow-up (Step 2/7): CoreContext extraction mod decls; // declarations lowering split mod exprs; // expression lowering split mod exprs_call; @@ -78,10 +79,19 @@ pub struct MirBuilder { /// Current basic block being built pub(super) current_block: Option, - /// Value ID generator + /// Phase 136 follow-up (Step 2/7): Core ID generation context + /// Consolidates value_gen, block_gen, next_binding_id, temp_slot_counter, debug_join_counter. + /// Direct field access for backward compatibility (migration in progress). 
+ pub(super) core_ctx: core_context::CoreContext, + + /// [DEPRECATED] Value ID generator + /// Phase 136: Moved to core_ctx.value_gen (backward compat wrapper) + #[deprecated(note = "Use core_ctx.value_gen instead")] pub(super) value_gen: ValueIdGenerator, - /// Basic block ID generator + /// [DEPRECATED] Basic block ID generator + /// Phase 136: Moved to core_ctx.block_gen (backward compat wrapper) + #[deprecated(note = "Use core_ctx.block_gen instead")] pub(super) block_gen: BasicBlockIdGenerator, /// 箱理論: Static boxコンパイル時のコンテキスト分離 @@ -186,10 +196,9 @@ pub struct MirBuilder { /// Cleared after JoinIR merge completes. pub(super) reserved_value_ids: HashSet, - /// Phase 74: BindingId allocation counter (parallel to ValueId) - /// Monotonically increasing counter for lexical variable binding IDs. - /// Allocated via `allocate_binding_id()` method. - /// Independent from ValueId allocation to support Phase 75+ ScopeManager migration. + /// [DEPRECATED] Phase 74: BindingId allocation counter (parallel to ValueId) + /// Phase 136: Moved to core_ctx.next_binding_id (backward compat wrapper) + #[deprecated(note = "Use core_ctx.next_binding_id instead")] pub next_binding_id: u32, /// Phase 74: BindingId mapping for lexical variable bindings @@ -231,7 +240,9 @@ pub struct MirBuilder { /// Hint sink (zero-cost guidance; currently no-op) pub(super) hint_sink: crate::mir::hints::HintSink, - /// Internal counter for temporary pin slots (block-crossing ephemeral values) + /// [DEPRECATED] Internal counter for temporary pin slots (block-crossing ephemeral values) + /// Phase 136: Moved to core_ctx.temp_slot_counter (backward compat wrapper) + #[deprecated(note = "Use core_ctx.temp_slot_counter instead")] temp_slot_counter: u32, /// If true, skip entry materialization of pinned slots on the next start_new_block call. suppress_pin_entry_copy_next: bool, @@ -241,7 +252,9 @@ pub struct MirBuilder { // ---------------------- /// Stack of region identifiers like "loop#1/header" or "join#3/join". debug_scope_stack: Vec, - /// Monotonic counter for region IDs (deterministic across a run). + /// [DEPRECATED] Monotonic counter for region IDs (deterministic across a run). 
+ /// Phase 136: Moved to core_ctx.debug_join_counter (backward compat wrapper) + #[deprecated(note = "Use core_ctx.debug_join_counter instead")] debug_join_counter: u32, /// Local SSA cache: ensure per-block materialization for critical operands (e.g., recv) @@ -289,13 +302,22 @@ impl MirBuilder { /// Create a new MIR builder pub fn new() -> Self { let plugin_method_sigs = plugin_sigs::load_plugin_method_sigs(); + let core_ctx = core_context::CoreContext::new(); + // フェーズM: no_phi_mode初期化削除 + #[allow(deprecated)] Self { current_module: None, current_function: None, current_block: None, + + // Phase 136 Step 2/7: Core context (new SSOT) + core_ctx, + + // Legacy fields (kept for backward compatibility, synced with core_ctx) value_gen: ValueIdGenerator::new(), block_gen: BasicBlockIdGenerator::new(), + compilation_context: None, // 箱理論: デフォルトは従来モード type_ctx: type_context::TypeContext::new(), // Phase 136: Type context variable_map: BTreeMap::new(), // Phase 25.1: 決定性確保 @@ -322,7 +344,7 @@ impl MirBuilder { fn_body_ast: None, // Phase 200-C: Initialize to None reserved_value_ids: HashSet::new(), // Phase 201-A: Initialize to empty - next_binding_id: 0, // Phase 74: Initialize BindingId counter + next_binding_id: 0, // Phase 74: Initialize BindingId counter (legacy) binding_map: BTreeMap::new(), // Phase 74: Initialize BindingId mapping loop_header_stack: Vec::new(), @@ -337,12 +359,12 @@ impl MirBuilder { cleanup_allow_return: false, cleanup_allow_throw: false, hint_sink: crate::mir::hints::HintSink::new(), - temp_slot_counter: 0, + temp_slot_counter: 0, // Legacy (synced with core_ctx) suppress_pin_entry_copy_next: false, // Debug scope context debug_scope_stack: Vec::new(), - debug_join_counter: 0, + debug_join_counter: 0, // Legacy (synced with core_ctx) local_ssa_map: HashMap::new(), schedule_mat_map: HashMap::new(), @@ -410,14 +432,11 @@ impl MirBuilder { /// // next_value_id() -> ValueId(20) /// // } /// ``` + #[allow(deprecated)] pub fn allocate_binding_id(&mut self) -> super::BindingId { - let id = super::BindingId::new(self.next_binding_id); - self.next_binding_id = self.next_binding_id.saturating_add(1); - debug_assert!( - self.next_binding_id < u32::MAX, - "BindingId counter overflow: {} (parallel to ValueId allocation)", - self.next_binding_id - ); + // Phase 136 Step 2/7: Use core_ctx as SSOT, sync legacy field + let id = self.core_ctx.next_binding(); + self.next_binding_id = self.core_ctx.next_binding_id; id } @@ -439,9 +458,11 @@ impl MirBuilder { // Debug scope helpers (region_id for DebugHub events) // ---------------------- #[inline] + #[allow(deprecated)] pub(crate) fn debug_next_join_id(&mut self) -> u32 { - let id = self.debug_join_counter; - self.debug_join_counter = self.debug_join_counter.saturating_add(1); + // Phase 136 Step 2/7: Use core_ctx as SSOT, sync legacy field + let id = self.core_ctx.next_debug_join(); + self.debug_join_counter = self.core_ctx.debug_join_counter; id } diff --git a/src/mir/builder/calls/lowering.rs b/src/mir/builder/calls/lowering.rs index 39be289c..a0254741 100644 --- a/src/mir/builder/calls/lowering.rs +++ b/src/mir/builder/calls/lowering.rs @@ -76,7 +76,7 @@ impl MirBuilder { ) -> Result<(), String> { let signature = function_lowering::prepare_static_method_signature(func_name.clone(), params, body); - let entry = self.block_gen.next(); + let entry = self.next_block_id(); let function = self.new_function_with_metadata(signature, entry); // 現在の関数・ブロックを保存 @@ -258,7 +258,7 @@ impl MirBuilder { ) -> Result<(), String> { let signature = 
function_lowering::prepare_method_signature(func_name, box_name, params, body); - let entry = self.block_gen.next(); + let entry = self.next_block_id(); let function = self.new_function_with_metadata(signature, entry); // 現在の関数・ブロックを保存 diff --git a/src/mir/builder/control_flow/exception/try_catch.rs b/src/mir/builder/control_flow/exception/try_catch.rs index dd55cf4d..ee5b691e 100644 --- a/src/mir/builder/control_flow/exception/try_catch.rs +++ b/src/mir/builder/control_flow/exception/try_catch.rs @@ -32,14 +32,14 @@ pub(in crate::mir::builder) fn cf_try_catch( return Ok(result); } - let try_block = builder.block_gen.next(); - let catch_block = builder.block_gen.next(); + let try_block = builder.next_block_id(); + let catch_block = builder.next_block_id(); let finally_block = if finally_body.is_some() { - Some(builder.block_gen.next()) + Some(builder.next_block_id()) } else { None }; - let exit_block = builder.block_gen.next(); + let exit_block = builder.next_block_id(); // Snapshot deferred-return state let saved_defer_active = builder.return_defer_active; diff --git a/src/mir/builder/control_flow/joinir/merge/block_allocator.rs b/src/mir/builder/control_flow/joinir/merge/block_allocator.rs index 6c57776c..3244b348 100644 --- a/src/mir/builder/control_flow/joinir/merge/block_allocator.rs +++ b/src/mir/builder/control_flow/joinir/merge/block_allocator.rs @@ -23,7 +23,7 @@ pub(super) fn allocate_blocks( // Phase 177-3: Allocate exit block FIRST to ensure it doesn't conflict with JoinIR blocks // This exit_block_id will be returned and used by instruction_rewriter and exit_phi_builder - let exit_block_id = builder.block_gen.next(); + let exit_block_id = builder.next_block_id(); eprintln!( "[cf_loop/joinir/block_allocator] Phase 177-3: Allocated exit_block_id = {:?}", @@ -49,7 +49,7 @@ pub(super) fn allocate_blocks( blocks.sort_by_key(|(id, _)| id.0); for (old_block_id, _) in blocks { - let new_block_id = builder.block_gen.next(); + let new_block_id = builder.next_block_id(); // Use remapper to store composite key mapping remapper.set_block(func_name.clone(), *old_block_id, new_block_id); diff --git a/src/mir/builder/core_context.rs b/src/mir/builder/core_context.rs new file mode 100644 index 00000000..32b8476b --- /dev/null +++ b/src/mir/builder/core_context.rs @@ -0,0 +1,201 @@ +/*! + * CoreContext - Core ID generation management for MirBuilder + * + * Phase 136 follow-up (Step 2/7): Extract ID generation fields from MirBuilder + * to improve code organization and enable centralized ID allocation. + * + * Consolidates: + * - value_gen: ValueIdGenerator for SSA values + * - block_gen: BasicBlockIdGenerator for basic blocks + * - next_binding_id: BindingId allocation counter + * - temp_slot_counter: Temporary pin slot counter + * - debug_join_counter: Debug scope join ID counter + */ + +use crate::mir::{BasicBlockId, BasicBlockIdGenerator, BindingId, ValueId, ValueIdGenerator}; + +/// Core ID generation context for MIR builder +/// +/// Provides centralized allocation for all MIR entity IDs. +/// All ID generators are collected here for better organization and SSOT compliance. +#[derive(Debug)] +pub(crate) struct CoreContext { + /// Primary ValueId generator for SSA values + pub value_gen: ValueIdGenerator, + + /// BasicBlockId generator for control flow graph + pub block_gen: BasicBlockIdGenerator, + + /// Phase 74: BindingId allocation counter (parallel to ValueId) + /// Monotonically increasing counter for lexical variable binding IDs. 
+ pub next_binding_id: u32, + + /// Internal counter for temporary pin slots (block-crossing ephemeral values) + pub temp_slot_counter: u32, + + /// Phase 136: Debug scope join ID counter (deterministic region tracking) + pub debug_join_counter: u32, +} + +impl CoreContext { + /// Create a new CoreContext with default-initialized generators + pub fn new() -> Self { + Self { + value_gen: ValueIdGenerator::new(), + block_gen: BasicBlockIdGenerator::new(), + next_binding_id: 0, + temp_slot_counter: 0, + debug_join_counter: 0, + } + } + + /// Allocate the next ValueId from the primary generator + /// + /// Note: MirBuilder::next_value_id() provides higher-level allocation + /// with function context and reserved ID skipping. + pub fn next_value(&mut self) -> ValueId { + self.value_gen.next() + } + + /// Allocate the next BasicBlockId + pub fn next_block(&mut self) -> BasicBlockId { + self.block_gen.next() + } + + /// Allocate the next BindingId + /// + /// Phase 74: Independent from ValueId allocation to support stable binding + /// identity across SSA transformations. + pub fn next_binding(&mut self) -> BindingId { + let id = BindingId::new(self.next_binding_id); + self.next_binding_id = self.next_binding_id.saturating_add(1); + debug_assert!( + self.next_binding_id < u32::MAX, + "BindingId counter overflow: {}", + self.next_binding_id + ); + id + } + + /// Allocate the next temporary pin slot counter value + pub fn next_temp_slot(&mut self) -> u32 { + let id = self.temp_slot_counter; + self.temp_slot_counter = self.temp_slot_counter.saturating_add(1); + id + } + + /// Allocate the next debug join counter value + pub fn next_debug_join(&mut self) -> u32 { + let id = self.debug_join_counter; + self.debug_join_counter = self.debug_join_counter.saturating_add(1); + id + } + + /// Peek at the next ValueId without consuming it + pub fn peek_next_value(&self) -> ValueId { + self.value_gen.peek_next() + } + + /// Peek at the next BasicBlockId without consuming it + pub fn peek_next_block(&self) -> BasicBlockId { + self.block_gen.peek_next() + } +} + +impl Default for CoreContext { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_core_context_creation() { + let ctx = CoreContext::new(); + assert_eq!(ctx.peek_next_value().as_u32(), 0); + assert_eq!(ctx.peek_next_block().as_u32(), 0); + assert_eq!(ctx.next_binding_id, 0); + assert_eq!(ctx.temp_slot_counter, 0); + assert_eq!(ctx.debug_join_counter, 0); + } + + #[test] + fn test_value_allocation() { + let mut ctx = CoreContext::new(); + let v0 = ctx.next_value(); + let v1 = ctx.next_value(); + let v2 = ctx.next_value(); + assert_eq!(v0.as_u32(), 0); + assert_eq!(v1.as_u32(), 1); + assert_eq!(v2.as_u32(), 2); + assert_eq!(ctx.peek_next_value().as_u32(), 3); + } + + #[test] + fn test_block_allocation() { + let mut ctx = CoreContext::new(); + let b0 = ctx.next_block(); + let b1 = ctx.next_block(); + let b2 = ctx.next_block(); + assert_eq!(b0.as_u32(), 0); + assert_eq!(b1.as_u32(), 1); + assert_eq!(b2.as_u32(), 2); + assert_eq!(ctx.peek_next_block().as_u32(), 3); + } + + #[test] + fn test_binding_allocation() { + let mut ctx = CoreContext::new(); + let bid0 = ctx.next_binding(); + let bid1 = ctx.next_binding(); + let bid2 = ctx.next_binding(); + assert_eq!(bid0.raw(), 0); + assert_eq!(bid1.raw(), 1); + assert_eq!(bid2.raw(), 2); + assert_eq!(ctx.next_binding_id, 3); + } + + #[test] + fn test_temp_slot_allocation() { + let mut ctx = CoreContext::new(); + let t0 = ctx.next_temp_slot(); + let t1 = 
ctx.next_temp_slot(); + let t2 = ctx.next_temp_slot(); + assert_eq!(t0, 0); + assert_eq!(t1, 1); + assert_eq!(t2, 2); + } + + #[test] + fn test_debug_join_allocation() { + let mut ctx = CoreContext::new(); + let d0 = ctx.next_debug_join(); + let d1 = ctx.next_debug_join(); + let d2 = ctx.next_debug_join(); + assert_eq!(d0, 0); + assert_eq!(d1, 1); + assert_eq!(d2, 2); + } + + #[test] + fn test_independent_counters() { + let mut ctx = CoreContext::new(); + let v0 = ctx.next_value(); + let b0 = ctx.next_block(); + let bid0 = ctx.next_binding(); + let v1 = ctx.next_value(); + let b1 = ctx.next_block(); + let bid1 = ctx.next_binding(); + + // All counters are independent + assert_eq!(v0.as_u32(), 0); + assert_eq!(v1.as_u32(), 1); + assert_eq!(b0.as_u32(), 0); + assert_eq!(b1.as_u32(), 1); + assert_eq!(bid0.raw(), 0); + assert_eq!(bid1.raw(), 1); + } +} diff --git a/src/mir/builder/exprs_peek.rs b/src/mir/builder/exprs_peek.rs index bc3b4dad..41edfb11 100644 --- a/src/mir/builder/exprs_peek.rs +++ b/src/mir/builder/exprs_peek.rs @@ -13,12 +13,12 @@ impl super::MirBuilder { let scr_val = self.build_expression_impl(scrutinee)?; // Prepare merge and result - let merge_block: BasicBlockId = self.block_gen.next(); + let merge_block: BasicBlockId = self.next_block_id(); let result_val = self.next_value_id(); let mut phi_inputs: Vec<(BasicBlockId, ValueId)> = Vec::new(); // Create dispatch block where we start comparing arms - let dispatch_block = self.block_gen.next(); + let dispatch_block = self.next_block_id(); // Jump from current block to dispatch (ensure terminator exists) let need_jump = { let cur = self.current_block; @@ -39,7 +39,7 @@ impl super::MirBuilder { // If there are no arms, fall through to else directly if arms.is_empty() { - let else_block = self.block_gen.next(); + let else_block = self.next_block_id(); crate::mir::builder::emission::branch::emit_jump(self, else_block)?; self.start_new_block(else_block)?; let else_val = self.build_expression_impl(else_expr)?; @@ -52,15 +52,15 @@ impl super::MirBuilder { } // Else block to handle default case - let else_block = self.block_gen.next(); + let else_block = self.next_block_id(); // Chain dispatch blocks for each arm let mut cur_dispatch = dispatch_block; for (i, (label, arm_expr)) in arms.iter().cloned().enumerate() { - let then_block = self.block_gen.next(); + let then_block = self.next_block_id(); // Next dispatch (only for non-last arm) let next_dispatch = if i + 1 < arms.len() { - Some(self.block_gen.next()) + Some(self.next_block_id()) } else { None }; diff --git a/src/mir/builder/exprs_qmark.rs b/src/mir/builder/exprs_qmark.rs index 03b79c45..0724cd58 100644 --- a/src/mir/builder/exprs_qmark.rs +++ b/src/mir/builder/exprs_qmark.rs @@ -18,8 +18,8 @@ impl super::MirBuilder { method_id: None, effects: super::EffectMask::PURE, })?; - let then_block = self.block_gen.next(); - let else_block = self.block_gen.next(); + let then_block = self.next_block_id(); + let else_block = self.next_block_id(); let ok_local = self.local_ssa_ensure(ok_id, 4); crate::mir::builder::emission::branch::emit_conditional( self, ok_local, then_block, else_block, diff --git a/src/mir/builder/if_form.rs b/src/mir/builder/if_form.rs index b31ed2b5..727c04f4 100644 --- a/src/mir/builder/if_form.rs +++ b/src/mir/builder/if_form.rs @@ -97,9 +97,9 @@ impl MirBuilder { let condition_val = self.local_cond(condition_val); // Create blocks - let then_block = self.block_gen.next(); - let else_block = self.block_gen.next(); - let merge_block = self.block_gen.next(); + let 
then_block = self.next_block_id(); + let else_block = self.next_block_id(); + let merge_block = self.next_block_id(); // Branch let pre_branch_bb = self.current_block()?; diff --git a/src/mir/builder/lifecycle.rs b/src/mir/builder/lifecycle.rs index f6996521..6f90ce14 100644 --- a/src/mir/builder/lifecycle.rs +++ b/src/mir/builder/lifecycle.rs @@ -113,7 +113,7 @@ impl super::MirBuilder { effects: EffectMask::PURE, }; - let entry_block = self.block_gen.next(); + let entry_block = self.next_block_id(); let mut main_function = self.new_function_with_metadata(main_signature, entry_block); main_function.metadata.is_entry_point = true; diff --git a/src/mir/builder/ops.rs b/src/mir/builder/ops.rs index 46defa1c..ef3b21d1 100644 --- a/src/mir/builder/ops.rs +++ b/src/mir/builder/ops.rs @@ -340,9 +340,9 @@ impl super::MirBuilder { let lhs_val = self.pin_to_slot(lhs_val0, "@sc_lhs")?; // Prepare blocks - let then_block = self.block_gen.next(); - let else_block = self.block_gen.next(); - let merge_block = self.block_gen.next(); + let then_block = self.next_block_id(); + let else_block = self.next_block_id(); + let merge_block = self.next_block_id(); // Branch on LHS truthiness (runtime to_bool semantics in interpreter/LLVM) let mut lhs_cond = self.local_cond(lhs_val); @@ -371,9 +371,9 @@ impl super::MirBuilder { // OR: then → constant true let then_value_raw = if is_and { // Reduce arbitrary RHS to bool by branching on its truthiness and returning consts - let rhs_true = self.block_gen.next(); - let rhs_false = self.block_gen.next(); - let rhs_join = self.block_gen.next(); + let rhs_true = self.next_block_id(); + let rhs_false = self.next_block_id(); + let rhs_join = self.next_block_id(); let rhs_val = self.build_expression(right.clone())?; let mut rhs_cond = self.local_cond(rhs_val); crate::mir::builder::ssa::local::finalize_branch_cond(self, &mut rhs_cond); @@ -425,9 +425,9 @@ impl super::MirBuilder { let f_id = crate::mir::builder::emission::constant::emit_bool(self, false); f_id } else { - let rhs_true = self.block_gen.next(); - let rhs_false = self.block_gen.next(); - let rhs_join = self.block_gen.next(); + let rhs_true = self.next_block_id(); + let rhs_false = self.next_block_id(); + let rhs_join = self.next_block_id(); let rhs_val = self.build_expression(right)?; let mut rhs_cond = self.local_cond(rhs_val); crate::mir::builder::ssa::local::finalize_branch_cond(self, &mut rhs_cond); diff --git a/src/mir/builder/utils.rs b/src/mir/builder/utils.rs index d7f9a614..cfeeab30 100644 --- a/src/mir/builder/utils.rs +++ b/src/mir/builder/utils.rs @@ -35,12 +35,16 @@ impl super::MirBuilder { /// Phase 201-A: Skips reserved ValueIds (PHI dsts from LoopHeaderPhiBuilder) /// to prevent carrier value corruption in JoinIR loops. #[inline] + #[allow(deprecated)] pub(crate) fn next_value_id(&mut self) -> super::ValueId { loop { let candidate = if let Some(ref mut f) = self.current_function { f.next_value_id() // Function context } else { - self.value_gen.next() // Module context + // Phase 136 Step 2/7: Use core_ctx as SSOT, sync legacy field + let id = self.core_ctx.next_value(); + self.value_gen = self.core_ctx.value_gen.clone(); + id }; // Phase 201-A: Skip reserved PHI dst ValueIds @@ -54,6 +58,17 @@ impl super::MirBuilder { } } + /// Allocate a new BasicBlockId + /// + /// Phase 136 Step 2/7: Uses core_ctx as SSOT, syncs legacy field. 
+ #[inline] + #[allow(deprecated)] + pub(crate) fn next_block_id(&mut self) -> super::BasicBlockId { + let id = self.core_ctx.next_block(); + self.block_gen = self.core_ctx.block_gen.clone(); + id + } + // ---- LocalSSA convenience (readability helpers) ---- #[allow(dead_code)] #[inline]
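
The integration notes above describe the migration pattern only in prose: `core_ctx` becomes the single source of truth, while the old `MirBuilder` fields stay behind marked `#[deprecated]` and are re-synchronized after every allocation. The standalone sketch below models that pattern with deliberately simplified, made-up types (`BlockIdGen`, `Core`, `Builder` are not the real project types) so the invariant is explicit; it is illustrative only and not code from this diff.

```rust
// Minimal model of the Step 2 migration pattern (hypothetical types):
// the context owns the counter (SSOT); the deprecated legacy field is
// overwritten after each allocation so unmigrated readers stay consistent.

#[derive(Clone, Default)]
struct BlockIdGen {
    next_id: u32,
}

impl BlockIdGen {
    fn next(&mut self) -> u32 {
        let id = self.next_id;
        self.next_id += 1;
        id
    }

    fn peek_next(&self) -> u32 {
        self.next_id
    }
}

#[derive(Default)]
struct Core {
    block_gen: BlockIdGen,
}

struct Builder {
    /// Single source of truth for ID allocation.
    core: Core,
    /// Legacy mirror, kept only until every reader has migrated.
    #[deprecated(note = "use core.block_gen instead")]
    block_gen: BlockIdGen,
}

impl Builder {
    #[allow(deprecated)]
    fn new() -> Self {
        Self {
            core: Core::default(),
            block_gen: BlockIdGen::default(),
        }
    }

    /// Allocate from the SSOT, then re-sync the deprecated field so any
    /// not-yet-migrated direct reads still observe consistent state.
    #[allow(deprecated)]
    fn next_block_id(&mut self) -> u32 {
        let id = self.core.block_gen.next();
        self.block_gen = self.core.block_gen.clone();
        id
    }
}

#[allow(deprecated)]
fn main() {
    let mut b = Builder::new();
    assert_eq!(b.next_block_id(), 0);
    assert_eq!(b.next_block_id(), 1);
    // Legacy view and SSOT agree on the next id that would be handed out.
    assert_eq!(b.block_gen.peek_next(), b.core.block_gen.peek_next());
    println!("legacy/SSOT next id = {}", b.block_gen.peek_next());
}
```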
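
A regression test for that sync invariant is not part of this diff. If one is wanted, something along the following lines could be added as a `#[cfg(test)]` module under `src/mir/builder/` (for example appended to `core_context.rs`). This is a sketch only: it assumes that placement keeps the `pub(super)` fields `core_ctx` and `block_gen` in scope, and that `MirBuilder::new()` is cheap enough to construct in a unit test.

```rust
// Hypothetical parity test for the Step 2 sync behaviour (not included in this diff).
#[cfg(test)]
mod legacy_sync_tests {
    use crate::mir::builder::MirBuilder;

    #[test]
    #[allow(deprecated)] // the test deliberately reads the deprecated legacy field
    fn next_block_id_keeps_legacy_block_gen_in_sync() {
        let mut builder = MirBuilder::new();

        let first = builder.next_block_id();
        let second = builder.next_block_id();

        // IDs remain monotonically increasing through the new helper.
        assert_eq!(second.as_u32(), first.as_u32() + 1);

        // next_block_id() overwrites the deprecated field with a clone of
        // core_ctx.block_gen, so both views agree on the next id to hand out.
        assert_eq!(
            builder.block_gen.peek_next().as_u32(),
            builder.core_ctx.peek_next_block().as_u32()
        );
    }
}
```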
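
For code written after this step, the intended allocation surface is the set of builder helpers (`next_block_id()`, `next_value_id()`, `allocate_binding_id()`) rather than the raw generators. The hypothetical helper below (name and purpose invented for illustration, assumed to live in a builder submodule such as `utils.rs`) shows the style the replaced call sites now follow.

```rust
// Hypothetical helper only — illustrates the post-Step-2 allocation style
// (builder helpers instead of touching value_gen / block_gen directly).
impl super::MirBuilder {
    /// Allocate the three blocks and the result ValueId used by a generic
    /// two-way branch skeleton (then / else / merge).
    #[allow(dead_code)]
    pub(crate) fn alloc_branch_skeleton(
        &mut self,
    ) -> (
        super::BasicBlockId,
        super::BasicBlockId,
        super::BasicBlockId,
        super::ValueId,
    ) {
        let then_block = self.next_block_id();
        let else_block = self.next_block_id();
        let merge_block = self.next_block_id();
        let result = self.next_value_id();
        (then_block, else_block, merge_block, result)
    }
}
```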