feat(mir): Phase 74 - BindingId infrastructure (dev-only)

Phase 74 introduces BindingId, allocated in parallel with ValueId, to give
lexical scope tracking a shadowing-aware notion of variable identity.

Changes:
- binding_id.rs: New BindingId type with overflow protection (5 unit tests; see the sketch after this list)
- builder.rs: Added next_binding_id counter and binding_map (4 integration tests)
- lexical_scope.rs: Extended restoration logic for BindingId management
- mod.rs: Public re-export of BindingId
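
The BindingId type itself lives in binding_id.rs, which is not shown in the
hunks below. A minimal sketch consistent with how builder.rs uses it (new(),
raw(), and Copy/Eq so it can be stored and compared via binding_map); the
derives and internals of the real file may differ:

    /// Sketch only: the real binding_id.rs is not part of this view.
    /// Stable identity for one lexical variable binding, independent of
    /// SSA ValueId renaming.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct BindingId(u32);

    impl BindingId {
        /// Wrap a raw counter value (see MirBuilder::allocate_binding_id).
        pub fn new(raw: u32) -> Self {
            BindingId(raw)
        }

        /// Expose the raw u32 for assertions and debug output.
        pub fn raw(self) -> u32 {
            self.0
        }
    }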

Tests: 9/9 new PASS, lib 958/958 PASS (no regressions)
Architecture: Parallel BindingId/ValueId allocation for deterministic shadowing

Phases 75-77 will build on this infrastructure for type-safe promotion tracking.

🤖 Generated with Claude Code

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
nyash-codex · 2025-12-13 05:34:56 +09:00
parent ea7fb607c7
commit e1574af741
7 changed files with 957 additions and 1 deletion


@@ -173,6 +173,19 @@ pub struct MirBuilder {
/// Cleared after JoinIR merge completes.
pub(super) reserved_value_ids: HashSet<ValueId>,
/// Phase 74: BindingId allocation counter (parallel to ValueId)
/// Monotonically increasing counter for lexical variable binding IDs.
/// Allocated via `allocate_binding_id()` method.
/// Independent from ValueId allocation to support Phase 75+ ScopeManager migration.
pub next_binding_id: u32,
/// Phase 74: BindingId mapping for lexical variable bindings
/// Maps variable names to their current BindingId.
/// Parallel to `variable_map` (String -> ValueId), but tracks binding identity.
/// Restored on lexical scope exit (see `pop_lexical_scope()`).
/// BTreeMap for deterministic iteration (Phase 25.1 consistency).
pub binding_map: BTreeMap<String, super::BindingId>,
// include guards removed
/// Loop context stacks for lowering break/continue inside nested control flow
/// Top of stack corresponds to the innermost active loop
@@ -295,6 +308,9 @@ impl MirBuilder {
fn_body_ast: None, // Phase 200-C: Initialize to None
reserved_value_ids: HashSet::new(), // Phase 201-A: Initialize to empty
next_binding_id: 0, // Phase 74: Initialize BindingId counter
binding_map: BTreeMap::new(), // Phase 74: Initialize BindingId mapping
loop_header_stack: Vec::new(),
loop_exit_stack: Vec::new(),
if_merge_stack: Vec::new(),
@@ -340,6 +356,40 @@ impl MirBuilder {
self.suppress_pin_entry_copy_next = true;
}
// ---- Phase 74: BindingId allocation ----
/// Allocate a new BindingId (parallel to ValueId allocation)
///
/// ## Parallel ValueId/BindingId Allocation
///
/// BindingId allocation is completely independent from ValueId allocation:
/// - `next_value_id()` increments `value_gen` counter
/// - `allocate_binding_id()` increments `next_binding_id` counter
///
/// This parallelism enables:
/// 1. **Stable binding identity** across SSA transformations
/// 2. **Independent shadowing tracking** separate from SSA renaming
/// 3. **Future ScopeManager migration** (Phase 75+) without breaking SSA
///
/// Example:
/// ```ignore
/// // local x = 1; <- allocate_binding_id() -> BindingId(0)
/// // next_value_id() -> ValueId(10)
/// // {
/// // local x = 2; <- allocate_binding_id() -> BindingId(1)
/// // next_value_id() -> ValueId(20)
/// // }
/// ```
pub fn allocate_binding_id(&mut self) -> super::BindingId {
let id = super::BindingId::new(self.next_binding_id);
self.next_binding_id = self.next_binding_id.saturating_add(1);
debug_assert!(
self.next_binding_id < u32::MAX,
"BindingId counter overflow: {} (parallel to ValueId allocation)",
self.next_binding_id
);
id
}
// ---- Hint helpers (no-op by default) ----
#[inline]
pub(crate) fn hint_scope_enter(&mut self, id: u32) {
@@ -1057,3 +1107,91 @@ impl Default for MirBuilder {
Self::new()
}
}
#[cfg(test)]
mod binding_id_tests {
use super::*;
#[test]
fn test_binding_map_initialization() {
let builder = MirBuilder::new();
assert_eq!(builder.next_binding_id, 0);
assert!(builder.binding_map.is_empty());
}
#[test]
fn test_binding_allocation_sequential() {
let mut builder = MirBuilder::new();
let bid0 = builder.allocate_binding_id();
let bid1 = builder.allocate_binding_id();
let bid2 = builder.allocate_binding_id();
assert_eq!(bid0.raw(), 0);
assert_eq!(bid1.raw(), 1);
assert_eq!(bid2.raw(), 2);
assert_eq!(builder.next_binding_id, 3);
}
#[test]
fn test_shadowing_binding_restore() {
let mut builder = MirBuilder::new();
// Simulate function entry scope
builder.push_lexical_scope();
// Declare outer x
let outer_vid = builder.value_gen.next();
builder
.declare_local_in_current_scope("x", outer_vid)
.unwrap();
let outer_bid = *builder.binding_map.get("x").unwrap();
assert_eq!(outer_bid.raw(), 0);
// Enter inner scope and shadow x
builder.push_lexical_scope();
let inner_vid = builder.value_gen.next();
builder
.declare_local_in_current_scope("x", inner_vid)
.unwrap();
let inner_bid = *builder.binding_map.get("x").unwrap();
assert_eq!(inner_bid.raw(), 1);
// Exit inner scope - should restore outer binding
builder.pop_lexical_scope();
let restored_bid = *builder.binding_map.get("x").unwrap();
assert_eq!(restored_bid, outer_bid);
assert_eq!(restored_bid.raw(), 0);
// Cleanup
builder.pop_lexical_scope();
}
#[test]
fn test_valueid_binding_parallel_allocation() {
let mut builder = MirBuilder::new();
// Allocate ValueIds and BindingIds in parallel
let vid0 = builder.value_gen.next();
let bid0 = builder.allocate_binding_id();
let vid1 = builder.value_gen.next();
let bid1 = builder.allocate_binding_id();
// ValueId and BindingId should be independent
assert_eq!(vid0.0, 0);
assert_eq!(bid0.raw(), 0);
assert_eq!(vid1.0, 1);
assert_eq!(bid1.raw(), 1);
// Allocating more ValueIds should not affect BindingId counter
let _ = builder.value_gen.next();
let _ = builder.value_gen.next();
let bid2 = builder.allocate_binding_id();
assert_eq!(bid2.raw(), 2); // Still sequential
// Allocating more BindingIds should not affect ValueId counter
let _ = builder.allocate_binding_id();
let _ = builder.allocate_binding_id();
let vid2 = builder.value_gen.next();
assert_eq!(vid2.0, 4); // Continues from where we left off
}
}
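
For reference, a minimal sketch of the restoration logic in lexical_scope.rs
that test_shadowing_binding_restore exercises. That file is not in this view,
so the LexicalScope struct, the lexical_scopes field, and the helper names
below are assumptions inferred from the test behavior, not the actual
implementation:

use std::collections::BTreeMap;

struct LexicalScope {
    /// name -> BindingId that was current before this scope shadowed it
    /// (None if the name was unbound on scope entry).
    saved_bindings: BTreeMap<String, Option<BindingId>>,
}

impl MirBuilder {
    /// Called from declare_local_in_current_scope (sketch).
    fn bind_name(&mut self, name: &str) -> BindingId {
        let new_bid = self.allocate_binding_id();
        let prev = self.binding_map.insert(name.to_string(), new_bid);
        // Record the shadowed binding once per scope so pop can restore it.
        if let Some(scope) = self.lexical_scopes.last_mut() {
            scope.saved_bindings.entry(name.to_string()).or_insert(prev);
        }
        new_bid
    }

    /// Called from pop_lexical_scope (sketch).
    fn restore_bindings(&mut self, scope: LexicalScope) {
        for (name, prev) in scope.saved_bindings {
            match prev {
                Some(bid) => {
                    // The outer scope had a binding: make it current again.
                    self.binding_map.insert(name, bid);
                }
                None => {
                    // The name was introduced in this scope: drop it.
                    self.binding_map.remove(&name);
                }
            }
        }
    }
}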