refactor(llvm): Complete Resolver pattern implementation across all instructions

Major structural improvement driven by ChatGPT 5 Pro analysis:
- Replace all direct vmap access with Resolver API calls
- Thread cursor/bb_map/preds/block_end_values context through all instruction handlers
- Ensure dominance safety by localizing values through Resolver
- Fix parameter passing in invoke/fields/extern handlers

Key changes:
- boxcall: Use resolver.resolve_i64/ptr instead of direct vmap access (see the sketch after this list)
- strings: Remove unused recv_v parameter, use Resolver throughout
- invoke: Add missing context parameters for proper PHI handling
- fields: Add resolver and block context parameters
- flow/arith/maps: Consistent Resolver usage pattern
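
A sketch of the before/after pattern in a handler like boxcall (illustrative
only; recv_vid is a hypothetical value id, and the resolve_ptr argument list
is inferred from the call sites in the diff below):

    // Before: direct vmap access. The cached value may be defined in a
    // block that does not dominate the current one.
    // let recv = *vmap.get(&recv_vid).ok_or("value missing")?;

    // After: fetch through the Resolver, which localizes the value into
    // the current block (dominance-safe). Argument list is an assumption.
    let recv = resolver.resolve_ptr(
        codegen, cursor, cur_bid, recv_vid,
        bb_map, preds, block_end_values, vmap,
    )?;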

This addresses the "structural invariant" requirements:
1. All value fetching goes through Resolver (no direct vmap.get)
2. Localization happens at BB boundaries via Resolver (sketched after this list)
3. Better preparation for the PHI-only-in-dispatch pattern
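
Invariant 2 in practice: the Resolver inserts a PHI at the head of the
current block whose incoming values are the predecessors' block-end
snapshots, so the merged value dominates every use in that block. A minimal
sketch (inkwell, simplified; coercions and error handling elided):

    // Dominance-safe localization at a BB boundary (simplified).
    // block_end_values[p][&vid] is the value of vid at the end of
    // predecessor p; merging those via a PHI at the head of the current
    // block yields a value valid anywhere inside it.
    let phi = builder.build_phi(i64_type, "localized").unwrap();
    for p in &preds[&cur_bid] {
        let pred_bb = bb_map[p];
        let incoming = block_end_values[p][&vid].into_int_value();
        phi.add_incoming(&[(&incoming, pred_bb)]);
    }
    let localized = phi.as_basic_value().into_int_value();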

Next: Consider boxing excessive parameters (15+ args in some functions)
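
One possible shape for that boxing (hypothetical; LowerCtx and the type
names are assumptions based on the parameters threaded through this diff):

    // Hypothetical context struct bundling the lowering state currently
    // passed to every handler as a dozen-plus separate arguments.
    struct LowerCtx<'a, 'ctx> {
        codegen: &'a CodeGen<'ctx>,
        cursor: &'a mut Cursor<'ctx>,
        bb_map: &'a HashMap<BasicBlockId, BasicBlock<'ctx>>,
        preds: &'a HashMap<BasicBlockId, Vec<BasicBlockId>>,
        block_end_values: &'a HashMap<BasicBlockId, HashMap<ValueId, BasicValueEnum<'ctx>>>,
        vmap: &'a HashMap<ValueId, BasicValueEnum<'ctx>>,
    }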

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: Selfhosting Dev
Date: 2025-09-12 22:36:20 +09:00
parent f77bbb5878
commit 8b48480844
16 changed files with 516 additions and 503 deletions


@@ -58,44 +58,15 @@ impl<'ctx> Resolver<'ctx> {
if let Some(pv) = self.ptr_locals.get(&(cur_bid, vid)).copied() {
return Ok(pv);
}
// Avoid using current vmap directly to keep dominance safe under multiple predecessors.
// Strategy: localize as i64 (dominance-safe PHI), then convert to i8* in current block.
let i8p = codegen.context.ptr_type(inkwell::AddressSpace::from(0));
let cur_llbb = *bb_map.get(&cur_bid).ok_or("cur bb missing")?;
let pred_list = preds.get(&cur_bid).cloned().unwrap_or_default();
// Insert PHI at block start
let saved_ip = codegen.builder.get_insert_block();
if let Some(first) = cur_llbb.get_first_instruction() { codegen.builder.position_before(&first); }
else { codegen.builder.position_at_end(cur_llbb); }
let phi = codegen.builder.build_phi(i8p, &format!("loc_p_{}", vid.as_u32())).map_err(|e| e.to_string())?;
if pred_list.is_empty() {
// Entry-like block: derive from vmap or zero
let base = vmap.get(&vid).copied().unwrap_or_else(|| i8p.const_zero().into());
let coerced = match base {
BVE::PointerValue(pv) => pv,
BVE::IntValue(iv) => cursor.emit_instr(cur_bid, |b| b.build_int_to_ptr(iv, i8p, "loc_i2p")).map_err(|e| e.to_string())?,
BVE::FloatValue(_) => i8p.const_zero(),
_ => i8p.const_zero(),
};
phi.add_incoming(&[(&coerced, cur_llbb)]);
} else {
for p in &pred_list {
let pred_bb = *bb_map.get(p).ok_or("pred bb missing")?;
let base = block_end_values
.get(p)
.and_then(|m| m.get(&vid).copied())
.unwrap_or_else(|| i8p.const_zero().into());
let coerced = match base {
BVE::PointerValue(pv) => pv,
BVE::IntValue(iv) => codegen.builder.build_int_to_ptr(iv, i8p, "loc_i2p_p").map_err(|e| e.to_string())?,
BVE::FloatValue(_) => i8p.const_zero(),
_ => i8p.const_zero(),
};
phi.add_incoming(&[(&coerced, pred_bb)]);
}
}
if let Some(bb) = saved_ip { codegen.builder.position_at_end(bb); }
let out = phi.as_basic_value().into_pointer_value();
self.ptr_locals.insert((cur_bid, vid), out);
Ok(out)
let iv = localize_to_i64(codegen, cursor, cur_bid, vid, bb_map, preds, block_end_values, vmap)?;
let pv = cursor
.emit_instr(cur_bid, |b| b.build_int_to_ptr(iv, i8p, "loc_i2p_dom"))
.map_err(|e| e.to_string())?;
self.ptr_locals.insert((cur_bid, vid), pv);
Ok(pv)
}
/// Resolve a MIR value as an f64 dominating the current block.
@@ -113,6 +84,7 @@ impl<'ctx> Resolver<'ctx> {
if let Some(fv) = self.f64_locals.get(&(cur_bid, vid)).copied() {
return Ok(fv);
}
// Avoid using current vmap directly to keep dominance safe under multiple predecessors.
let f64t = codegen.context.f64_type();
let cur_llbb = *bb_map.get(&cur_bid).ok_or("cur bb missing")?;
let pred_list = preds.get(&cur_bid).cloned().unwrap_or_default();
@@ -121,14 +93,9 @@ impl<'ctx> Resolver<'ctx> {
else { codegen.builder.position_at_end(cur_llbb); }
let phi = codegen.builder.build_phi(f64t, &format!("loc_f64_{}", vid.as_u32())).map_err(|e| e.to_string())?;
if pred_list.is_empty() {
let base = vmap.get(&vid).copied().unwrap_or_else(|| f64t.const_zero().into());
let coerced = match base {
BVE::FloatValue(fv) => fv,
BVE::IntValue(iv) => codegen.builder.build_signed_int_to_float(iv, f64t, "loc_i2f").map_err(|e| e.to_string())?,
BVE::PointerValue(_) => f64t.const_zero(),
_ => f64t.const_zero(),
};
phi.add_incoming(&[(&coerced, cur_llbb)]);
// No predecessor: conservatively zero (do not depend on vmap)
let z = f64t.const_zero();
phi.add_incoming(&[(&z, cur_llbb)]);
} else {
for p in &pred_list {
let pred_bb = *bb_map.get(p).ok_or("pred bb missing")?;
@@ -136,12 +103,17 @@ impl<'ctx> Resolver<'ctx> {
.get(p)
.and_then(|m| m.get(&vid).copied())
.unwrap_or_else(|| f64t.const_zero().into());
let coerced = match base {
BVE::FloatValue(fv) => fv,
BVE::IntValue(iv) => codegen.builder.build_signed_int_to_float(iv, f64t, "loc_i2f_p").map_err(|e| e.to_string())?,
BVE::PointerValue(_) => f64t.const_zero(),
_ => f64t.const_zero(),
};
let mut coerced = f64t.const_zero();
cursor.with_block(*p, pred_bb, |c| {
let term = pred_bb.get_terminator();
if let Some(t) = term { codegen.builder.position_before(&t); } else { c.position_at_end(pred_bb); }
coerced = match base {
BVE::FloatValue(fv) => fv,
BVE::IntValue(iv) => codegen.builder.build_signed_int_to_float(iv, f64t, "loc_i2f_p").map_err(|e| e.to_string()).unwrap(),
BVE::PointerValue(_) => f64t.const_zero(),
_ => f64t.const_zero(),
};
});
phi.add_incoming(&[(&coerced, pred_bb)]);
}
}
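
The new pointer path above delegates to localize_to_i64, whose body is not
in this hunk. Judging from the f64 localizer, it presumably follows the same
shape; a speculative sketch, not the actual implementation (parameter types
inferred from the call sites; the entry case conservatively uses zero,
mirroring the f64 version):

    fn localize_to_i64<'ctx>(
        codegen: &CodeGen<'ctx>,
        _cursor: &mut Cursor<'ctx>,
        cur_bid: BasicBlockId,
        vid: ValueId,
        bb_map: &HashMap<BasicBlockId, BasicBlock<'ctx>>,
        preds: &HashMap<BasicBlockId, Vec<BasicBlockId>>,
        block_end_values: &HashMap<BasicBlockId, HashMap<ValueId, BasicValueEnum<'ctx>>>,
        _vmap: &HashMap<ValueId, BasicValueEnum<'ctx>>,
    ) -> Result<IntValue<'ctx>, String> {
        let i64t = codegen.context.i64_type();
        let cur_llbb = *bb_map.get(&cur_bid).ok_or("cur bb missing")?;
        let pred_list = preds.get(&cur_bid).cloned().unwrap_or_default();
        // PHI at block start so the result dominates every use in the block.
        let saved_ip = codegen.builder.get_insert_block();
        if let Some(first) = cur_llbb.get_first_instruction() { codegen.builder.position_before(&first); }
        else { codegen.builder.position_at_end(cur_llbb); }
        let phi = codegen.builder.build_phi(i64t, &format!("loc_i64_{}", vid.as_u32())).map_err(|e| e.to_string())?;
        if pred_list.is_empty() {
            // Entry-like block: conservatively zero (no vmap dependence).
            phi.add_incoming(&[(&i64t.const_zero(), cur_llbb)]);
        } else {
            for p in &pred_list {
                let pred_bb = *bb_map.get(p).ok_or("pred bb missing")?;
                let base = block_end_values.get(p).and_then(|m| m.get(&vid).copied())
                    .unwrap_or_else(|| i64t.const_zero().into());
                // Coerce in the predecessor, before its terminator, so the
                // incoming value is defined on the edge it arrives from.
                if let Some(t) = pred_bb.get_terminator() { codegen.builder.position_before(&t); }
                else { codegen.builder.position_at_end(pred_bb); }
                let coerced = match base {
                    BVE::IntValue(iv) => iv,
                    BVE::PointerValue(pv) => codegen.builder.build_ptr_to_int(pv, i64t, "loc_p2i").map_err(|e| e.to_string())?,
                    _ => i64t.const_zero(),
                };
                phi.add_incoming(&[(&coerced, pred_bb)]);
            }
        }
        if let Some(bb) = saved_ip { codegen.builder.position_at_end(bb); }
        Ok(phi.as_basic_value().into_int_value())
    }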