# hakorune/src/llvm_py/instructions/boxcall.py
"""
BoxCall instruction lowering
Core of Nyash's "Everything is Box" philosophy
"""
import llvmlite.ir as ir
from typing import Dict, List, Optional, Any
from instructions.safepoint import insert_automatic_safepoint
from naming_helper import encode_static_method
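# Illustrative note (not part of the lowering logic): encode_static_method follows the
# NamingBox convention "BoxName.method/arity", e.g.
#   encode_static_method("Main", "main", 1)      -> "Main.main/1"
#   encode_static_method("Main", "esc_json", 1)  -> "Main.esc_json/1"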
def _declare(module: ir.Module, name: str, ret, args):
for f in module.functions:
if f.name == name:
return f
fnty = ir.FunctionType(ret, args)
return ir.Function(module, fnty, name=name)
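# Example (illustrative): _declare is declare-or-reuse, so repeated lowering of the same
# runtime symbol yields a single module-level declaration, e.g.
#   log_fn = _declare(module, "nyash.console.log", ir.IntType(64), [ir.IntType(8).as_pointer()])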
def _ensure_handle(builder: ir.IRBuilder, module: ir.Module, v: ir.Value) -> ir.Value:
"""Coerce a value to i64 handle. If pointer, box via nyash.box.from_i8_string."""
i64 = ir.IntType(64)
if hasattr(v, 'type'):
if isinstance(v.type, ir.IntType) and v.type.width == 64:
return v
if isinstance(v.type, ir.PointerType):
# call nyash.box.from_i8_string(i8*) -> i64
i8p = ir.IntType(8).as_pointer()
# If pointer-to-array, GEP to first element
try:
if isinstance(v.type.pointee, ir.ArrayType):
c0 = ir.IntType(32)(0)
v = builder.gep(v, [c0, c0], name="bc_str_gep")
except Exception:
pass
callee = _declare(module, "nyash.box.from_i8_string", i64, [i8p])
return builder.call(callee, [v], name="str_ptr2h")
if isinstance(v.type, ir.IntType):
# extend/trunc to i64
return builder.zext(v, i64) if v.type.width < 64 else builder.trunc(v, i64)
return ir.Constant(i64, 0)
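# Coercion summary for _ensure_handle (descriptive, mirrors the branches above):
#   i64              -> returned as-is
#   i8* / [N x i8]*  -> boxed via nyash.box.from_i8_string into an i64 handle
#   narrower iN      -> zext to i64 (wider iN -> trunc)
#   anything else    -> constant i64 0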
def lower_boxcall(
builder: ir.IRBuilder,
module: ir.Module,
box_vid: int,
method_name: str,
args: List[int],
dst_vid: Optional[int],
vmap: Dict[int, ir.Value],
resolver=None,
preds=None,
block_end_values=None,
bb_map=None,
ctx: Optional[Any] = None,
) -> None:
    """
    Lower a MIR BoxCall instruction.

    The current implementation uses a method-id / by-name approach for plugin boxes.

    Args:
        builder: Current LLVM IR builder
        module: LLVM module
        box_vid: Box instance value ID (handle)
        method_name: Method name to call
        args: List of argument value IDs
        dst_vid: Optional destination for the return value
        vmap: Value map
        resolver: Optional resolver for type handling
        preds / block_end_values / bb_map: Optional CFG info for cross-block value resolution
        ctx: Optional BuildCtx; when present, its resolver/preds/block_end_values/bb_map are preferred
    """
    # Guard against emitting after a terminator: create a continuation block if needed.
    try:
        if builder.block is not None and getattr(builder.block, 'terminator', None) is not None:
            func = builder.block.parent
            cont = func.append_basic_block(name=f"cont_bb_{builder.block.name}")
            builder.position_at_end(cont)
    except Exception:
        pass
i64 = ir.IntType(64)
i8 = ir.IntType(8)
i8p = i8.as_pointer()
# Insert a safepoint around potential heavy boxcall sites (pre-call)
try:
import os
if os.environ.get('NYASH_LLVM_AUTO_SAFEPOINT', '1') == '1':
insert_automatic_safepoint(builder, module, "boxcall")
except Exception:
pass
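    # Note: the automatic safepoint is emitted by default; setting NYASH_LLVM_AUTO_SAFEPOINT
    # to anything other than '1' (e.g. '0') skips it for boxcall sites.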
# Short-hands with ctx (backward-compatible fallback)
r = resolver
p = preds
bev = block_end_values
bbm = bb_map
if ctx is not None:
try:
r = getattr(ctx, 'resolver', r)
p = getattr(ctx, 'preds', p)
bev = getattr(ctx, 'block_end_values', bev)
bbm = getattr(ctx, 'bb_map', bbm)
except Exception:
pass
def _res_i64(vid: int):
if r is not None and p is not None and bev is not None and bbm is not None:
try:
return r.resolve_i64(vid, builder.block, p, bev, vmap, bbm)
except Exception:
return None
return vmap.get(vid)
# If BuildCtx is provided, prefer its maps for consistency.
if ctx is not None:
try:
if getattr(ctx, 'resolver', None) is not None:
resolver = ctx.resolver
if getattr(ctx, 'preds', None) is not None and preds is None:
preds = ctx.preds
if getattr(ctx, 'block_end_values', None) is not None and block_end_values is None:
block_end_values = ctx.block_end_values
if getattr(ctx, 'bb_map', None) is not None and bb_map is None:
bb_map = ctx.bb_map
except Exception:
pass
# Receiver value
recv_val = _res_i64(box_vid)
if recv_val is None:
recv_val = vmap.get(box_vid, ir.Constant(i64, 0))
# Minimal method bridging for strings and console
if method_name in ("length", "len"):
# Fast path (opt-in): pointer-based string length → nyash.string.length_si(i8*, i64 mode)
try:
import os
fast_on = os.environ.get('NYASH_LLVM_FAST') == '1'
except Exception:
fast_on = False
def _cache_len(val):
if not fast_on or resolver is None or dst_vid is None or box_vid is None:
return
cache = getattr(resolver, 'length_cache', None)
if cache is None:
return
try:
cache[int(box_vid)] = val
except Exception:
pass
if fast_on and resolver is not None and dst_vid is not None and box_vid is not None:
cache = getattr(resolver, 'length_cache', None)
if cache is not None:
try:
cached = cache.get(int(box_vid))
except Exception:
cached = None
if cached is not None:
vmap[dst_vid] = cached
return
# Ultra-fast: literal length folding when receiver originates from a string literal.
# Check resolver.newbox_string_args[recv] -> arg_vid -> resolver.string_literals[arg_vid]
if fast_on and dst_vid is not None and resolver is not None:
try:
arg_vid = None
if hasattr(resolver, 'newbox_string_args'):
arg_vid = resolver.newbox_string_args.get(int(box_vid))
# Case A: newbox(StringBox, const)
if arg_vid is not None and hasattr(resolver, 'string_literals'):
lit = resolver.string_literals.get(int(arg_vid))
if isinstance(lit, str):
# Mode: bytes or code points
use_cp = os.environ.get('NYASH_STR_CP') == '1'
n = len(lit) if use_cp else len(lit.encode('utf-8'))
const_len = ir.Constant(ir.IntType(64), n)
vmap[dst_vid] = const_len
_cache_len(const_len)
return
# Case B: receiver itself is a literal-backed handle (const string)
if hasattr(resolver, 'string_literals'):
lit2 = resolver.string_literals.get(int(box_vid))
if isinstance(lit2, str):
use_cp = os.environ.get('NYASH_STR_CP') == '1'
n2 = len(lit2) if use_cp else len(lit2.encode('utf-8'))
const_len2 = ir.Constant(ir.IntType(64), n2)
vmap[dst_vid] = const_len2
_cache_len(const_len2)
return
except Exception:
pass
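        # Illustrative: with NYASH_STR_CP unset the folded length counts UTF-8 bytes,
        # with NYASH_STR_CP=1 it counts code points, e.g. "abc" -> 3 either way,
        # "にゃ" -> 6 bytes vs 2 code points.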
if fast_on and resolver is not None and hasattr(resolver, 'string_ptrs'):
try:
ptr = resolver.string_ptrs.get(int(box_vid))
except Exception:
ptr = None
# Fallback: If not found, check if receiver came from newbox(StringBox) with const string arg
# This handles AOT/EXE scenarios where StringBox plugin isn't loaded
if ptr is None and hasattr(resolver, 'newbox_string_args'):
try:
# Check if box_vid is a result of newbox(StringBox, [string_vid])
arg_vid = resolver.newbox_string_args.get(int(box_vid))
if arg_vid is not None:
# Try to get the string ptr from the argument
ptr = resolver.string_ptrs.get(int(arg_vid))
except Exception:
pass
if ptr is not None:
mode = 1 if os.environ.get('NYASH_STR_CP') == '1' else 0
mode_c = ir.Constant(i64, mode)
# Prefer neutral kernel symbol; legacy name kept in NyRT for compatibility
callee = _declare(module, "nyrt_string_length", i64, [i8p, i64])
result = builder.call(callee, [ptr, mode_c], name="strlen_si")
if dst_vid is not None:
vmap[dst_vid] = result
return
# Default: Any.length_h(handle) → i64
recv_h = _ensure_handle(builder, module, recv_val)
callee = _declare(module, "nyash.any.length_h", i64, [i64])
result = builder.call(callee, [recv_h], name="any_length_h")
if dst_vid is not None:
vmap[dst_vid] = result
return
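        # The default length path above lowers to roughly (illustrative IR):
        #   %h   = call i64 @"nyash.box.from_i8_string"(i8* %s)   ; only if the receiver was a pointer
        #   %len = call i64 @"nyash.any.length_h"(i64 %h)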
if method_name == "size":
# Map/Array size via any.length_h
recv_h = _ensure_handle(builder, module, recv_val)
callee = _declare(module, "nyash.any.length_h", i64, [i64])
result = builder.call(callee, [recv_h], name="any_size_h")
if dst_vid is not None:
vmap[dst_vid] = result
return
if method_name == "substring":
# substring(start, end)
# If receiver is a handle (i64), use handle-based helper; else pointer-based API
s = _res_i64(args[0]) if args else ir.Constant(i64, 0)
if s is None:
s = vmap.get(args[0], ir.Constant(i64, 0)) if args else ir.Constant(i64, 0)
e = _res_i64(args[1]) if len(args) > 1 else ir.Constant(i64, 0)
if e is None:
e = vmap.get(args[1], ir.Constant(i64, 0)) if len(args) > 1 else ir.Constant(i64, 0)
if hasattr(recv_val, 'type') and isinstance(recv_val.type, ir.IntType):
# handle-based
callee = _declare(module, "nyash.string.substring_hii", i64, [i64, i64, i64])
h = builder.call(callee, [recv_val, s, e], name="substring_h")
if dst_vid is not None:
vmap[dst_vid] = h
try:
if resolver is not None and hasattr(resolver, 'mark_string'):
resolver.mark_string(dst_vid)
except Exception:
pass
return
else:
# pointer-based
recv_p = recv_val
if hasattr(recv_p, 'type') and isinstance(recv_p.type, ir.PointerType):
try:
if isinstance(recv_p.type.pointee, ir.ArrayType):
c0 = ir.Constant(ir.IntType(32), 0)
recv_p = builder.gep(recv_p, [c0, c0], name="bc_gep_recv")
except Exception:
pass
else:
recv_p = ir.Constant(i8p, None)
# Coerce indices
if hasattr(s, 'type') and isinstance(s.type, ir.PointerType):
s = builder.ptrtoint(s, i64)
if hasattr(e, 'type') and isinstance(e.type, ir.PointerType):
e = builder.ptrtoint(e, i64)
callee = _declare(module, "nyash.string.substring_sii", i8p, [i8p, i64, i64])
p = builder.call(callee, [recv_p, s, e], name="substring")
conv = _declare(module, "nyash.box.from_i8_string", i64, [i8p])
h = builder.call(conv, [p], name="str_ptr2h_sub")
if dst_vid is not None:
vmap[dst_vid] = h
try:
if resolver is not None and hasattr(resolver, 'mark_string'):
resolver.mark_string(dst_vid)
if resolver is not None and hasattr(resolver, 'string_ptrs'):
resolver.string_ptrs[int(dst_vid)] = p
except Exception:
pass
return
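        # Summary of the substring branch above: handle receivers go through
        # nyash.string.substring_hii(i64, i64, i64); pointer receivers go through
        # nyash.string.substring_sii(i8*, i64, i64) and the result is re-boxed to a handle
        # via nyash.box.from_i8_string.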
if method_name == "lastIndexOf":
# lastIndexOf(needle)
if resolver is not None and preds is not None and block_end_values is not None and bb_map is not None:
n_i64 = resolver.resolve_i64(args[0], builder.block, preds, block_end_values, vmap, bb_map) if args else ir.Constant(i64, 0)
else:
n_i64 = vmap.get(args[0], ir.Constant(i64, 0)) if args else ir.Constant(i64, 0)
if hasattr(recv_val, 'type') and isinstance(recv_val.type, ir.IntType):
# handle-based
callee = _declare(module, "nyash.string.lastIndexOf_hh", i64, [i64, i64])
res = builder.call(callee, [recv_val, n_i64], name="lastIndexOf_hh")
if dst_vid is not None:
vmap[dst_vid] = res
return
else:
# pointer-based
recv_p = recv_val
if hasattr(recv_p, 'type') and isinstance(recv_p.type, ir.PointerType):
try:
if isinstance(recv_p.type.pointee, ir.ArrayType):
c0 = ir.Constant(ir.IntType(32), 0)
recv_p = builder.gep(recv_p, [c0, c0], name="bc_gep_recv2")
except Exception:
pass
else:
recv_p = ir.Constant(i8p, None)
needle = n_i64
if hasattr(needle, 'type') and isinstance(needle.type, ir.IntType):
needle = builder.inttoptr(needle, i8p, name="bc_i2p_needle")
elif hasattr(needle, 'type') and isinstance(needle.type, ir.PointerType):
try:
if isinstance(needle.type.pointee, ir.ArrayType):
c0 = ir.Constant(ir.IntType(32), 0)
needle = builder.gep(needle, [c0, c0], name="bc_gep_needle")
except Exception:
pass
callee = _declare(module, "nyash.string.lastIndexOf_ss", i64, [i8p, i8p])
res = builder.call(callee, [recv_p, needle], name="lastIndexOf")
if dst_vid is not None:
vmap[dst_vid] = res
return
if method_name == "get":
        # get(index/key): both ArrayBox.get(index) and MapBox.get(key) are currently
        # lowered through nyash.map.get_hh(handle, key_any) below.
recv_h = _ensure_handle(builder, module, recv_val)
k = _res_i64(args[0]) if args else ir.Constant(i64, 0)
if k is None:
k = vmap.get(args[0], ir.Constant(i64, 0)) if args else ir.Constant(i64, 0)
callee_map = _declare(module, "nyash.map.get_hh", i64, [i64, i64])
res = builder.call(callee_map, [recv_h, k], name="map_get_hh")
if dst_vid is not None:
vmap[dst_vid] = res
return
if method_name == "push":
# ArrayBox.push(val) → nyash.array.push_h(handle, val)
recv_h = _ensure_handle(builder, module, recv_val)
v0 = _res_i64(args[0]) if args else ir.Constant(i64, 0)
if v0 is None:
v0 = vmap.get(args[0], ir.Constant(i64, 0)) if args else ir.Constant(i64, 0)
callee = _declare(module, "nyash.array.push_h", i64, [i64, i64])
res = builder.call(callee, [recv_h, v0], name="arr_push_h")
if dst_vid is not None:
vmap[dst_vid] = res
return
if method_name == "set":
# MapBox.set(key, val) → nyash.map.set_hh(handle, key_any, val_any)
recv_h = _ensure_handle(builder, module, recv_val)
k = _res_i64(args[0]) if len(args) > 0 else ir.Constant(i64, 0)
if k is None:
k = vmap.get(args[0], ir.Constant(i64, 0)) if len(args) > 0 else ir.Constant(i64, 0)
v = _res_i64(args[1]) if len(args) > 1 else ir.Constant(i64, 0)
if v is None:
v = vmap.get(args[1], ir.Constant(i64, 0)) if len(args) > 1 else ir.Constant(i64, 0)
callee = _declare(module, "nyash.map.set_hh", i64, [i64, i64, i64])
res = builder.call(callee, [recv_h, k, v], name="map_set_hh")
if dst_vid is not None:
vmap[dst_vid] = res
return
if method_name == "has":
# MapBox.has(key) → nyash.map.has_hh(handle, key_any)
recv_h = _ensure_handle(builder, module, recv_val)
k = _res_i64(args[0]) if args else ir.Constant(i64, 0)
if k is None:
k = vmap.get(args[0], ir.Constant(i64, 0)) if args else ir.Constant(i64, 0)
callee = _declare(module, "nyash.map.has_hh", i64, [i64, i64])
res = builder.call(callee, [recv_h, k], name="map_has_hh")
if dst_vid is not None:
vmap[dst_vid] = res
return
if method_name in ("print", "println", "log"):
# Console mapping (prefer pointer-API when possible to avoid handle registry mismatch)
use_ptr = False
arg0_vid = args[0] if args else None
arg0_ptr = None
if resolver is not None and hasattr(resolver, 'string_ptrs') and arg0_vid is not None:
try:
arg0_ptr = resolver.string_ptrs.get(int(arg0_vid))
if arg0_ptr is not None:
use_ptr = True
except Exception:
pass
if use_ptr and arg0_ptr is not None:
callee = _declare(module, "nyash.console.log", i64, [i8p])
_ = builder.call(callee, [arg0_ptr], name="console_log_ptr")
else:
# Fallback: prefer raw vmap value; resolve only if missing (avoid synthesizing PHIs here)
arg0 = vmap.get(args[0]) if args else None
if arg0 is None and resolver is not None and preds is not None and block_end_values is not None and bb_map is not None:
arg0 = resolver.resolve_i64(args[0], builder.block, preds, block_end_values, vmap, bb_map)
if arg0 is None:
arg0 = ir.Constant(i64, 0)
# If we have a handle (i64), convert to i8* via bridge and log via pointer API
if hasattr(arg0, 'type') and isinstance(arg0.type, ir.IntType):
if arg0.type.width != 64:
arg0 = builder.zext(arg0, i64)
bridge = _declare(module, "nyash.string.to_i8p_h", i8p, [i64])
p = builder.call(bridge, [arg0], name="str_h2p_for_log")
callee = _declare(module, "nyash.console.log", i64, [i8p])
_ = builder.call(callee, [p], name="console_log_p")
else:
# Non-integer value: coerce to i8* and log
if hasattr(arg0, 'type') and isinstance(arg0.type, ir.IntType):
arg0 = builder.inttoptr(arg0, i8p)
callee = _declare(module, "nyash.console.log", i64, [i8p])
_ = builder.call(callee, [arg0], name="console_log")
if dst_vid is not None:
vmap[dst_vid] = ir.Constant(i64, 0)
return
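    # Summary of the console branch above: a known i8* (resolver.string_ptrs) is logged
    # directly; otherwise an i64 handle is bridged to i8* with nyash.string.to_i8p_h
    # before calling nyash.console.log.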
# Special: method on `me` (self) or static dispatch to Main.* → direct call to `Main.method/arity`
try:
cur_fn_name = str(builder.block.parent.name)
except Exception:
cur_fn_name = ''
# Heuristic: MIR encodes `me` as a string literal "__me__" or sometimes value-id 0.
is_me = False
try:
if box_vid == 0:
is_me = True
# Prefer literal marker captured by resolver (from const lowering)
elif resolver is not None and hasattr(resolver, 'string_literals'):
lit = resolver.string_literals.get(box_vid)
if lit == "__me__":
is_me = True
except Exception:
pass
if is_me and cur_fn_name.startswith('Main.'):
# NamingBox SSOT: Build target function name with arity
arity = len(args)
target = encode_static_method("Main", method_name, arity)
# If module already has such function, prefer direct call
callee = None
for f in module.functions:
if f.name == target:
callee = f
break
if callee is not None:
a = []
for i, aid in enumerate(args):
raw = vmap.get(aid)
if raw is not None and hasattr(raw, 'type') and isinstance(raw.type, ir.PointerType):
aval = _ensure_handle(builder, module, raw)
else:
if resolver is not None and preds is not None and block_end_values is not None and bb_map is not None:
aval = resolver.resolve_i64(aid, builder.block, preds, block_end_values, vmap, bb_map)
else:
aval = vmap.get(aid, ir.Constant(ir.IntType(64), 0))
if hasattr(aval, 'type') and isinstance(aval.type, ir.PointerType):
aval = _ensure_handle(builder, module, aval)
elif hasattr(aval, 'type') and isinstance(aval.type, ir.IntType) and aval.type.width != 64:
aval = builder.zext(aval, ir.IntType(64)) if aval.type.width < 64 else builder.trunc(aval, ir.IntType(64))
a.append(aval)
res = builder.call(callee, a, name=f"call_self_{method_name}")
if dst_vid is not None:
vmap[dst_vid] = res
try:
if method_name in ("esc_json", "node_json", "dirname", "join", "read_all") and resolver is not None and hasattr(resolver, 'mark_string'):
resolver.mark_string(dst_vid)
except Exception:
pass
return
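        # Illustrative: inside Main.main/1, a call on `me` such as me.helper(x) resolves to the
        # module function "Main.helper/1" (encode_static_method("Main", "helper", 1)) when that
        # symbol already exists; `helper` is a hypothetical user-defined method here.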
# Default: invoke via NyRT by-name shim (runtime resolves method id)
recv_h = _ensure_handle(builder, module, recv_val)
# Build C string for method name
mbytes = (method_name + "\0").encode('utf-8')
arr_ty = ir.ArrayType(ir.IntType(8), len(mbytes))
try:
fn = builder.block.parent
fn_name = getattr(fn, 'name', 'fn')
except Exception:
fn_name = 'fn'
base = f".meth_{fn_name}_{method_name}"
existing = {g.name for g in module.global_values}
gname = base
k = 1
while gname in existing:
gname = f"{base}.{k}"; k += 1
g = ir.GlobalVariable(module, arr_ty, name=gname)
g.linkage = 'private'
g.global_constant = True
g.initializer = ir.Constant(arr_ty, bytearray(mbytes))
c0 = ir.Constant(ir.IntType(32), 0)
# Compute GEP in the current block so it is naturally ordered before the call
# Use constant GEP so we don't depend on instruction ordering
mptr = ir.Constant.gep(g, (c0, c0))
# Up to 2 args for minimal path
argc = ir.Constant(i64, min(len(args), 2))
if resolver is not None and preds is not None and block_end_values is not None and bb_map is not None:
a1 = resolver.resolve_i64(args[0], builder.block, preds, block_end_values, vmap, bb_map) if len(args) >= 1 else ir.Constant(i64, 0)
a2 = resolver.resolve_i64(args[1], builder.block, preds, block_end_values, vmap, bb_map) if len(args) >= 2 else ir.Constant(i64, 0)
else:
a1 = vmap.get(args[0], ir.Constant(i64, 0)) if len(args) >= 1 else ir.Constant(i64, 0)
a2 = vmap.get(args[1], ir.Constant(i64, 0)) if len(args) >= 2 else ir.Constant(i64, 0)
if hasattr(a1, 'type') and isinstance(a1.type, ir.PointerType):
a1 = builder.ptrtoint(a1, i64)
if hasattr(a2, 'type') and isinstance(a2.type, ir.PointerType):
a2 = builder.ptrtoint(a2, i64)
callee = _declare(module, "nyash.plugin.invoke_by_name_i64", i64, [i64, i8p, i64, i64, i64])
result = builder.call(callee, [recv_h, mptr, argc, a1, a2], name="pinvoke_by_name")
if dst_vid is not None:
vmap[dst_vid] = result
# Heuristic tagging: common plugin methods returning strings
try:
if resolver is not None and hasattr(resolver, 'mark_string') and method_name in ("read", "dirname", "join"):
resolver.mark_string(dst_vid)
except Exception:
pass
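    # The fallback call above has the shape (illustrative):
    #   call i64 @"nyash.plugin.invoke_by_name_i64"(i64 %recv, i8* %method, i64 %argc, i64 %a1, i64 %a2)
    # Only the first two MIR arguments are forwarded on this minimal path.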