zexus 1.6.8 → 1.7.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +12 -5
- package/package.json +1 -1
- package/src/__init__.py +7 -0
- package/src/zexus/__init__.py +1 -1
- package/src/zexus/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/capability_system.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/debug_sanitizer.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/environment.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/error_reporter.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/input_validation.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/lexer.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/module_cache.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/module_manager.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/object.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/security.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/security_enforcement.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/syntax_validator.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/zexus_ast.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/zexus_token.cpython-312.pyc +0 -0
- package/src/zexus/access_control_system/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/access_control_system/__pycache__/access_control.cpython-312.pyc +0 -0
- package/src/zexus/advanced_types.py +17 -2
- package/src/zexus/blockchain/__init__.py +411 -0
- package/src/zexus/blockchain/accelerator.py +1160 -0
- package/src/zexus/blockchain/chain.py +660 -0
- package/src/zexus/blockchain/consensus.py +821 -0
- package/src/zexus/blockchain/contract_vm.py +1019 -0
- package/src/zexus/blockchain/crypto.py +79 -14
- package/src/zexus/blockchain/events.py +526 -0
- package/src/zexus/blockchain/loadtest.py +721 -0
- package/src/zexus/blockchain/monitoring.py +350 -0
- package/src/zexus/blockchain/mpt.py +716 -0
- package/src/zexus/blockchain/multichain.py +951 -0
- package/src/zexus/blockchain/multiprocess_executor.py +338 -0
- package/src/zexus/blockchain/network.py +886 -0
- package/src/zexus/blockchain/node.py +666 -0
- package/src/zexus/blockchain/rpc.py +1203 -0
- package/src/zexus/blockchain/rust_bridge.py +421 -0
- package/src/zexus/blockchain/storage.py +423 -0
- package/src/zexus/blockchain/tokens.py +750 -0
- package/src/zexus/blockchain/upgradeable.py +1004 -0
- package/src/zexus/blockchain/verification.py +1602 -0
- package/src/zexus/blockchain/wallet.py +621 -0
- package/src/zexus/capability_system.py +184 -9
- package/src/zexus/cli/__pycache__/main.cpython-312.pyc +0 -0
- package/src/zexus/cli/main.py +383 -34
- package/src/zexus/cli/zpm.py +1 -1
- package/src/zexus/compiler/__pycache__/bytecode.cpython-312.pyc +0 -0
- package/src/zexus/compiler/__pycache__/lexer.cpython-312.pyc +0 -0
- package/src/zexus/compiler/__pycache__/parser.cpython-312.pyc +0 -0
- package/src/zexus/compiler/__pycache__/semantic.cpython-312.pyc +0 -0
- package/src/zexus/compiler/__pycache__/zexus_ast.cpython-312.pyc +0 -0
- package/src/zexus/compiler/bytecode.py +124 -7
- package/src/zexus/compiler/compat_runtime.py +6 -2
- package/src/zexus/compiler/lexer.py +16 -5
- package/src/zexus/compiler/parser.py +108 -7
- package/src/zexus/compiler/semantic.py +18 -19
- package/src/zexus/compiler/zexus_ast.py +26 -1
- package/src/zexus/concurrency_system.py +79 -0
- package/src/zexus/config.py +54 -0
- package/src/zexus/crypto_bridge.py +244 -8
- package/src/zexus/dap/__init__.py +10 -0
- package/src/zexus/dap/__main__.py +4 -0
- package/src/zexus/dap/dap_server.py +391 -0
- package/src/zexus/dap/debug_engine.py +298 -0
- package/src/zexus/environment.py +112 -9
- package/src/zexus/evaluator/__pycache__/bytecode_compiler.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/core.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/expressions.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/functions.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/resource_limiter.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/statements.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/unified_execution.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/utils.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/bytecode_compiler.py +457 -37
- package/src/zexus/evaluator/core.py +644 -50
- package/src/zexus/evaluator/expressions.py +358 -62
- package/src/zexus/evaluator/functions.py +458 -20
- package/src/zexus/evaluator/resource_limiter.py +4 -4
- package/src/zexus/evaluator/statements.py +774 -122
- package/src/zexus/evaluator/unified_execution.py +573 -72
- package/src/zexus/evaluator/utils.py +14 -2
- package/src/zexus/evaluator_original.py +1 -1
- package/src/zexus/event_loop.py +186 -0
- package/src/zexus/lexer.py +742 -458
- package/src/zexus/lsp/__init__.py +1 -1
- package/src/zexus/lsp/definition_provider.py +163 -9
- package/src/zexus/lsp/server.py +22 -8
- package/src/zexus/lsp/symbol_provider.py +182 -9
- package/src/zexus/module_cache.py +239 -9
- package/src/zexus/module_manager.py +129 -1
- package/src/zexus/object.py +76 -6
- package/src/zexus/parser/__pycache__/parser.cpython-312.pyc +0 -0
- package/src/zexus/parser/__pycache__/strategy_context.cpython-312.pyc +0 -0
- package/src/zexus/parser/__pycache__/strategy_structural.cpython-312.pyc +0 -0
- package/src/zexus/parser/parser.py +1349 -408
- package/src/zexus/parser/strategy_context.py +755 -58
- package/src/zexus/parser/strategy_structural.py +121 -21
- package/src/zexus/persistence.py +15 -1
- package/src/zexus/renderer/__init__.py +61 -0
- package/src/zexus/renderer/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/renderer/__pycache__/backend.cpython-312.pyc +0 -0
- package/src/zexus/renderer/__pycache__/canvas.cpython-312.pyc +0 -0
- package/src/zexus/renderer/__pycache__/color_system.cpython-312.pyc +0 -0
- package/src/zexus/renderer/__pycache__/layout.cpython-312.pyc +0 -0
- package/src/zexus/renderer/__pycache__/main_renderer.cpython-312.pyc +0 -0
- package/src/zexus/renderer/__pycache__/painter.cpython-312.pyc +0 -0
- package/src/zexus/renderer/backend.py +261 -0
- package/src/zexus/renderer/canvas.py +78 -0
- package/src/zexus/renderer/color_system.py +201 -0
- package/src/zexus/renderer/graphics.py +31 -0
- package/src/zexus/renderer/layout.py +222 -0
- package/src/zexus/renderer/main_renderer.py +66 -0
- package/src/zexus/renderer/painter.py +30 -0
- package/src/zexus/renderer/tk_backend.py +208 -0
- package/src/zexus/renderer/web_backend.py +260 -0
- package/src/zexus/runtime/__init__.py +10 -2
- package/src/zexus/runtime/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/runtime/__pycache__/async_runtime.cpython-312.pyc +0 -0
- package/src/zexus/runtime/__pycache__/load_manager.cpython-312.pyc +0 -0
- package/src/zexus/runtime/file_flags.py +137 -0
- package/src/zexus/runtime/load_manager.py +368 -0
- package/src/zexus/safety/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/safety/__pycache__/memory_safety.cpython-312.pyc +0 -0
- package/src/zexus/security.py +424 -34
- package/src/zexus/stdlib/fs.py +23 -18
- package/src/zexus/stdlib/http.py +289 -186
- package/src/zexus/stdlib/sockets.py +207 -163
- package/src/zexus/stdlib/websockets.py +282 -0
- package/src/zexus/stdlib_integration.py +369 -2
- package/src/zexus/strategy_recovery.py +6 -3
- package/src/zexus/type_checker.py +423 -0
- package/src/zexus/virtual_filesystem.py +189 -2
- package/src/zexus/vm/__init__.py +113 -3
- package/src/zexus/vm/__pycache__/async_optimizer.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/bytecode.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/bytecode_converter.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/cache.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/compiler.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/gas_metering.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/jit.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/parallel_vm.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/vm.cpython-312.pyc +0 -0
- package/src/zexus/vm/async_optimizer.py +80 -6
- package/src/zexus/vm/binary_bytecode.py +659 -0
- package/src/zexus/vm/bytecode.py +59 -11
- package/src/zexus/vm/bytecode_converter.py +26 -12
- package/src/zexus/vm/cabi.c +1985 -0
- package/src/zexus/vm/cabi.cpython-312-x86_64-linux-gnu.so +0 -0
- package/src/zexus/vm/cabi.h +127 -0
- package/src/zexus/vm/cache.py +561 -17
- package/src/zexus/vm/compiler.py +818 -51
- package/src/zexus/vm/fastops.c +15743 -0
- package/src/zexus/vm/fastops.cpython-312-x86_64-linux-gnu.so +0 -0
- package/src/zexus/vm/fastops.pyx +288 -0
- package/src/zexus/vm/gas_metering.py +50 -9
- package/src/zexus/vm/jit.py +364 -20
- package/src/zexus/vm/native_jit_backend.py +1816 -0
- package/src/zexus/vm/native_runtime.cpp +1388 -0
- package/src/zexus/vm/native_runtime.cpython-312-x86_64-linux-gnu.so +0 -0
- package/src/zexus/vm/optimizer.py +161 -11
- package/src/zexus/vm/parallel_vm.py +140 -45
- package/src/zexus/vm/peephole_optimizer.py +82 -4
- package/src/zexus/vm/profiler.py +38 -18
- package/src/zexus/vm/register_allocator.py +16 -5
- package/src/zexus/vm/register_vm.py +8 -5
- package/src/zexus/vm/vm.py +3581 -531
- package/src/zexus/vm/wasm_compiler.py +658 -0
- package/src/zexus/zexus_ast.py +137 -11
- package/src/zexus/zexus_token.py +16 -5
- package/src/zexus/zpm/installer.py +55 -15
- package/src/zexus/zpm/package_manager.py +1 -1
- package/src/zexus/zpm/registry.py +257 -28
- package/src/zexus.egg-info/PKG-INFO +16 -6
- package/src/zexus.egg-info/SOURCES.txt +129 -17
- package/src/zexus.egg-info/entry_points.txt +1 -0
- package/src/zexus.egg-info/requires.txt +4 -0
package/src/zexus/vm/vm.py
CHANGED
|
@@ -4,6 +4,7 @@ Integrated Extended VM for Zexus.
|
|
|
4
4
|
Capabilities:
|
|
5
5
|
- Architecture: Stack, Register, and Parallel execution modes.
|
|
6
6
|
- Compilation: Tiered compilation with JIT (Hot path detection).
|
|
7
|
+
self._ensure_recursion_headroom()
|
|
7
8
|
- Memory: Managed memory with Garbage Collection.
|
|
8
9
|
- Formats: High-level ops list and Low-level Bytecode.
|
|
9
10
|
- Features: Async primitives (SPAWN/AWAIT), Event System, Module Imports.
|
|
@@ -14,6 +15,7 @@ import os
|
|
|
14
15
|
import sys
|
|
15
16
|
import time
|
|
16
17
|
import asyncio
|
|
18
|
+
import threading
|
|
17
19
|
import importlib
|
|
18
20
|
import hashlib
|
|
19
21
|
import types
|
|
@@ -28,6 +30,7 @@ from ..object import (
|
|
|
28
30
|
List as ZList,
|
|
29
31
|
Map as ZMap,
|
|
30
32
|
Null as ZNull,
|
|
33
|
+
EvaluationError as ZEvaluationError,
|
|
31
34
|
)
|
|
32
35
|
|
|
33
36
|
# ==================== Backend / Optional Imports ====================
|
|
@@ -90,6 +93,49 @@ except ImportError:
|
|
|
90
93
|
PeepholeOptimizer = None
|
|
91
94
|
OptimizationLevel = None
|
|
92
95
|
|
|
96
|
+
# Bytecode Optimizer (Phase 8)
|
|
97
|
+
try:
|
|
98
|
+
from .optimizer import BytecodeOptimizer
|
|
99
|
+
_BYTECODE_OPTIMIZER_AVAILABLE = True
|
|
100
|
+
except ImportError:
|
|
101
|
+
_BYTECODE_OPTIMIZER_AVAILABLE = False
|
|
102
|
+
BytecodeOptimizer = None
|
|
103
|
+
|
|
104
|
+
# Cached Action/Lambda types for hot-path sync checks
|
|
105
|
+
_ZAction = None
|
|
106
|
+
_ZLambda = None
|
|
107
|
+
_security_mod = None
|
|
108
|
+
_iscoroutinefunction = asyncio.iscoroutinefunction
|
|
109
|
+
|
|
110
|
+
def _get_action_types():
|
|
111
|
+
global _ZAction, _ZLambda
|
|
112
|
+
if _ZAction is None:
|
|
113
|
+
try:
|
|
114
|
+
from ..object import Action as _A, LambdaFunction as _L
|
|
115
|
+
_ZAction = _A
|
|
116
|
+
_ZLambda = _L
|
|
117
|
+
except Exception:
|
|
118
|
+
pass
|
|
119
|
+
return _ZAction, _ZLambda
|
|
120
|
+
|
|
121
|
+
def _get_security_mod():
|
|
122
|
+
global _security_mod
|
|
123
|
+
if _security_mod is None:
|
|
124
|
+
try:
|
|
125
|
+
from .. import security as _s
|
|
126
|
+
_security_mod = _s
|
|
127
|
+
except Exception:
|
|
128
|
+
pass
|
|
129
|
+
return _security_mod
|
|
130
|
+
|
|
131
|
+
# Cython fast-path (optional)
|
|
132
|
+
try:
|
|
133
|
+
from . import fastops as _fastops
|
|
134
|
+
_FASTOPS_AVAILABLE = True
|
|
135
|
+
except Exception:
|
|
136
|
+
_FASTOPS_AVAILABLE = False
|
|
137
|
+
_fastops = None
|
|
138
|
+
|
|
93
139
|
# Async Optimizer (Phase 8)
|
|
94
140
|
try:
|
|
95
141
|
from .async_optimizer import AsyncOptimizer, AsyncOptimizationLevel
|
|
@@ -109,9 +155,17 @@ except ImportError:
|
|
|
109
155
|
SSAConverter = None
|
|
110
156
|
RegisterAllocator = None
|
|
111
157
|
|
|
158
|
+
# Bytecode Converter (Stack -> Register)
|
|
159
|
+
try:
|
|
160
|
+
from .bytecode_converter import BytecodeConverter
|
|
161
|
+
_BYTECODE_CONVERTER_AVAILABLE = True
|
|
162
|
+
except ImportError:
|
|
163
|
+
_BYTECODE_CONVERTER_AVAILABLE = False
|
|
164
|
+
BytecodeConverter = None
|
|
165
|
+
|
|
112
166
|
# Renderer Backend
|
|
113
167
|
try:
|
|
114
|
-
from renderer import backend as _BACKEND
|
|
168
|
+
from ..renderer import backend as _BACKEND
|
|
115
169
|
_BACKEND_AVAILABLE = True
|
|
116
170
|
except Exception:
|
|
117
171
|
_BACKEND_AVAILABLE = False
|
|
@@ -146,6 +200,130 @@ class Cell:
|
|
|
146
200
|
return f"<Cell {self.value!r}>"
|
|
147
201
|
|
|
148
202
|
|
|
203
|
+
def _to_string_value(arg):
|
|
204
|
+
if isinstance(arg, ZString):
|
|
205
|
+
return arg.value
|
|
206
|
+
if isinstance(arg, ZInteger):
|
|
207
|
+
return str(arg.value)
|
|
208
|
+
if isinstance(arg, ZFloat):
|
|
209
|
+
return str(arg.value)
|
|
210
|
+
if isinstance(arg, ZBoolean):
|
|
211
|
+
return "true" if getattr(arg, "value", False) else "false"
|
|
212
|
+
if isinstance(arg, ZList):
|
|
213
|
+
try:
|
|
214
|
+
return arg.inspect()
|
|
215
|
+
except Exception:
|
|
216
|
+
return str(arg)
|
|
217
|
+
if isinstance(arg, ZMap):
|
|
218
|
+
try:
|
|
219
|
+
return arg.inspect()
|
|
220
|
+
except Exception:
|
|
221
|
+
return str(arg)
|
|
222
|
+
if arg is None or isinstance(arg, ZNull):
|
|
223
|
+
return "null"
|
|
224
|
+
if isinstance(arg, bool):
|
|
225
|
+
return "true" if arg else "false"
|
|
226
|
+
if isinstance(arg, (int, float)):
|
|
227
|
+
return str(arg)
|
|
228
|
+
if hasattr(arg, "inspect") and callable(getattr(arg, "inspect")):
|
|
229
|
+
try:
|
|
230
|
+
return arg.inspect()
|
|
231
|
+
except Exception:
|
|
232
|
+
return str(arg)
|
|
233
|
+
return str(arg)
|
|
234
|
+
|
|
235
|
+
|
|
236
|
+
def _fallback_string(args):
|
|
237
|
+
if len(args) != 1:
|
|
238
|
+
return ZEvaluationError("string() takes exactly 1 argument")
|
|
239
|
+
return ZString(_to_string_value(args[0]))
|
|
240
|
+
|
|
241
|
+
|
|
242
|
+
def _fallback_int(args):
|
|
243
|
+
if len(args) != 1:
|
|
244
|
+
return ZEvaluationError("int() takes exactly 1 argument")
|
|
245
|
+
value = args[0]
|
|
246
|
+
if isinstance(value, ZInteger):
|
|
247
|
+
return value
|
|
248
|
+
if isinstance(value, ZFloat):
|
|
249
|
+
return ZInteger(int(value.value))
|
|
250
|
+
if isinstance(value, ZString):
|
|
251
|
+
try:
|
|
252
|
+
return ZInteger(int(value.value))
|
|
253
|
+
except ValueError:
|
|
254
|
+
return ZEvaluationError(f"Cannot convert '{value.value}' to integer")
|
|
255
|
+
if isinstance(value, (int, float)):
|
|
256
|
+
return ZInteger(int(value))
|
|
257
|
+
return ZEvaluationError(f"int() not supported for type {type(value).__name__}")
|
|
258
|
+
|
|
259
|
+
|
|
260
|
+
def _fallback_float(args):
|
|
261
|
+
if len(args) != 1:
|
|
262
|
+
return ZEvaluationError("float() takes exactly 1 argument")
|
|
263
|
+
value = args[0]
|
|
264
|
+
if isinstance(value, ZFloat):
|
|
265
|
+
return value
|
|
266
|
+
if isinstance(value, ZInteger):
|
|
267
|
+
return ZFloat(float(value.value))
|
|
268
|
+
if isinstance(value, ZString):
|
|
269
|
+
try:
|
|
270
|
+
return ZFloat(float(value.value))
|
|
271
|
+
except ValueError:
|
|
272
|
+
return ZEvaluationError(f"Cannot convert '{value.value}' to float")
|
|
273
|
+
if isinstance(value, (int, float)):
|
|
274
|
+
return ZFloat(float(value))
|
|
275
|
+
return ZEvaluationError(f"float() not supported for type {type(value).__name__}")
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
def _fallback_len(args):
|
|
279
|
+
if len(args) != 1:
|
|
280
|
+
return ZEvaluationError("len() takes exactly 1 argument")
|
|
281
|
+
value = args[0]
|
|
282
|
+
if isinstance(value, ZString):
|
|
283
|
+
return ZInteger(len(value.value))
|
|
284
|
+
if isinstance(value, ZList):
|
|
285
|
+
return ZInteger(len(getattr(value, "elements", [])))
|
|
286
|
+
if isinstance(value, ZMap):
|
|
287
|
+
return ZInteger(len(getattr(value, "pairs", {})))
|
|
288
|
+
if isinstance(value, str):
|
|
289
|
+
return ZInteger(len(value))
|
|
290
|
+
if isinstance(value, list):
|
|
291
|
+
return ZInteger(len(value))
|
|
292
|
+
if isinstance(value, dict):
|
|
293
|
+
return ZInteger(len(value))
|
|
294
|
+
return ZEvaluationError(f"len() not supported for type {type(value).__name__}")
|
|
295
|
+
|
|
296
|
+
|
|
297
|
+
def _fallback_type(args):
|
|
298
|
+
if len(args) != 1:
|
|
299
|
+
return ZEvaluationError("type() takes exactly 1 argument")
|
|
300
|
+
value = args[0]
|
|
301
|
+
if isinstance(value, ZInteger):
|
|
302
|
+
return ZString("Integer")
|
|
303
|
+
if isinstance(value, ZFloat):
|
|
304
|
+
return ZString("Float")
|
|
305
|
+
if isinstance(value, ZString):
|
|
306
|
+
return ZString("String")
|
|
307
|
+
if isinstance(value, ZBoolean):
|
|
308
|
+
return ZString("Boolean")
|
|
309
|
+
if isinstance(value, ZList):
|
|
310
|
+
return ZString("List")
|
|
311
|
+
if isinstance(value, ZMap):
|
|
312
|
+
return ZString("Map")
|
|
313
|
+
if value is None or isinstance(value, ZNull):
|
|
314
|
+
return ZString("Null")
|
|
315
|
+
return ZString(type(value).__name__)
|
|
316
|
+
|
|
317
|
+
|
|
318
|
+
_FALLBACK_BUILTINS = {
|
|
319
|
+
"string": _fallback_string,
|
|
320
|
+
"int": _fallback_int,
|
|
321
|
+
"float": _fallback_float,
|
|
322
|
+
"len": _fallback_len,
|
|
323
|
+
"type": _fallback_type,
|
|
324
|
+
}
|
|
325
|
+
|
|
326
|
+
|
|
149
327
|
class VM:
|
|
150
328
|
"""
|
|
151
329
|
Main Virtual Machine integrating advanced architecture with rich feature set.
|
|
@@ -160,6 +338,7 @@ class VM:
|
|
|
160
338
|
jit_threshold: int = 100,
|
|
161
339
|
use_memory_manager: bool = False,
|
|
162
340
|
max_heap_mb: int = 100,
|
|
341
|
+
gc_threshold: int = 1000,
|
|
163
342
|
mode: VMMode = VMMode.AUTO,
|
|
164
343
|
worker_count: int = None,
|
|
165
344
|
chunk_size: int = 50,
|
|
@@ -168,18 +347,31 @@ class VM:
|
|
|
168
347
|
debug: bool = False,
|
|
169
348
|
enable_profiling: bool = False,
|
|
170
349
|
profiling_level: str = "DETAILED",
|
|
350
|
+
profiling_sample_rate: float = 1.0,
|
|
351
|
+
profiling_max_samples: int = 2048,
|
|
352
|
+
profiling_track_overhead: bool = False,
|
|
171
353
|
enable_memory_pool: bool = True,
|
|
172
354
|
pool_max_size: int = 1000,
|
|
173
355
|
enable_peephole_optimizer: bool = True,
|
|
356
|
+
enable_bytecode_optimizer: bool = False,
|
|
357
|
+
optimizer_level: int = 2,
|
|
174
358
|
optimization_level: str = "MODERATE",
|
|
175
359
|
enable_async_optimizer: bool = True,
|
|
176
360
|
async_optimization_level: str = "MODERATE",
|
|
177
361
|
enable_ssa: bool = False,
|
|
178
362
|
enable_register_allocation: bool = False,
|
|
363
|
+
enable_bytecode_converter: bool = False,
|
|
364
|
+
converter_aggressive: bool = False,
|
|
365
|
+
fast_single_shot: bool = False,
|
|
366
|
+
single_shot_max_instructions: int = 64,
|
|
179
367
|
num_allocator_registers: int = 16,
|
|
180
368
|
enable_gas_metering: bool = True,
|
|
181
369
|
gas_limit: int = None,
|
|
182
|
-
enable_timeout: bool = True
|
|
370
|
+
enable_timeout: bool = True,
|
|
371
|
+
enable_fast_loop: bool = False,
|
|
372
|
+
fast_loop_threshold: int = 512,
|
|
373
|
+
enable_gas_light: bool = False,
|
|
374
|
+
gas_light_cost: int = 1
|
|
183
375
|
):
|
|
184
376
|
"""
|
|
185
377
|
Initialize the enhanced VM.
|
|
@@ -189,9 +381,12 @@ class VM:
|
|
|
189
381
|
self.env = env or {}
|
|
190
382
|
self._parent_env = parent_env
|
|
191
383
|
self.debug = debug
|
|
384
|
+
self._register_import_builtins()
|
|
192
385
|
|
|
193
386
|
# --- Gas Metering (Security) ---
|
|
194
387
|
self.enable_gas_metering = enable_gas_metering and _GAS_METERING_AVAILABLE
|
|
388
|
+
self.enable_gas_light = bool(enable_gas_light)
|
|
389
|
+
self.gas_light_cost = max(1, int(gas_light_cost))
|
|
195
390
|
self.gas_metering = None
|
|
196
391
|
if self.enable_gas_metering:
|
|
197
392
|
self.gas_metering = GasMetering(gas_limit=gas_limit, enable_timeout=enable_timeout)
|
|
@@ -222,6 +417,34 @@ class VM:
|
|
|
222
417
|
self._total_execution_time = 0.0
|
|
223
418
|
self._mode_usage = {m.value: 0 for m in VMMode}
|
|
224
419
|
self._last_opcode_profile = None
|
|
420
|
+
self._call_method_trace_count = 0
|
|
421
|
+
self._call_method_total = 0
|
|
422
|
+
self._method_target_trace_count = 0
|
|
423
|
+
self._action_evaluator = None
|
|
424
|
+
self._opcode_exec_count = 0
|
|
425
|
+
self._in_execution = 0
|
|
426
|
+
self._native_jit_auto_enabled = False
|
|
427
|
+
self._native_jit_auto_threshold = 700
|
|
428
|
+
self._env_version = 0
|
|
429
|
+
self._name_cache: Dict[str, Tuple[Any, int]] = {}
|
|
430
|
+
self._method_cache: Dict[Tuple[type, str], Any] = {}
|
|
431
|
+
self.enable_fast_loop = bool(enable_fast_loop)
|
|
432
|
+
try:
|
|
433
|
+
env_threshold = os.environ.get("ZEXUS_VM_FAST_LOOP_THRESHOLD")
|
|
434
|
+
if env_threshold is not None:
|
|
435
|
+
fast_loop_threshold = int(env_threshold)
|
|
436
|
+
except Exception:
|
|
437
|
+
pass
|
|
438
|
+
self.fast_loop_threshold = max(1, int(fast_loop_threshold))
|
|
439
|
+
self._fast_loop_stats: Dict[str, Any] = {"used": False, "reason": ""}
|
|
440
|
+
|
|
441
|
+
# Round 3 optimizations
|
|
442
|
+
self._isinstance_cache: Dict[Tuple[int, type], bool] = {} # Cache isinstance results
|
|
443
|
+
self._vm_pool: List[Any] = [] # Pool of reusable child VMs
|
|
444
|
+
self._vm_pool_lock = None # Will be set if pooling enabled
|
|
445
|
+
|
|
446
|
+
self.prefer_register = False
|
|
447
|
+
self.prefer_parallel = False
|
|
225
448
|
|
|
226
449
|
# --- JIT Compilation (Phase 2) ---
|
|
227
450
|
self.use_jit = use_jit and _JIT_AVAILABLE
|
|
@@ -249,7 +472,7 @@ class VM:
|
|
|
249
472
|
self._memory_lock = threading.Lock()
|
|
250
473
|
self.memory_manager = create_memory_manager(
|
|
251
474
|
max_heap_mb=max_heap_mb,
|
|
252
|
-
gc_threshold=
|
|
475
|
+
gc_threshold=gc_threshold
|
|
253
476
|
)
|
|
254
477
|
|
|
255
478
|
# --- Profiler (Phase 8) ---
|
|
@@ -258,7 +481,12 @@ class VM:
|
|
|
258
481
|
if self.enable_profiling:
|
|
259
482
|
try:
|
|
260
483
|
level = getattr(ProfilingLevel, profiling_level, ProfilingLevel.DETAILED)
|
|
261
|
-
self.profiler = InstructionProfiler(
|
|
484
|
+
self.profiler = InstructionProfiler(
|
|
485
|
+
level=level,
|
|
486
|
+
sample_rate=profiling_sample_rate,
|
|
487
|
+
max_samples=profiling_max_samples,
|
|
488
|
+
track_overhead=profiling_track_overhead
|
|
489
|
+
)
|
|
262
490
|
if debug:
|
|
263
491
|
print(f"[VM] Profiler enabled: {profiling_level}")
|
|
264
492
|
except Exception as e:
|
|
@@ -297,6 +525,20 @@ class VM:
|
|
|
297
525
|
print(f"[VM] Failed to enable peephole optimizer: {e}")
|
|
298
526
|
self.enable_peephole_optimizer = False
|
|
299
527
|
|
|
528
|
+
# --- Bytecode Optimizer (Phase 8) ---
|
|
529
|
+
self.enable_bytecode_optimizer = enable_bytecode_optimizer and _BYTECODE_OPTIMIZER_AVAILABLE
|
|
530
|
+
self.bytecode_optimizer = None
|
|
531
|
+
if self.enable_bytecode_optimizer:
|
|
532
|
+
try:
|
|
533
|
+
level = max(0, min(3, int(optimizer_level)))
|
|
534
|
+
self.bytecode_optimizer = BytecodeOptimizer(level=level, max_passes=5, debug=debug)
|
|
535
|
+
if debug:
|
|
536
|
+
print(f"[VM] Bytecode optimizer enabled: level={level}")
|
|
537
|
+
except Exception as e:
|
|
538
|
+
if debug:
|
|
539
|
+
print(f"[VM] Failed to enable bytecode optimizer: {e}")
|
|
540
|
+
self.enable_bytecode_optimizer = False
|
|
541
|
+
|
|
300
542
|
# --- Async Optimizer (Phase 8) ---
|
|
301
543
|
self.enable_async_optimizer = enable_async_optimizer and _ASYNC_OPTIMIZER_AVAILABLE
|
|
302
544
|
self.async_optimizer = None
|
|
@@ -316,6 +558,15 @@ class VM:
|
|
|
316
558
|
self.enable_register_allocation = enable_register_allocation and _SSA_AVAILABLE
|
|
317
559
|
self.ssa_converter = None
|
|
318
560
|
self.register_allocator = None
|
|
561
|
+
self.enable_bytecode_converter = enable_bytecode_converter and _BYTECODE_CONVERTER_AVAILABLE
|
|
562
|
+
self.bytecode_converter = None
|
|
563
|
+
|
|
564
|
+
# --- Fast single-shot execution ---
|
|
565
|
+
self.fast_single_shot = bool(fast_single_shot)
|
|
566
|
+
try:
|
|
567
|
+
self.single_shot_max_instructions = int(single_shot_max_instructions)
|
|
568
|
+
except Exception:
|
|
569
|
+
self.single_shot_max_instructions = 64
|
|
319
570
|
|
|
320
571
|
if self.enable_ssa:
|
|
321
572
|
try:
|
|
@@ -333,12 +584,28 @@ class VM:
|
|
|
333
584
|
num_registers=num_allocator_registers,
|
|
334
585
|
num_temp_registers=8
|
|
335
586
|
)
|
|
587
|
+
self._last_register_allocation = None
|
|
336
588
|
if debug:
|
|
337
589
|
print(f"[VM] Register allocator enabled: {num_allocator_registers} registers")
|
|
338
590
|
except Exception as e:
|
|
339
591
|
if debug:
|
|
340
592
|
print(f"[VM] Failed to enable register allocator: {e}")
|
|
341
593
|
self.enable_register_allocation = False
|
|
594
|
+
self._last_register_allocation = None
|
|
595
|
+
|
|
596
|
+
if self.enable_bytecode_converter:
|
|
597
|
+
try:
|
|
598
|
+
self.bytecode_converter = BytecodeConverter(
|
|
599
|
+
num_registers=num_registers,
|
|
600
|
+
aggressive=converter_aggressive,
|
|
601
|
+
debug=debug
|
|
602
|
+
)
|
|
603
|
+
if debug:
|
|
604
|
+
print(f"[VM] Bytecode converter enabled: aggressive={converter_aggressive}")
|
|
605
|
+
except Exception as e:
|
|
606
|
+
if debug:
|
|
607
|
+
print(f"[VM] Failed to enable bytecode converter: {e}")
|
|
608
|
+
self.enable_bytecode_converter = False
|
|
342
609
|
|
|
343
610
|
# --- Execution Mode Configuration ---
|
|
344
611
|
self.mode = mode
|
|
@@ -366,37 +633,194 @@ class VM:
|
|
|
366
633
|
if debug:
|
|
367
634
|
print(f"[VM] Initialized | Mode: {mode.value} | JIT: {self.use_jit} | MemMgr: {self.use_memory_manager}")
|
|
368
635
|
|
|
636
|
+
def _return_vm_to_pool(self, vm) -> None:
|
|
637
|
+
"""Return a child VM to the pool for reuse."""
|
|
638
|
+
if hasattr(self, "_vm_pool") and self._vm_pool is not None:
|
|
639
|
+
if len(self._vm_pool) < 1000:
|
|
640
|
+
self._vm_pool.append(vm)
|
|
641
|
+
|
|
642
|
+
@classmethod
|
|
643
|
+
def create_child(cls, parent_vm, env: Dict[str, Any], builtins: Dict[str, Any] = None):
|
|
644
|
+
"""
|
|
645
|
+
Create a lightweight child VM execution context sharing infrastructure/components from parent.
|
|
646
|
+
Avoids overhead of full initialization for function calls.
|
|
647
|
+
"""
|
|
648
|
+
vm = None
|
|
649
|
+
if hasattr(parent_vm, "_vm_pool") and parent_vm._vm_pool:
|
|
650
|
+
try:
|
|
651
|
+
vm = parent_vm._vm_pool.pop()
|
|
652
|
+
except IndexError:
|
|
653
|
+
pass
|
|
654
|
+
|
|
655
|
+
if vm is None:
|
|
656
|
+
vm = cls.__new__(cls)
|
|
657
|
+
|
|
658
|
+
# Core Context
|
|
659
|
+
vm.builtins = builtins if builtins is not None else parent_vm.builtins
|
|
660
|
+
vm.env = env
|
|
661
|
+
vm._parent_env = parent_vm
|
|
662
|
+
vm.debug = parent_vm.debug
|
|
663
|
+
vm.mode = parent_vm.mode
|
|
664
|
+
|
|
665
|
+
# Infrastructure (shared)
|
|
666
|
+
vm.use_jit = parent_vm.use_jit
|
|
667
|
+
vm.jit_compiler = parent_vm.jit_compiler
|
|
668
|
+
vm._jit_lock = parent_vm._jit_lock
|
|
669
|
+
|
|
670
|
+
vm.use_memory_manager = parent_vm.use_memory_manager
|
|
671
|
+
vm.memory_manager = parent_vm.memory_manager
|
|
672
|
+
vm._memory_lock = parent_vm._memory_lock
|
|
673
|
+
vm._managed_objects = {} # Local GC tracking
|
|
674
|
+
|
|
675
|
+
vm.enable_gas_metering = parent_vm.enable_gas_metering
|
|
676
|
+
vm.gas_metering = parent_vm.gas_metering
|
|
677
|
+
vm.enable_gas_light = getattr(parent_vm, "enable_gas_light", False)
|
|
678
|
+
vm.gas_light_cost = getattr(parent_vm, "gas_light_cost", 1)
|
|
679
|
+
|
|
680
|
+
vm.enable_profiling = parent_vm.enable_profiling
|
|
681
|
+
vm.profiler = parent_vm.profiler
|
|
682
|
+
|
|
683
|
+
# Optimizers (shared)
|
|
684
|
+
vm.enable_peephole_optimizer = parent_vm.enable_peephole_optimizer
|
|
685
|
+
vm.peephole_optimizer = parent_vm.peephole_optimizer
|
|
686
|
+
|
|
687
|
+
vm.enable_bytecode_optimizer = parent_vm.enable_bytecode_optimizer
|
|
688
|
+
vm.bytecode_optimizer = parent_vm.bytecode_optimizer
|
|
689
|
+
|
|
690
|
+
vm.enable_async_optimizer = parent_vm.enable_async_optimizer
|
|
691
|
+
vm.async_optimizer = parent_vm.async_optimizer
|
|
692
|
+
|
|
693
|
+
# Pools (shared)
|
|
694
|
+
vm.enable_memory_pool = parent_vm.enable_memory_pool
|
|
695
|
+
vm.integer_pool = parent_vm.integer_pool
|
|
696
|
+
vm.string_pool = parent_vm.string_pool
|
|
697
|
+
vm.list_pool = parent_vm.list_pool
|
|
698
|
+
|
|
699
|
+
# Advanced Features (shared)
|
|
700
|
+
vm.enable_ssa = parent_vm.enable_ssa
|
|
701
|
+
vm.ssa_converter = parent_vm.ssa_converter
|
|
702
|
+
vm.enable_register_allocation = parent_vm.enable_register_allocation
|
|
703
|
+
vm.register_allocator = parent_vm.register_allocator
|
|
704
|
+
|
|
705
|
+
vm.enable_bytecode_converter = parent_vm.enable_bytecode_converter
|
|
706
|
+
vm.bytecode_converter = parent_vm.bytecode_converter
|
|
707
|
+
|
|
708
|
+
# Execution Helpers (shared)
|
|
709
|
+
vm._register_vm = parent_vm._register_vm
|
|
710
|
+
vm._parallel_vm = parent_vm._parallel_vm
|
|
711
|
+
|
|
712
|
+
# Local State Init
|
|
713
|
+
vm._closure_cells = {}
|
|
714
|
+
vm._events = {}
|
|
715
|
+
vm._tasks = {}
|
|
716
|
+
vm._task_counter = 0
|
|
717
|
+
vm._env_version = 0
|
|
718
|
+
vm._name_cache = {}
|
|
719
|
+
vm._method_cache = {}
|
|
720
|
+
vm._execution_count = 0
|
|
721
|
+
vm._total_execution_time = 0.0
|
|
722
|
+
vm._mode_usage = {m.value: 0 for m in VMMode}
|
|
723
|
+
vm._last_opcode_profile = None
|
|
724
|
+
vm._call_method_trace_count = 0
|
|
725
|
+
vm._call_method_total = 0
|
|
726
|
+
vm._method_target_trace_count = 0
|
|
727
|
+
vm._action_evaluator = None
|
|
728
|
+
vm._opcode_exec_count = 0
|
|
729
|
+
vm._in_execution = 0
|
|
730
|
+
vm._native_jit_auto_enabled = parent_vm._native_jit_auto_enabled
|
|
731
|
+
vm._native_jit_auto_threshold = parent_vm._native_jit_auto_threshold
|
|
732
|
+
vm._perf_fast_dispatch = getattr(parent_vm, "_perf_fast_dispatch", False)
|
|
733
|
+
vm.enable_fast_loop = getattr(parent_vm, "enable_fast_loop", False)
|
|
734
|
+
vm.fast_loop_threshold = getattr(parent_vm, "fast_loop_threshold", 512)
|
|
735
|
+
vm._fast_loop_stats = {"used": False, "reason": ""}
|
|
736
|
+
|
|
737
|
+
# Settings
|
|
738
|
+
vm.worker_count = parent_vm.worker_count
|
|
739
|
+
vm.chunk_size = parent_vm.chunk_size
|
|
740
|
+
vm.num_registers = parent_vm.num_registers
|
|
741
|
+
vm.hybrid_mode = parent_vm.hybrid_mode
|
|
742
|
+
vm.fast_single_shot = parent_vm.fast_single_shot
|
|
743
|
+
vm.single_shot_max_instructions = parent_vm.single_shot_max_instructions
|
|
744
|
+
|
|
745
|
+
return vm
|
|
746
|
+
|
|
369
747
|
# ==================== VM <-> Evaluator Conversions ====================
|
|
370
748
|
|
|
371
749
|
@staticmethod
def _wrap_for_builtin(value: Any) -> Any:
    """Wrap a native Python value into its Zexus object counterpart.

    Recurses into lists and dicts; already-wrapped values and unknown
    types pass through unchanged.
    """
    # Fast path for primitives. Exact type() checks are used so that
    # bool does not match the int branch (type(True) is bool, not int).
    t = type(value)
    if t is int: return ZInteger(value)
    if t is str: return ZString(value)
    if t is bool: return ZBoolean(value)
    if t is float: return ZFloat(value)
    if value is None: return value

    # Already wrapped check
    if isinstance(value, (ZInteger, ZFloat, ZString, ZBoolean, ZList, ZMap)):
        return value

    # Recursive structures
    if t is list:
        return ZList([VM._wrap_for_builtin(elem) for elem in value])
    if t is dict:
        pairs = {}
        for key, val in value.items():
            # Normalize map keys to plain strings.
            if isinstance(key, ZString):
                norm_key = key.value
            elif isinstance(key, str):
                norm_key = key
            elif hasattr(key, "inspect"):
                # Zexus objects render themselves via inspect();
                # fall back to str() if that fails.
                try:
                    norm_key = key.inspect()
                except Exception:
                    norm_key = str(key)
            else:
                norm_key = str(key)
            wrapped_val = VM._wrap_for_builtin(val)
            pairs[norm_key] = wrapped_val
        return ZMap(pairs)
    # Unknown types (callables, host objects, ...) pass through untouched.
    return value
|
|
393
784
|
|
|
394
785
|
@staticmethod
|
|
395
|
-
def
|
|
396
|
-
|
|
397
|
-
|
|
786
|
+
def _format_print_value(value: Any) -> str:
|
|
787
|
+
"""Format a Zexus value for print output, unwrapping object wrappers."""
|
|
788
|
+
if value is None:
|
|
789
|
+
return "null"
|
|
790
|
+
t = type(value)
|
|
791
|
+
if t is int or t is str or t is float:
|
|
792
|
+
return str(value)
|
|
793
|
+
if t is bool:
|
|
794
|
+
return "true" if value else "false"
|
|
795
|
+
if isinstance(value, (ZInteger, ZFloat)):
|
|
796
|
+
return str(value.value)
|
|
398
797
|
if isinstance(value, ZString):
|
|
399
|
-
|
|
798
|
+
return value.value
|
|
799
|
+
if isinstance(value, ZBoolean):
|
|
800
|
+
return "true" if value.value else "false"
|
|
801
|
+
if isinstance(value, ZNull):
|
|
802
|
+
return "null"
|
|
803
|
+
if isinstance(value, ZList):
|
|
804
|
+
items = ", ".join(VM._format_print_value(e) for e in value.elements)
|
|
805
|
+
return f"[{items}]"
|
|
806
|
+
if isinstance(value, ZMap):
|
|
807
|
+
entries = ", ".join(
|
|
808
|
+
f"{VM._format_print_value(k)}: {VM._format_print_value(v)}"
|
|
809
|
+
for k, v in value.pairs.items()
|
|
810
|
+
)
|
|
811
|
+
return "{" + entries + "}"
|
|
812
|
+
if hasattr(value, "value") and not callable(getattr(value, "value")):
|
|
813
|
+
return str(value.value)
|
|
814
|
+
return str(value)
|
|
815
|
+
|
|
816
|
+
@staticmethod
|
|
817
|
+
def _unwrap_after_builtin(value: Any) -> Any:
|
|
818
|
+
# Fast path for common types
|
|
819
|
+
t = type(value)
|
|
820
|
+
if t is int or t is str or t is float or t is bool:
|
|
821
|
+
return value
|
|
822
|
+
|
|
823
|
+
if isinstance(value, (ZInteger, ZFloat, ZBoolean, ZString)):
|
|
400
824
|
return value.value
|
|
401
825
|
if isinstance(value, ZNull):
|
|
402
826
|
return None
|
|
@@ -421,6 +845,318 @@ class VM:
|
|
|
421
845
|
|
|
422
846
|
# ==================== Public Execution API ====================
|
|
423
847
|
|
|
848
|
+
def _run_coroutine_sync(self, coro):
    """Run a coroutine from sync code using the shared Zexus event loop.

    The persistent background loop means all VM async tasks share a
    single event loop and can coordinate via asyncio primitives.
    """
    from ..event_loop import submit, is_loop_thread
    if is_loop_thread():
        # Already on the event-loop thread — fall back to a throwaway
        # loop to avoid deadlock.
        loop = asyncio.new_event_loop()
        try:
            return loop.run_until_complete(coro)
        finally:
            loop.close()
    # submit() presumably blocks until the coroutine finishes on the
    # shared loop and returns its result — TODO confirm against
    # event_loop.submit.
    return submit(coro)
|
|
864
|
+
|
|
865
|
+
def _bump_env_version(self, name: Optional[str] = None, value: Any = None) -> None:
|
|
866
|
+
self._env_version += 1
|
|
867
|
+
if name is not None:
|
|
868
|
+
self._name_cache[name] = (value, self._env_version)
|
|
869
|
+
|
|
870
|
+
def _register_import_builtins(self) -> None:
|
|
871
|
+
if "__vm_use_module__" not in self.builtins:
|
|
872
|
+
self.builtins["__vm_use_module__"] = self._vm_use_module
|
|
873
|
+
if "__vm_from_module__" not in self.builtins:
|
|
874
|
+
self.builtins["__vm_from_module__"] = self._vm_from_module
|
|
875
|
+
|
|
876
|
+
def _vm_use_module(self, spec):
    """Builtin hook backing whole-module `use` imports.

    *spec* is a map/dict with keys "file", "alias", "names" and
    "is_named"; returns whatever _execute_import yields (the module
    environment, a host module, or None).
    """
    if spec is None:
        return None
    if isinstance(spec, ZMap):
        # Normalize a Zexus map into a plain dict before field access.
        spec = self._unwrap_after_builtin(spec)
    file_path = spec.get("file", "") if isinstance(spec, dict) else ""
    alias = spec.get("alias", "") if isinstance(spec, dict) else ""
    names = spec.get("names", []) if isinstance(spec, dict) else []
    is_named = bool(spec.get("is_named")) if isinstance(spec, dict) else False
    # Opt-in tracing via the ZEXUS_VM_IMPORT_TRACE environment variable.
    trace_imports = os.environ.get("ZEXUS_VM_IMPORT_TRACE")
    if trace_imports and trace_imports.lower() not in ("0", "false", "off"):
        print(f"[VM TRACE] __vm_use_module__ file={file_path} alias={alias} names={len(names)}")
    return self._execute_import(file_path, alias=alias, names=names, is_named=is_named)
|
|
889
|
+
|
|
890
|
+
def _vm_from_module(self, spec):
    """Builtin hook backing `from <module> use a, b as c` style imports."""
    if spec is None:
        return None
    if isinstance(spec, ZMap):
        spec = self._unwrap_after_builtin(spec)
    is_mapping = isinstance(spec, dict)
    file_path = spec.get("file", "") if is_mapping else ""
    entries = spec.get("imports", []) if is_mapping else []

    def _parse_entry(entry):
        # Accept {"name": ..., "alias": ...}, (name, alias) sequences,
        # or a bare name.
        if isinstance(entry, dict):
            return entry.get("name"), entry.get("alias")
        if isinstance(entry, (list, tuple)):
            picked = entry[0] if len(entry) > 0 else None
            renamed = entry[1] if len(entry) > 1 else None
            return picked, renamed
        return entry, None

    names = []
    alias_map = {}
    for entry in entries:
        name, alias = _parse_entry(entry)
        if not name:
            continue
        names.append(name)
        if alias:
            alias_map[name] = alias
    return self._execute_import(file_path, alias="", names=names, is_named=True, alias_map=alias_map)
|
|
914
|
+
|
|
915
|
+
def _module_env_to_map(self, module_env):
|
|
916
|
+
if module_env is None:
|
|
917
|
+
return None
|
|
918
|
+
if isinstance(module_env, dict):
|
|
919
|
+
return module_env
|
|
920
|
+
exports = None
|
|
921
|
+
if hasattr(module_env, "get_exports"):
|
|
922
|
+
try:
|
|
923
|
+
exports = module_env.get_exports()
|
|
924
|
+
except Exception:
|
|
925
|
+
exports = None
|
|
926
|
+
store = getattr(module_env, "store", None)
|
|
927
|
+
if isinstance(store, dict):
|
|
928
|
+
if exports and isinstance(exports, dict):
|
|
929
|
+
merged = dict(store)
|
|
930
|
+
merged.update(exports)
|
|
931
|
+
return merged
|
|
932
|
+
return store
|
|
933
|
+
if isinstance(module_env, ZMap):
|
|
934
|
+
mapped = {}
|
|
935
|
+
for key, value in module_env.pairs.items():
|
|
936
|
+
if isinstance(key, ZString):
|
|
937
|
+
mapped[key.value] = value
|
|
938
|
+
else:
|
|
939
|
+
mapped[str(key)] = value
|
|
940
|
+
return mapped
|
|
941
|
+
return None
|
|
942
|
+
|
|
943
|
+
def _get_importer_file(self) -> Optional[str]:
|
|
944
|
+
importer = self.env.get("__file__") if isinstance(self.env, dict) else None
|
|
945
|
+
if importer is None:
|
|
946
|
+
return None
|
|
947
|
+
if hasattr(importer, "value"):
|
|
948
|
+
return importer.value
|
|
949
|
+
if isinstance(importer, str):
|
|
950
|
+
return importer
|
|
951
|
+
return None
|
|
952
|
+
|
|
953
|
+
def _load_zexus_module_env(self, file_path: str):
    """Load a Zexus source module and return its populated Environment.

    Resolution order: module cache hit -> cached candidate paths ->
    fresh load (VM bytecode fast path, interpreter fallback). Returns
    None when no candidate can be loaded; raises RuntimeError on a
    circular import.
    """
    if os.environ.get("ZEXUS_DEBUG_COMPILER"):
        with open("debug_compiler_fail.log", "a") as f:
            f.write(f"[DEBUG] _load_zexus_module_env called for {file_path}\n")
    from ..module_cache import (get_cached_module, cache_module, get_module_candidates,
                                normalize_path, invalidate_module,
                                begin_loading, end_loading, CircularImportError)
    from ..object import Environment, String
    from ..lexer import Lexer
    from ..parser import Parser
    from ..evaluator.core import Evaluator

    # Cache hit on the normalized request path.
    normalized_path = normalize_path(file_path)
    cached = get_cached_module(normalized_path)
    if cached:
        module_env, bytecode, ast = cached
        return module_env

    # Cache hit on any resolved candidate path (relative to the importer).
    importer_file = self._get_importer_file()
    candidates = get_module_candidates(file_path, importer_file)
    for candidate in candidates:
        try:
            cached = get_cached_module(normalize_path(candidate))
            if cached:
                module_env, bytecode, ast = cached
                return module_env
        except Exception:
            continue

    # Circular import detection
    try:
        begin_loading(normalized_path)
    except CircularImportError as e:
        raise RuntimeError(str(e)) from e

    module_env = Environment()
    loaded = False
    compiled_bytecode = None
    parsed_ast = None

    try:
        for candidate in candidates:
            try:
                if not os.path.exists(candidate):
                    continue
                with open(candidate, "r", encoding="utf-8") as handle:
                    code = handle.read()
                lexer = Lexer(code)
                parser = Parser(lexer)
                program = parser.parse_program()
                # A candidate with parse errors is skipped, not fatal.
                if getattr(parser, "errors", None):
                    continue

                parsed_ast = program
                module_env.set("__file__", String(os.path.abspath(candidate)))
                module_env.set("__MODULE__", String(file_path))

                # Try to compile to bytecode and execute via VM (fast path)
                try:
                    from .compiler import BytecodeCompiler

                    # Phase 1: check for co-located .zxc binary first
                    try:
                        from .binary_bytecode import load_zxc, save_zxc, is_zxc_fresh, zxc_path_for
                        _zxc_path = zxc_path_for(candidate)
                        if is_zxc_fresh(candidate):
                            compiled_bytecode = load_zxc(_zxc_path)
                    except Exception:
                        pass

                    if compiled_bytecode is None:
                        compiler = BytecodeCompiler(optimize=True)
                        compiled_bytecode = compiler.compile(program)
                        # Persist .zxc for next run
                        if compiled_bytecode:
                            try:
                                from .binary_bytecode import save_zxc, zxc_path_for
                                save_zxc(zxc_path_for(candidate), compiled_bytecode)
                            except Exception:
                                pass

                    if compiled_bytecode:
                        # Execute module via VM (fast)
                        vm_env = {k: v for k, v in module_env.store.items()}
                        child_vm = VM.create_child(parent_vm=self, env=vm_env)
                        result = child_vm._run_stack_bytecode_sync(compiled_bytecode, debug=False)

                        # Update module environment from VM execution
                        for k, v in child_vm.env.items():
                            module_env.set(k, v)

                        self._return_vm_to_pool(child_vm)

                        # Cache under both the request path and the
                        # resolved candidate path.
                        cache_module(normalized_path, module_env, compiled_bytecode, parsed_ast)
                        cache_module(normalize_path(candidate), module_env, compiled_bytecode, parsed_ast)
                        loaded = True
                        break
                except Exception as e:
                    if os.environ.get("ZEXUS_DEBUG_COMPILER"):
                        print(f"[DEBUG] Compiler exception for {candidate}: {e}")
                    pass

                # Fallback to interpreter execution (slow path)
                if self._action_evaluator is None:
                    self._action_evaluator = Evaluator(use_vm=False)
                self._action_evaluator.eval_node(program, module_env)
                cache_module(normalized_path, module_env, None, parsed_ast)
                cache_module(normalize_path(candidate), module_env, None, parsed_ast)
                loaded = True
                break
            except Exception:
                continue
    finally:
        # Always clear the circular-import guard, even on failure.
        end_loading(normalized_path)

    if not loaded:
        # Drop any partial cache entry so a later import retries cleanly.
        try:
            invalidate_module(normalized_path)
        except Exception:
            pass
        return None
    return module_env
|
|
1075
|
+
|
|
1076
|
+
def _execute_import(self, module_path: str, alias: str = "", names: Optional[List[Any]] = None, is_named: bool = False, alias_map: Optional[Dict[str, str]] = None):
    """Resolve an import and bind its names into this VM's environment.

    Resolution order: Zexus stdlib module -> builtin module -> Zexus
    source module on disk -> Python module via importlib. Binding mode:
    named imports (is_named + names, honoring alias_map renames), a
    single alias binding, or a wildcard merge of every exported name.
    Returns the module environment / Python module, or None on failure.
    """
    if not module_path:
        return None
    names = names or []
    alias_map = alias_map or {}
    module_env = None
    module_map = None
    trace_imports = os.environ.get("ZEXUS_VM_IMPORT_TRACE")
    trace_enabled = trace_imports and trace_imports.lower() not in ("0", "false", "off")

    # Zexus stdlib / builtin modules take priority over disk lookup.
    try:
        from ..stdlib_integration import is_stdlib_module, get_stdlib_module
        from ..builtin_modules import is_builtin_module, get_builtin_module
        if is_stdlib_module(module_path):
            module_env = get_stdlib_module(module_path)
        elif is_builtin_module(module_path):
            module_env = get_builtin_module(module_path, None)
    except Exception:
        module_env = None

    if module_env is None:
        module_env = self._load_zexus_module_env(module_path)
        if trace_enabled:
            status = "ok" if module_env is not None else "failed"
            print(f"[VM TRACE] import {module_path} -> {status}")

    if module_env is not None:
        module_map = self._module_env_to_map(module_env) or {}
        if is_named and names:
            # `from m use a, b as c`: bind each requested name (renamed
            # via alias_map when present); missing names bind to None.
            for raw in names:
                key = raw.value if hasattr(raw, "value") else str(raw)
                dest = alias_map.get(key, key)
                value = module_map.get(key)
                self.env[dest] = value
                self._bump_env_version(dest, value)
        elif alias:
            # `use m as alias`: bind the whole flattened module map.
            self.env[alias] = module_map
            self._bump_env_version(alias, module_map)
        else:
            # Wildcard: merge every exported name into the environment.
            for key, value in module_map.items():
                self.env[key] = value
                self._bump_env_version(key, value)
        return module_env

    # Last resort: import as a Python module.
    try:
        mod = importlib.import_module(module_path)
        key = alias or module_path
        self.env[key] = mod
        self._bump_env_version(key, mod)
        return mod
    except Exception:
        # Bind None so later lookups fail predictably instead of raising
        # a name error at an unrelated point.
        key = alias or module_path
        self.env[key] = None
        self._bump_env_version(key, None)
        return None
|
|
1131
|
+
|
|
1132
|
+
def _get_cached_method(self, target: Any, method_name: str):
    """Resolve *target.method_name*, caching class-level attributes.

    The cache keys on (type(target), method_name) and stores the unbound
    class attribute, rebinding it per call via the descriptor protocol
    (attr.__get__). Instance attributes and container types bypass the
    cache entirely. Every failure path degrades to a plain getattr.
    """
    if target is None:
        return None
    # Containers resolve members dynamically elsewhere — never cache them.
    if isinstance(target, (dict, ZMap, ZList)):
        return None
    try:
        # An instance attribute shadows the class attribute; fetch it
        # directly and skip the class-level cache.
        if hasattr(target, "__dict__") and method_name in target.__dict__:
            return getattr(target, method_name, None)
    except Exception:
        return getattr(target, method_name, None)

    key = (type(target), method_name)
    cached = self._method_cache.get(key)
    if cached is not None:
        try:
            # Rebind the cached class attribute to this instance.
            # Non-descriptor attributes raise here and fall back.
            return cached.__get__(target, type(target))
        except Exception:
            return getattr(target, method_name, None)

    attr = getattr(type(target), method_name, None)
    if attr is not None:
        self._method_cache[key] = attr
        try:
            return attr.__get__(target, type(target))
        except Exception:
            return getattr(target, method_name, None)
    # Not found on the class: last-chance dynamic lookup (e.g. __getattr__).
    return getattr(target, method_name, None)
|
|
1159
|
+
|
|
424
1160
|
def execute(self, code: Union[List[Tuple], Any], debug: bool = False) -> Any:
|
|
425
1161
|
"""
|
|
426
1162
|
Execute code (High-level ops or Bytecode) using optimal execution mode.
|
|
@@ -428,6 +1164,7 @@ class VM:
|
|
|
428
1164
|
"""
|
|
429
1165
|
start_time = time.perf_counter()
|
|
430
1166
|
self._execution_count += 1
|
|
1167
|
+
self._in_execution = getattr(self, "_in_execution", 0) + 1
|
|
431
1168
|
|
|
432
1169
|
# Handle High-Level Ops (List format)
|
|
433
1170
|
if isinstance(code, list) and not hasattr(code, "instructions"):
|
|
@@ -435,7 +1172,7 @@ class VM:
|
|
|
435
1172
|
print("[VM] Executing High-Level Ops")
|
|
436
1173
|
try:
|
|
437
1174
|
# Run purely async internally, execute blocks
|
|
438
|
-
return
|
|
1175
|
+
return self._run_coroutine_sync(self._run_high_level_ops(code, debug or self.debug))
|
|
439
1176
|
except Exception as e:
|
|
440
1177
|
if debug or self.debug: print(f"[VM HL Error] {e}")
|
|
441
1178
|
raise e
|
|
@@ -444,6 +1181,10 @@ class VM:
|
|
|
444
1181
|
try:
|
|
445
1182
|
execution_mode = self._select_execution_mode(code)
|
|
446
1183
|
self._mode_usage[execution_mode.value] += 1
|
|
1184
|
+
|
|
1185
|
+
trace_mode = os.environ.get("ZEXUS_VM_TRACE_MODE")
|
|
1186
|
+
if trace_mode and trace_mode.lower() not in ("0", "false", "off"):
|
|
1187
|
+
print(f"[VM TRACE] execution mode={execution_mode.value}")
|
|
447
1188
|
|
|
448
1189
|
if debug or self.debug:
|
|
449
1190
|
print(f"[VM] Executing Bytecode | Mode: {execution_mode.value}")
|
|
@@ -456,31 +1197,60 @@ class VM:
|
|
|
456
1197
|
elif execution_mode == VMMode.PARALLEL and self._parallel_vm:
|
|
457
1198
|
result = self._execute_parallel(code, debug)
|
|
458
1199
|
|
|
459
|
-
# 3.
|
|
1200
|
+
# 3. Fast synchronous path for performance mode (no async overhead)
|
|
1201
|
+
elif getattr(self, '_perf_fast_dispatch', False):
|
|
1202
|
+
result = self._run_stack_bytecode_sync(code, debug)
|
|
1203
|
+
|
|
1204
|
+
# 4. Stack Mode (Standard/Fallback + Async Support)
|
|
460
1205
|
else:
|
|
461
|
-
result =
|
|
1206
|
+
result = self._run_coroutine_sync(self._execute_stack(code, debug))
|
|
462
1207
|
|
|
463
1208
|
# JIT Tracking
|
|
464
1209
|
if self.use_jit and hasattr(code, 'instructions'):
|
|
465
1210
|
execution_time = time.perf_counter() - start_time
|
|
466
1211
|
self._track_execution_for_jit(code, execution_time, execution_mode)
|
|
467
1212
|
|
|
1213
|
+
profile_print = os.environ.get("ZEXUS_VM_PROFILE_PRINT")
|
|
1214
|
+
if profile_print and profile_print.lower() not in ("0", "false", "off"):
|
|
1215
|
+
if self._last_opcode_profile:
|
|
1216
|
+
try:
|
|
1217
|
+
top_n = int(os.environ.get("ZEXUS_VM_PROFILE_TOP", "10"))
|
|
1218
|
+
except Exception:
|
|
1219
|
+
top_n = 10
|
|
1220
|
+
total_ops = sum(count for _, count in self._last_opcode_profile)
|
|
1221
|
+
elapsed = time.perf_counter() - start_time
|
|
1222
|
+
ops_per_sec = (total_ops / elapsed) if elapsed > 0 else 0.0
|
|
1223
|
+
print(f"[VM PROFILE] total_ops={total_ops} top={top_n} elapsed_ms={elapsed * 1000:.2f} ops_per_sec={ops_per_sec:.2f}")
|
|
1224
|
+
for op_name, count in self._last_opcode_profile[:top_n]:
|
|
1225
|
+
pct = (count / total_ops * 100) if total_ops else 0.0
|
|
1226
|
+
print(f"[VM PROFILE] {op_name} count={count} pct={pct:.2f}%")
|
|
468
1227
|
return result
|
|
469
1228
|
|
|
470
1229
|
finally:
|
|
1230
|
+
self._in_execution = max(0, getattr(self, "_in_execution", 1) - 1)
|
|
471
1231
|
self._total_execution_time += (time.perf_counter() - start_time)
|
|
472
1232
|
|
|
473
1233
|
def _select_execution_mode(self, code) -> VMMode:
    """Pick the execution backend (STACK/REGISTER/PARALLEL) for *code*.

    An explicit non-AUTO mode always wins. In AUTO mode the user
    preference flags are honoured first, then JIT forces STACK, then
    the generic heuristics run as a fallback.
    """
    if self.mode != VMMode.AUTO:
        return self.mode

    if hasattr(code, 'instructions'):
        instructions = code.instructions
        # Preference flags take priority over the generic heuristics below.
        if self.prefer_parallel and self._parallel_vm and self._is_parallelizable(instructions):
            return VMMode.PARALLEL
        if self.prefer_register and self._register_vm and self._is_register_friendly(instructions):
            return VMMode.REGISTER

    if self.use_jit:
        # The JIT is wired to the stack VM only, so it short-circuits
        # the heuristic checks below.
        return VMMode.STACK

    if hasattr(code, 'instructions'):
        instructions = code.instructions
        if self._parallel_vm and self._is_parallelizable(instructions):
            return VMMode.PARALLEL
        if self._register_vm and self._is_register_friendly(instructions):
            return VMMode.REGISTER

    return VMMode.STACK
|
|
485
1255
|
|
|
486
1256
|
# ==================== Specialized Execution Methods ====================
|
|
@@ -494,6 +1264,17 @@ class VM:
|
|
|
494
1264
|
def _execute_register(self, bytecode, debug: bool = False):
|
|
495
1265
|
"""Execute using register-based VM"""
|
|
496
1266
|
try:
|
|
1267
|
+
if self.enable_bytecode_converter and self.bytecode_converter and hasattr(bytecode, "instructions"):
|
|
1268
|
+
try:
|
|
1269
|
+
if not bytecode.metadata.get("converted_to_register"):
|
|
1270
|
+
bytecode = self.bytecode_converter.convert(bytecode)
|
|
1271
|
+
except Exception:
|
|
1272
|
+
pass
|
|
1273
|
+
if self.enable_register_allocation and self.register_allocator and hasattr(bytecode, "instructions"):
|
|
1274
|
+
try:
|
|
1275
|
+
self._last_register_allocation = self.allocate_registers(bytecode.instructions)
|
|
1276
|
+
except Exception:
|
|
1277
|
+
self._last_register_allocation = None
|
|
497
1278
|
# Ensure register VM has current environment and builtins
|
|
498
1279
|
self._register_vm.env = self.env.copy()
|
|
499
1280
|
self._register_vm.builtins = self.builtins.copy()
|
|
@@ -508,34 +1289,126 @@ class VM:
|
|
|
508
1289
|
return result
|
|
509
1290
|
except Exception as e:
|
|
510
1291
|
if debug: print(f"[VM Register] Failed: {e}, falling back to stack")
|
|
511
|
-
return
|
|
1292
|
+
return self._run_coroutine_sync(self._run_stack_bytecode(bytecode, debug))
|
|
512
1293
|
|
|
513
1294
|
def _execute_parallel(self, bytecode, debug: bool = False):
    """Execute using parallel VM.

    The bytecode is optimized/normalized for the parallel backend
    first; any failure falls back to the standard stack VM with the
    original (unoptimized) bytecode.
    """
    try:
        optimized_bytecode = self._optimize_bytecode_for_parallel(bytecode)
        return self._parallel_vm.execute(
            optimized_bytecode,
            # Seed the workers with a snapshot of this VM's state so
            # mutations inside the parallel run don't alias our dicts.
            initial_state={
                "env": self.env.copy(),
                "builtins": self.builtins.copy(),
                "parent_env": self._parent_env,
            },
        )
    except Exception as e:
        if debug: print(f"[VM Parallel] Failed: {e}, falling back to stack")
        return self._run_coroutine_sync(self._run_stack_bytecode(bytecode, debug))
|
|
1309
|
+
|
|
1310
|
+
def _optimize_bytecode_for_parallel(self, bytecode):
    """Apply peephole/SSA optimizations and map opcodes for parallel execution.

    Every optimizer pass is best-effort: a failing pass is silently
    skipped and the pipeline continues with the previous instruction
    stream. Returns a fresh Bytecode; the input is not mutated.
    """
    from .bytecode import Bytecode, Opcode

    consts = list(getattr(bytecode, "constants", []))
    instrs = list(getattr(bytecode, "instructions", []))

    # Pass 1: generic bytecode optimizer (expects string opcode names,
    # extra operands folded into a single tuple operand).
    if self.enable_bytecode_optimizer and self.bytecode_optimizer:
        try:
            normalized_for_opt: List[Tuple[Any, Any]] = []
            for instr in instrs:
                if instr is None:
                    continue
                if isinstance(instr, tuple) and len(instr) >= 2:
                    op = instr[0]
                    operand = instr[1] if len(instr) == 2 else tuple(instr[1:])
                    op_name = op.name if hasattr(op, "name") else op
                    normalized_for_opt.append((str(op_name), operand))
            instrs = self.bytecode_optimizer.optimize(normalized_for_opt, consts)
        except Exception:
            pass

    # Pass 2: peephole optimizer (may also rewrite the constant pool).
    if self.enable_peephole_optimizer and self.peephole_optimizer:
        try:
            instrs, consts = self.peephole_optimizer.optimize_bytecode(instrs, consts)
        except Exception:
            pass

    # Re-normalize to (op_name, operand) pairs after the optimizer passes.
    normalized: List[Tuple[Any, Any]] = []
    for instr in instrs:
        if instr is None:
            continue
        if isinstance(instr, tuple) and len(instr) >= 2:
            op = instr[0]
            operand = instr[1] if len(instr) == 2 else tuple(instr[1:])
            op_name = op.name if hasattr(op, "name") else op
            normalized.append((op_name, operand))

    instrs = normalized

    # Pass 3: optional SSA round-trip (convert, destruct, re-normalize).
    if self.enable_ssa and self.ssa_converter:
        try:
            ssa_program = self.ssa_converter.convert_to_ssa(instrs)
            ssa_instrs = destruct_ssa(ssa_program)
            instrs, consts = self._normalize_ssa_instructions(ssa_instrs, consts)
        except Exception:
            pass

    # Finally map string opcode names back onto Opcode enum members;
    # unknown names are passed through untouched.
    mapped: List[Tuple[Any, Any]] = []
    for op, operand in instrs:
        if isinstance(op, str) and op in Opcode.__members__:
            mapped.append((Opcode[op], operand))
        else:
            mapped.append((op, operand))

    return Bytecode(instructions=mapped, constants=consts)
|
|
523
1366
|
|
|
524
1367
|
# ==================== JIT & Optimization Heuristics ====================
|
|
525
1368
|
|
|
526
1369
|
def _is_parallelizable(self, instructions) -> bool:
|
|
527
|
-
if len(instructions) < 100:
|
|
528
|
-
|
|
1370
|
+
if len(instructions) < 100:
|
|
1371
|
+
return False
|
|
1372
|
+
def _op_name(op):
|
|
1373
|
+
return op.name if hasattr(op, 'name') else op
|
|
1374
|
+
independent_ops = sum(
|
|
1375
|
+
1 for op, _ in instructions
|
|
1376
|
+
if _op_name(op) in ['LOAD_CONST', 'ADD', 'SUB', 'MUL', 'HASH_BLOCK']
|
|
1377
|
+
)
|
|
529
1378
|
return independent_ops / len(instructions) > 0.3
|
|
530
1379
|
|
|
531
1380
|
def _is_register_friendly(self, instructions) -> bool:
|
|
532
|
-
|
|
1381
|
+
def _op_name(op):
|
|
1382
|
+
return op.name if hasattr(op, 'name') else op
|
|
1383
|
+
arith_ops = sum(
|
|
1384
|
+
1 for op, _ in instructions
|
|
1385
|
+
if _op_name(op) in ['ADD', 'SUB', 'MUL', 'DIV', 'EQ', 'LT']
|
|
1386
|
+
)
|
|
533
1387
|
return arith_ops / max(len(instructions), 1) > 0.4
|
|
534
1388
|
|
|
535
1389
|
def _track_execution_for_jit(self, bytecode, execution_time: float, execution_mode: VMMode):
|
|
536
1390
|
if not self.use_jit or not self.jit_compiler: return
|
|
1391
|
+
|
|
1392
|
+
if (not self._native_jit_auto_enabled
|
|
1393
|
+
and self._opcode_exec_count >= self._native_jit_auto_threshold):
|
|
1394
|
+
self._native_jit_auto_enabled = self.jit_compiler.enable_native_backend()
|
|
537
1395
|
|
|
538
|
-
|
|
1396
|
+
# OPTIMIZATION: Skip lock for single-threaded execution (47 lock acquisitions cost 37.6s!)
|
|
1397
|
+
use_lock = self._jit_lock is not None
|
|
1398
|
+
|
|
1399
|
+
if use_lock:
|
|
1400
|
+
with self._jit_lock:
|
|
1401
|
+
hot_path_info = self.jit_compiler.track_execution(bytecode, execution_time)
|
|
1402
|
+
bytecode_hash = getattr(hot_path_info, 'bytecode_hash', None) or self.jit_compiler._hash_bytecode(bytecode)
|
|
1403
|
+
|
|
1404
|
+
if bytecode_hash not in self._jit_execution_stats:
|
|
1405
|
+
self._jit_execution_stats[bytecode_hash] = []
|
|
1406
|
+
self._jit_execution_stats[bytecode_hash].append(execution_time)
|
|
1407
|
+
|
|
1408
|
+
# Check if should compile (outside lock to avoid holding during compilation)
|
|
1409
|
+
should_compile = self.jit_compiler.should_compile(bytecode_hash)
|
|
1410
|
+
else:
|
|
1411
|
+
# Lock-free path for single-threaded execution
|
|
539
1412
|
hot_path_info = self.jit_compiler.track_execution(bytecode, execution_time)
|
|
540
1413
|
bytecode_hash = getattr(hot_path_info, 'bytecode_hash', None) or self.jit_compiler._hash_bytecode(bytecode)
|
|
541
1414
|
|
|
@@ -543,17 +1416,86 @@ class VM:
|
|
|
543
1416
|
self._jit_execution_stats[bytecode_hash] = []
|
|
544
1417
|
self._jit_execution_stats[bytecode_hash].append(execution_time)
|
|
545
1418
|
|
|
546
|
-
# Check if should compile (outside lock to avoid holding during compilation)
|
|
547
1419
|
should_compile = self.jit_compiler.should_compile(bytecode_hash)
|
|
548
1420
|
|
|
549
1421
|
# Compile outside the lock to prevent blocking other executions
|
|
550
1422
|
if should_compile:
|
|
551
1423
|
if self.debug: print(f"[VM JIT] Compiling hot path: {bytecode_hash[:8]}")
|
|
552
|
-
|
|
553
|
-
|
|
1424
|
+
if use_lock:
|
|
1425
|
+
with self._jit_lock:
|
|
1426
|
+
# Double-check it hasn't been compiled by another thread
|
|
1427
|
+
if self.jit_compiler.should_compile(bytecode_hash):
|
|
1428
|
+
self.jit_compiler.compile_hot_path(bytecode)
|
|
1429
|
+
else:
|
|
554
1430
|
if self.jit_compiler.should_compile(bytecode_hash):
|
|
555
1431
|
self.jit_compiler.compile_hot_path(bytecode)
|
|
556
1432
|
|
|
1433
|
+
def _normalize_ssa_instructions(self, instructions: List[Tuple], consts: List[Any]) -> Tuple[List[Tuple], List[Any]]:
|
|
1434
|
+
"""Normalize SSA-destructed instructions to (opcode, operand) format."""
|
|
1435
|
+
# Large programs can produce large instruction streams. The previous
|
|
1436
|
+
# constant lookup did a linear scan for every constant insertion, which
|
|
1437
|
+
# becomes O(n^2) on big constant pools. Prefer a dict for hashable
|
|
1438
|
+
# constants; fall back to linear scan for unhashables.
|
|
1439
|
+
const_index: Dict[Tuple[type, Any], int] = {}
|
|
1440
|
+
try:
|
|
1441
|
+
for i, const in enumerate(consts):
|
|
1442
|
+
try:
|
|
1443
|
+
const_index[(type(const), const)] = i
|
|
1444
|
+
except Exception:
|
|
1445
|
+
continue
|
|
1446
|
+
except Exception:
|
|
1447
|
+
const_index = {}
|
|
1448
|
+
|
|
1449
|
+
def _const_index(value: Any) -> int:
|
|
1450
|
+
try:
|
|
1451
|
+
key = (type(value), value)
|
|
1452
|
+
existing = const_index.get(key)
|
|
1453
|
+
if existing is not None:
|
|
1454
|
+
return existing
|
|
1455
|
+
except Exception:
|
|
1456
|
+
key = None
|
|
1457
|
+
|
|
1458
|
+
# Fallback for unhashable values: preserve legacy semantics
|
|
1459
|
+
for i, const in enumerate(consts):
|
|
1460
|
+
try:
|
|
1461
|
+
if const == value and type(const) == type(value):
|
|
1462
|
+
return i
|
|
1463
|
+
except Exception:
|
|
1464
|
+
continue
|
|
1465
|
+
|
|
1466
|
+
consts.append(value)
|
|
1467
|
+
idx = len(consts) - 1
|
|
1468
|
+
if key is not None:
|
|
1469
|
+
try:
|
|
1470
|
+
const_index[key] = idx
|
|
1471
|
+
except Exception:
|
|
1472
|
+
pass
|
|
1473
|
+
return idx
|
|
1474
|
+
|
|
1475
|
+
normalized: List[Tuple] = []
|
|
1476
|
+
for instr in instructions:
|
|
1477
|
+
if instr is None:
|
|
1478
|
+
continue
|
|
1479
|
+
if not isinstance(instr, tuple):
|
|
1480
|
+
continue
|
|
1481
|
+
op = instr[0]
|
|
1482
|
+
if len(instr) == 2:
|
|
1483
|
+
normalized.append((op, instr[1]))
|
|
1484
|
+
continue
|
|
1485
|
+
if op == "MOVE" and len(instr) >= 3:
|
|
1486
|
+
src = instr[1]
|
|
1487
|
+
dest = instr[2]
|
|
1488
|
+
src_idx = _const_index(src)
|
|
1489
|
+
dest_idx = _const_index(dest)
|
|
1490
|
+
normalized.append(("LOAD_NAME", src_idx))
|
|
1491
|
+
normalized.append(("STORE_NAME", dest_idx))
|
|
1492
|
+
continue
|
|
1493
|
+
|
|
1494
|
+
operand = tuple(instr[1:])
|
|
1495
|
+
normalized.append((op, operand))
|
|
1496
|
+
|
|
1497
|
+
return normalized, consts
|
|
1498
|
+
|
|
557
1499
|
def get_jit_stats(self) -> Dict[str, Any]:
|
|
558
1500
|
if self.use_jit and self.jit_compiler:
|
|
559
1501
|
stats = self.jit_compiler.get_stats()
|
|
@@ -562,6 +1504,14 @@ class VM:
|
|
|
562
1504
|
return stats
|
|
563
1505
|
return {'jit_enabled': False}
|
|
564
1506
|
|
|
1507
|
+
def _ensure_recursion_headroom(self, minimum: int = 5000):
|
|
1508
|
+
try:
|
|
1509
|
+
current = sys.getrecursionlimit()
|
|
1510
|
+
if current < minimum:
|
|
1511
|
+
sys.setrecursionlimit(minimum)
|
|
1512
|
+
except Exception:
|
|
1513
|
+
pass
|
|
1514
|
+
|
|
565
1515
|
def clear_jit_cache(self):
|
|
566
1516
|
if self.use_jit and self.jit_compiler:
|
|
567
1517
|
with self._jit_lock:
|
|
@@ -593,6 +1543,7 @@ class VM:
|
|
|
593
1543
|
with self._memory_lock:
|
|
594
1544
|
stats = self.memory_manager.get_stats()
|
|
595
1545
|
stats['managed_objects_count'] = len(self._managed_objects)
|
|
1546
|
+
stats['memory_manager_enabled'] = True
|
|
596
1547
|
return stats
|
|
597
1548
|
return {'memory_manager_enabled': False}
|
|
598
1549
|
|
|
@@ -709,14 +1660,12 @@ class VM:
|
|
|
709
1660
|
await self._call_builtin_async(h, [payload])
|
|
710
1661
|
elif code == "IMPORT":
|
|
711
1662
|
_, module_path, alias = op
|
|
712
|
-
|
|
713
|
-
mod = importlib.import_module(module_path)
|
|
714
|
-
self.env[alias or module_path] = mod
|
|
715
|
-
except Exception:
|
|
716
|
-
self.env[alias or module_path] = None
|
|
1663
|
+
self._execute_import(module_path, alias=alias or "")
|
|
717
1664
|
elif code == "DEFINE_ENUM":
|
|
718
1665
|
_, name, members = op
|
|
719
|
-
self.env.setdefault("enums", {})
|
|
1666
|
+
enum_registry = self.env.setdefault("enums", {})
|
|
1667
|
+
enum_registry[name] = members
|
|
1668
|
+
self.env[name] = members
|
|
720
1669
|
elif code == "DEFINE_PROTOCOL":
|
|
721
1670
|
_, name, spec = op
|
|
722
1671
|
self.env.setdefault("protocols", {})[name] = spec
|
|
@@ -755,46 +1704,916 @@ class VM:
|
|
|
755
1704
|
if tag == "LIST": return [self._eval_hl_op(e) for e in op[1]]
|
|
756
1705
|
return None
|
|
757
1706
|
|
|
758
|
-
# ====================
|
|
759
|
-
|
|
760
|
-
async def _run_stack_bytecode(self, bytecode, debug=False):
|
|
761
|
-
# 1. JIT Check (with thread safety)
|
|
762
|
-
if self.use_jit and self.jit_compiler:
|
|
763
|
-
with self._jit_lock:
|
|
764
|
-
bytecode_hash = self.jit_compiler._hash_bytecode(bytecode)
|
|
765
|
-
jit_function = self.jit_compiler.compilation_cache.get(bytecode_hash)
|
|
766
|
-
|
|
767
|
-
if jit_function:
|
|
768
|
-
try:
|
|
769
|
-
start_t = time.perf_counter()
|
|
770
|
-
stack = []
|
|
771
|
-
result = jit_function(self, stack, self.env)
|
|
772
|
-
with self._jit_lock:
|
|
773
|
-
self.jit_compiler.record_execution_time(bytecode_hash, time.perf_counter() - start_t, ExecutionTier.JIT_NATIVE)
|
|
774
|
-
if debug: print(f"[VM JIT] Executed cached function")
|
|
775
|
-
return result
|
|
776
|
-
except Exception as e:
|
|
777
|
-
if debug: print(f"[VM JIT] Failed: {e}, falling back")
|
|
1707
|
+
# ==================== Fast Synchronous Dispatch (Performance Mode) ====================
|
|
778
1708
|
|
|
779
|
-
|
|
1709
|
+
def _run_stack_bytecode_sync(self, bytecode, debug=False):
    """Synchronous fast-path execution without async overhead.

    NOTE(review): the original docstring claimed gas metering was skipped
    here, but this loop DOES enforce gas when ``self.gas_metering`` is set
    (see the per-instruction accounting below). Only the Cython fastops
    shortcut bypasses metering, and that shortcut is disabled whenever
    gas metering is active.

    Unknown opcodes fall back to the async interpreter via
    ``_run_coroutine_sync``; a RETURN opcode (or loop exhaustion) yields
    the top-of-stack value, or ``None`` for an empty stack.
    """
    consts = list(getattr(bytecode, "constants", []))
    instrs = list(getattr(bytecode, "instructions", []))

    # Opcode counter feeds the native-JIT auto-enable heuristic.
    if not self._native_jit_auto_enabled:
        self._opcode_exec_count += len(instrs)

    # Normalize opcodes: drop Nones, coerce enum ops to their string name,
    # and fold multi-operand instructions into a single operand tuple.
    normalized: List[Tuple[str, Any]] = []
    for instr in instrs:
        if instr is None:
            continue
        if isinstance(instr, tuple) and len(instr) >= 2:
            op = instr[0]
            operand = instr[1] if len(instr) == 2 else tuple(instr[1:])
            op_name = op.name if hasattr(op, "name") else op
            normalized.append((op_name, operand))
    instrs = normalized

    # Cython fast-path if available (skip when gas metering is active
    # because the native code doesn't enforce gas limits)
    if _FASTOPS_AVAILABLE and not self.gas_metering:
        try:
            return _fastops.execute(instrs, consts, self.env, self.builtins, self._closure_cells)
        except NotImplementedError:
            pass
        except Exception:
            pass

    # Fast stack implementation
    stack: List[Any] = []
    stack_append = stack.append
    # stack_pop = stack.pop
    def stack_pop():
        # Underflow-tolerant pop: missing operands become None rather
        # than raising, matching the loop's defensive style throughout.
        if not stack:
            return None
        return stack.pop()

    ip = 0
    # Optional periodic tracing, controlled by environment variable.
    trace_interval = 0
    try:
        trace_interval = int(os.environ.get("ZEXUS_VM_TRACE_INTERVAL", "0"))
    except Exception:
        trace_interval = 0
    trace_counter = 0
    instr_count = len(instrs)
    # Bind hot attributes to locals for faster lookups in the loop.
    env = self.env
    builtins = self.builtins

    def const(idx):
        # In-range ints index the constant pool; anything else (including
        # out-of-range ints) is treated as an immediate value.
        return consts[idx] if isinstance(idx, int) and 0 <= idx < len(consts) else idx

    def resolve(name):
        # Name lookup with a version-stamped cache: env first (unwrapping
        # Cells), then closure cells; unknown names resolve to None.
        cached = self._name_cache.get(name)
        if cached and cached[1] == self._env_version:
            return cached[0]
        if name in env:
            val = env[name]
            resolved = val.value if isinstance(val, Cell) else val
            self._name_cache[name] = (resolved, self._env_version)
            return resolved
        if name in self._closure_cells:
            resolved = self._closure_cells[name].value
            self._name_cache[name] = (resolved, self._env_version)
            return resolved
        return None

    def store(name, value):
        # Writes through an existing Cell in place; otherwise plain
        # assignment. Either way the env version is bumped so cached
        # resolutions are invalidated.
        if name in env and isinstance(env[name], Cell):
            env[name].value = value
            self._bump_env_version(name, value)
        else:
            env[name] = value
            self._bump_env_version(name, value)

    # Gas metering for sync path (security: prevents DoS via
    # unbounded computation even when using the fast path)
    _gas = self.gas_metering  # may be None
    _gas_consume = _gas.consume if _gas else None
    _gas_light = self.enable_gas_light
    _gas_light_cost = self.gas_light_cost if _gas_light else 0
    _gas_consume_light = _gas.consume_light if _gas else None

    while ip < instr_count:
        op_name, operand = instrs[ip]
        ip += 1

        # --- Gas accounting (fast) ---
        if _gas is not None:
            if _gas_light:
                if not _gas_consume_light(_gas_light_cost):
                    from .gas_metering import OutOfGasError
                    raise OutOfGasError(_gas.gas_used, _gas.gas_limit, op_name)
            else:
                if not _gas_consume(op_name):
                    from .gas_metering import OutOfGasError
                    raise OutOfGasError(_gas.gas_used, _gas.gas_limit, op_name)

        if trace_interval > 0:
            trace_counter += 1
            if trace_counter % trace_interval == 0:
                try:
                    stack_size = len(stack)
                except Exception:
                    stack_size = -1
                print(f"[VM TRACE] sync ip={ip} op={op_name} stack={stack_size}")

        # Hot path: arithmetic and stack ops (inlined)
        if op_name == "LOAD_CONST":
            stack_append(const(operand))
        elif op_name == "LOAD_NAME":
            stack_append(resolve(const(operand)))
        elif op_name == "STORE_NAME":
            store(const(operand), stack_pop() if stack else None)
        elif op_name == "POP":
            if stack: stack_pop()
        elif op_name == "DUP":
            if stack: stack_append(stack[-1])
        elif op_name == "ADD":
            # Arithmetic ops unwrap `.value` wrappers (Zexus boxed types)
            # before applying the Python operator.
            b = stack_pop() if stack else 0
            a = stack_pop() if stack else 0
            if hasattr(a, 'value'): a = a.value
            if hasattr(b, 'value'): b = b.value
            stack_append(a + b)
        elif op_name == "SUB":
            b = stack_pop() if stack else 0
            a = stack_pop() if stack else 0
            if hasattr(a, 'value'): a = a.value
            if hasattr(b, 'value'): b = b.value
            if a is None: a = 0
            if b is None: b = 0
            stack_append(a - b)
        elif op_name == "MUL":
            b = stack_pop() if stack else 0
            a = stack_pop() if stack else 0
            if hasattr(a, 'value'): a = a.value
            if hasattr(b, 'value'): b = b.value
            stack_append(a * b)
        elif op_name == "DIV":
            b = stack_pop() if stack else 1
            a = stack_pop() if stack else 0
            if hasattr(a, 'value'): a = a.value
            if hasattr(b, 'value'): b = b.value
            # Division by zero yields 0 rather than raising.
            stack_append(a / b if b != 0 else 0)
        elif op_name == "MOD":
            b = stack_pop() if stack else 1
            a = stack_pop() if stack else 0
            stack_append(a % b if b != 0 else 0)
        elif op_name == "EQ":
            b = stack_pop() if stack else None
            a = stack_pop() if stack else None
            stack_append(a == b)
        elif op_name == "NEQ":
            b = stack_pop() if stack else None
            a = stack_pop() if stack else None
            stack_append(a != b)
        elif op_name == "LT":
            # Ordering comparisons treat None operands as incomparable
            # and push False instead of raising TypeError.
            b = stack_pop() if stack else 0
            a = stack_pop() if stack else 0
            if a is None or b is None: stack_append(False)
            else: stack_append(a < b)
        elif op_name == "GT":
            b = stack_pop() if stack else 0
            a = stack_pop() if stack else 0
            if a is None or b is None: stack_append(False)
            else: stack_append(a > b)
        elif op_name == "LTE":
            b = stack_pop() if stack else 0
            a = stack_pop() if stack else 0
            if a is None or b is None: stack_append(False)
            else: stack_append(a <= b)
        elif op_name == "GTE":
            b = stack_pop() if stack else 0
            a = stack_pop() if stack else 0
            if a is None or b is None: stack_append(False)
            else: stack_append(a >= b)
        elif op_name == "NOT":
            a = stack_pop() if stack else False
            stack_append(not a)
        elif op_name == "NEG":
            a = stack_pop() if stack else 0
            stack_append(-a)
        elif op_name == "JUMP":
            ip = operand
        elif op_name == "JUMP_IF_FALSE":
            cond = stack_pop() if stack else None
            if not cond:
                ip = operand
        elif op_name == "RETURN":
            return stack_pop() if stack else None
        elif op_name == "BUILD_LIST":
            count = operand if operand is not None else 0
            # Pops reverse push order, so reverse to restore source order.
            elements = [stack_pop() for _ in range(count)][::-1]
            stack_append(elements)
        elif op_name == "BUILD_MAP":
            count = operand if operand is not None else 0
            result = {}
            for _ in range(count):
                val = stack_pop()
                key = stack_pop()
                result[key] = val
            stack_append(result)
        elif op_name == "INDEX":
            idx = stack_pop()
            obj = stack_pop()
            try:
                if isinstance(obj, ZList):
                    stack_append(obj.get(idx))
                elif isinstance(obj, ZMap):
                    stack_append(obj.get(idx))
                else:
                    stack_append(obj[idx] if obj is not None else None)
            except (IndexError, KeyError, TypeError):
                stack_append(None)
        elif op_name == "SLICE":
            end = stack_pop() if stack else None
            start = stack_pop() if stack else None
            obj = stack_pop() if stack else None
            if hasattr(start, "value"):
                start = start.value
            if hasattr(end, "value"):
                end = end.value
            try:
                if isinstance(obj, ZList):
                    stack_append(ZList(obj.elements[start:end]))
                elif isinstance(obj, ZString):
                    stack_append(ZString(obj.value[start:end]))
                else:
                    stack_append(obj[start:end] if obj is not None else None)
            except Exception:
                stack_append(None)
        elif op_name == "GET_LENGTH":
            obj = stack_pop()
            try:
                if obj is None:
                    stack_append(0)
                elif isinstance(obj, ZList):
                    stack_append(len(obj.elements))
                elif isinstance(obj, ZMap):
                    stack_append(len(obj.pairs))
                elif hasattr(obj, '__len__'):
                    stack_append(len(obj))
                else:
                    stack_append(0)
            except Exception:
                stack_append(0)
        elif op_name == "CALL_NAME":
            name_idx, arg_count = operand
            func_name = const(name_idx)
            args = [stack.pop() if stack else None for _ in range(arg_count)][::-1] if arg_count else []
            fn = resolve(func_name) or builtins.get(func_name)
            if fn is None:
                res = self._call_fallback_builtin(func_name, args)
            else:
                res = self._invoke_callable_sync(fn, args)
            stack_append(res)
        elif op_name == "CALL_TOP":
            arg_count = operand or 0
            args = [stack.pop() if stack else None for _ in range(arg_count)][::-1] if arg_count else []
            fn_obj = stack_pop() if stack else None
            res = self._invoke_callable_sync(fn_obj, args)
            stack_append(res)
        elif op_name == "CALL_METHOD":
            if not operand:
                stack_append(None)
                continue
            method_idx, arg_count = operand
            args = [stack.pop() if stack else None for _ in range(arg_count)][::-1] if arg_count else []
            target = stack_pop() if stack else None
            method_name = const(method_idx)
            # Optional call tracing via ZEXUS_VM_TRACE_CALLS (value = interval).
            trace_calls = os.environ.get("ZEXUS_VM_TRACE_CALLS")
            if trace_calls:
                try:
                    interval = int(trace_calls) if trace_calls.isdigit() else 1000
                except Exception:
                    interval = 1000
                self._call_method_total += 1
                if interval > 0 and self._call_method_total % interval == 0:
                    target_type = type(target).__name__ if target is not None else "None"
                    print(f"[VM TRACE] CALL_METHOD total={self._call_method_total} method={method_name} target={target_type}")
            if target is None:
                stack_append(None)
                continue
            result = None
            try:
                if method_name == "set":
                    # ZMap keys are normalized to plain strings.
                    if isinstance(target, ZMap) and len(args) >= 2:
                        key = args[0]
                        if isinstance(key, ZString):
                            norm_key = key.value
                        elif isinstance(key, str):
                            norm_key = key
                        elif hasattr(key, "inspect"):
                            norm_key = key.inspect()
                        else:
                            norm_key = str(key)
                        existing = target.pairs.get(norm_key)
                        if existing is not None and existing.__class__.__name__ == 'SealedObject':
                            raise ZEvaluationError(f"Cannot modify sealed map key: {key}")
                        target.pairs[norm_key] = args[1]
                        result = args[1]
                    elif isinstance(target, ZList) and len(args) >= 2:
                        target.set(args[0], args[1])
                        result = args[1]
                    elif isinstance(target, (dict, list)) and len(args) >= 2:
                        target[args[0]] = args[1]
                        result = args[1]
                elif method_name == "get":
                    if isinstance(target, ZMap) and args:
                        result = target.get(args[0])
                    elif isinstance(target, dict) and args:
                        result = target.get(args[0])
                elif hasattr(target, "call_method"):
                    # Zexus objects dispatch through call_method; the VM
                    # action context flag is toggled around the call so
                    # the security layer knows a VM action is running.
                    wrapped_args = [self._wrap_for_builtin(arg) for arg in args]
                    try:
                        from .. import security as _security
                        _security._set_vm_action_context(True)
                    except Exception:
                        _security = None
                    try:
                        result = target.call_method(method_name, wrapped_args)
                    finally:
                        if _security is not None:
                            try:
                                _security._set_vm_action_context(False)
                            except Exception:
                                pass
                else:
                    # Generic Python attribute dispatch (method cache).
                    attr = self._get_cached_method(target, method_name)
                    if callable(attr):
                        result = attr(*args)
                    elif isinstance(target, dict) and method_name in target:
                        candidate = target[method_name]
                        result = candidate(*args) if callable(candidate) else candidate
                    else:
                        result = attr
            except Exception:
                # NOTE(review): swallows all method-call failures to None;
                # errors inside user methods are invisible on this path.
                result = None
            stack_append(self._unwrap_after_builtin(result))
        elif op_name == "PRINT":
            val = stack_pop() if stack else None
            print(self._format_print_value(val))
        elif op_name == "GET_ATTR":
            attr = stack_pop() if stack else None
            obj = stack_pop() if stack else None
            if obj is None:
                stack_append(None)
            else:
                attr_name = attr.value if hasattr(attr, 'value') else attr
                try:
                    if isinstance(obj, ZMap):
                        key = attr_name
                        if isinstance(key, str):
                            key = ZString(key)
                        stack_append(obj.get(key))
                    elif isinstance(obj, dict):
                        stack_append(obj.get(attr_name))
                    elif hasattr(obj, 'get') and hasattr(obj, 'set') and callable(getattr(obj, 'get', None)):
                        # Contract-like objects (e.g., SmartContract) expose state via get/set.
                        stack_append(obj.get(attr_name))
                    else:
                        stack_append(getattr(obj, attr_name, None))
                except Exception:
                    stack_append(None)
        elif op_name == "DEFINE_CONTRACT":
            contract_obj = self._build_smart_contract(operand, stack, stack_pop, const, env)
            stack_append(contract_obj)

        # --- Blockchain / TX opcodes (sync-safe) ---

        elif op_name == "HASH_BLOCK":
            block_data = stack_pop() if stack else ""
            if isinstance(block_data, dict):
                import json; block_data = json.dumps(block_data, sort_keys=True)
            if not isinstance(block_data, (bytes, str)): block_data = str(block_data)
            if isinstance(block_data, str): block_data = block_data.encode('utf-8')
            # Prefer keccak-256 (Ethereum-style) when PyCryptodome is
            # installed; fall back to SHA-256 otherwise.
            try:
                from Crypto.Hash import keccak as _keccak_mod
                h = _keccak_mod.new(digest_bits=256, data=block_data)
                stack_append(h.hexdigest())
            except ImportError:
                stack_append(hashlib.sha256(block_data).hexdigest())

        elif op_name == "VERIFY_SIGNATURE":
            if len(stack) >= 3:
                pk = stack.pop(); msg = stack.pop(); sig = stack.pop()
                # A user/builtin-provided verify_sig overrides the plugin.
                verify_fn = builtins.get("verify_sig") or env.get("verify_sig")
                if verify_fn and callable(verify_fn):
                    try:
                        res = verify_fn(sig, msg, pk)
                    except Exception:
                        res = False
                    stack_append(res)
                else:
                    try:
                        from ..blockchain.crypto import CryptoPlugin
                        sig_s = sig.value if hasattr(sig, 'value') else str(sig)
                        msg_s = msg.value if hasattr(msg, 'value') else str(msg)
                        pk_s = pk.value if hasattr(pk, 'value') else str(pk)
                        stack_append(CryptoPlugin.verify_signature(msg_s, sig_s, pk_s))
                    except ImportError:
                        stack_append(False)
            else:
                stack_append(False)

        elif op_name == "MERKLE_ROOT":
            # Classic Merkle tree over SHA-256 hex digests; odd levels
            # duplicate the last node.
            leaf_count = operand if operand is not None else 0
            if leaf_count <= 0 or len(stack) < leaf_count:
                stack_append("")
            else:
                leaves = [stack.pop() for _ in range(leaf_count)][::-1]
                hashes = []
                for leaf in leaves:
                    if isinstance(leaf, dict):
                        import json; leaf = json.dumps(leaf, sort_keys=True)
                    if not isinstance(leaf, (str, bytes)): leaf = str(leaf)
                    if isinstance(leaf, str): leaf = leaf.encode('utf-8')
                    hashes.append(hashlib.sha256(leaf).hexdigest())
                while len(hashes) > 1:
                    if len(hashes) % 2 != 0: hashes.append(hashes[-1])
                    new_hashes = []
                    for i in range(0, len(hashes), 2):
                        combined = (hashes[i] + hashes[i+1]).encode('utf-8')
                        new_hashes.append(hashlib.sha256(combined).hexdigest())
                    hashes = new_hashes
                stack_append(hashes[0] if hashes else "")

        elif op_name == "STATE_READ":
            # Key comes from the stack when operand is None, otherwise
            # from the constant pool.
            if operand is None:
                key = stack_pop()
                if hasattr(key, 'value'): key = key.value
            else:
                key = const(operand)
            stack_append(env.setdefault("_blockchain_state", {}).get(key))

        elif op_name == "STATE_WRITE":
            val = stack_pop()
            if hasattr(val, 'value'): val = val.value
            if operand is None:
                key = stack_pop()
                if hasattr(key, 'value'): key = key.value
            else:
                key = const(operand)
            # Inside a transaction, writes are buffered until TX_COMMIT.
            if env.get("_in_transaction", False):
                env.setdefault("_tx_pending_state", {})[key] = val
            else:
                env.setdefault("_blockchain_state", {})[key] = val

        elif op_name == "TX_BEGIN":
            # Push a snapshot frame so nested transactions can be unwound.
            tx_stack = env.setdefault("_tx_stack", [])
            tx_stack.append({
                "snapshot": dict(env.get("_blockchain_state", {})),
                "pending": dict(env.get("_tx_pending_state", {})),
            })
            env["_in_transaction"] = True
            env["_tx_pending_state"] = {}
            env["_tx_snapshot"] = dict(env.get("_blockchain_state", {}))

        elif op_name == "TX_COMMIT":
            if env.get("_in_transaction", False):
                env.setdefault("_blockchain_state", {}).update(
                    env.get("_tx_pending_state", {}))
                env["_tx_pending_state"] = {}
                env.pop("_tx_snapshot", None)
                # Restore outer TX if nested
                tx_stack = env.get("_tx_stack", [])
                if tx_stack:
                    tx_stack.pop()
                env["_in_transaction"] = bool(tx_stack)

        elif op_name == "TX_REVERT":
            if env.get("_in_transaction", False):
                env["_blockchain_state"] = dict(env.get("_tx_snapshot", {}))
                env["_tx_pending_state"] = {}
                env.pop("_tx_snapshot", None)
                tx_stack = env.get("_tx_stack", [])
                if tx_stack:
                    outer = tx_stack.pop()
                    if tx_stack:
                        env["_tx_snapshot"] = outer["snapshot"]
                        env["_tx_pending_state"] = outer["pending"]
                env["_in_transaction"] = bool(env.get("_tx_stack", []))

        elif op_name == "GAS_CHARGE":
            amount = operand if operand is not None else 0
            if _gas is not None:
                if not _gas.consume("GAS_CHARGE", amount=amount):
                    # Roll back an open transaction before failing.
                    if env.get("_in_transaction", False):
                        env["_blockchain_state"] = dict(env.get("_tx_snapshot", {}))
                        env["_in_transaction"] = False
                    from .gas_metering import OutOfGasError
                    raise OutOfGasError(_gas.gas_used, _gas.gas_limit, "GAS_CHARGE")
                # Sync env-based counter for backward compat
                if "_gas_remaining" in env:
                    env["_gas_remaining"] = max(0, env["_gas_remaining"] - amount)
            else:
                # Fallback to env-based tracking when no GasMetering
                current = env.get("_gas_remaining", float('inf'))
                if current != float('inf'):
                    new_gas = current - amount
                    if new_gas < 0:
                        if env.get("_in_transaction", False):
                            env["_blockchain_state"] = dict(env.get("_tx_snapshot", {}))
                            env["_in_transaction"] = False
                        raise ZEvaluationError(
                            f"Out of gas: required {amount}, remaining {current}")
                    env["_gas_remaining"] = new_gas

        elif op_name == "REQUIRE":
            # Stack layout: [... condition message] — message on top.
            message = stack_pop()
            if hasattr(message, 'value'): message = message.value
            condition = stack_pop()
            cond_val = condition.value if hasattr(condition, 'value') else condition
            if not cond_val:
                if env.get("_in_transaction", False):
                    env["_blockchain_state"] = dict(env.get("_tx_snapshot", {}))
                    env["_in_transaction"] = False
                    env["_tx_pending_state"] = {}
                    env.pop("_tx_snapshot", None)
                raise ZEvaluationError(f"Requirement failed: {message}")

        elif op_name == "LEDGER_APPEND":
            entry = stack_pop()
            ledger = env.setdefault("_ledger", [])
            if len(ledger) < 10000:  # Size limit
                if isinstance(entry, dict) and "timestamp" not in entry:
                    entry["timestamp"] = time.time()
                ledger.append(entry)

        elif op_name == "SETUP_TRY":
            handler_ip = int(operand) if operand is not None else ip
            env.setdefault("_try_stack_sync", []).append(handler_ip)

        elif op_name == "POP_TRY":
            ts = env.get("_try_stack_sync", [])
            if ts: ts.pop()

        elif op_name == "THROW":
            exc = stack_pop()
            ts = env.get("_try_stack_sync", [])
            if ts:
                # Jump to the innermost handler, leaving the exception
                # value on the stack for it to consume.
                handler_ip = ts.pop()
                stack_append(exc)
                ip = handler_ip
            else:
                msg = exc.value if hasattr(exc, 'value') else exc
                raise ZEvaluationError(str(msg))

        elif op_name == "ENABLE_ERROR_MODE":
            env["_continue_on_error"] = True

        elif op_name in ("PARALLEL_START", "PARALLEL_END"):
            pass  # Marker ops — no-op in stack VM

        elif op_name == "AUDIT_LOG":
            ts = time.time()
            data = stack_pop(); action = stack_pop()
            if hasattr(action, 'value'): action = action.value
            if hasattr(data, 'value'): data = data.value
            env.setdefault("_audit_log", []).append(
                {"timestamp": ts, "action": action, "data": data})

        elif op_name == "RESTRICT_ACCESS":
            restriction = stack_pop(); prop = stack_pop(); obj = stack_pop()
            r_key = f"{obj}.{prop}" if prop else str(obj)
            env.setdefault("_restrictions", {})[r_key] = restriction
            # Enforcement via TX.caller
            caller = None
            tx_obj = env.get("TX")
            if tx_obj is not None and hasattr(tx_obj, 'get'):
                from ..object import String as _ZS
                cv = tx_obj.get(_ZS("caller"))
                if cv: caller = cv.value if hasattr(cv, 'value') else str(cv)
            rv = restriction.value if hasattr(restriction, 'value') else restriction
            if isinstance(rv, str) and rv == "owner_only":
                owner = env.get("owner")
                if owner is not None:
                    ov = owner.value if hasattr(owner, 'value') else str(owner)
                    if caller and caller != ov:
                        raise ZEvaluationError(f"Access denied: '{r_key}' restricted to owner only")
            elif isinstance(rv, (list, tuple)):
                allowed = [a.value if hasattr(a, 'value') else str(a) for a in rv]
                if caller and caller not in allowed:
                    raise ZEvaluationError(f"Access denied: '{r_key}' restricted to allowed addresses")

        else:
            # Truly unknown op — fallback to async path
            return self._run_coroutine_sync(self._run_stack_bytecode(bytecode, debug))

    return stack_pop() if stack else None
|
|
2300
|
+
|
|
2301
|
+
def _build_smart_contract(self, operand, stack, stack_pop, const, env):
    """Create a real SmartContract from DEFINE_CONTRACT bytecode.

    This mirrors the interpreter's eval_contract_statement logic:
    1. Pop evaluated storage initial values from the stack
    2. Create Action objects from the AST action nodes
    3. Construct a SmartContract with proper storage, deploy lifecycle
    4. Run the constructor if one exists

    Parameters (supplied by the dispatch loop):
        operand:    (ast_constant_index, storage_var_count) tuple, or a
                    legacy bare int meaning storage_var_count with no AST.
        stack:      the VM value stack (list).
        stack_pop:  underflow-safe pop closure over that stack.
        const:      constant-pool accessor closure.
        env:        the VM environment mapping.

    Returns the deployed SmartContract, or an empty ZMap when no AST
    node is available to build from.
    """
    from ..environment import Environment
    from ..object import Action, Null, Map, String, Integer
    from ..security import SmartContract

    # Unpack operand: (ast_constant_index, storage_var_count)
    if isinstance(operand, tuple):
        ast_idx, storage_count = operand
    else:
        # Legacy single-int operand — treat as member_count with no AST
        ast_idx = None
        storage_count = operand or 0

    # Pop contract name (pushed last, popped first)
    contract_name_raw = stack_pop()
    contract_name = contract_name_raw.value if hasattr(contract_name_raw, 'value') else str(contract_name_raw)

    # Pop storage values (name, value pairs) in reverse push order
    storage = {}
    for _ in range(storage_count):
        raw_val = stack_pop()
        raw_name = stack_pop()
        var_name = raw_name.value if hasattr(raw_name, 'value') else str(raw_name)
        storage[var_name] = self._wrap_for_builtin(raw_val)

    # Retrieve the AST node from the constants pool
    ast_node = const(ast_idx) if ast_idx is not None else None
    if ast_node is None:
        # Can't build a proper contract without the AST — return a Map fallback
        return ZMap({})

    # Build a bridge Environment so Action closures can resolve outer vars
    bridge_env = Environment()
    if isinstance(env, dict):
        for k, v in env.items():
            bridge_env.set(k, self._wrap_for_builtin(v))
    elif hasattr(env, 'items'):
        # Dict-like env (e.g. a custom mapping) — same copy, duck-typed.
        for k, v in env.items():
            bridge_env.set(k, self._wrap_for_builtin(v))

    # Create Action objects from AST action nodes
    actions = {}
    for act in getattr(ast_node, 'actions', []):
        act_name = act.name.value if hasattr(act.name, 'value') else str(act.name)
        action_obj = Action(act.parameters, act.body, bridge_env)
        actions[act_name] = action_obj

    # Retrieve storage_vars AST nodes for SmartContract metadata
    storage_vars = getattr(ast_node, 'storage_vars', [])

    # Create the real SmartContract
    contract = SmartContract(contract_name, storage_vars, actions)
    contract.deploy(evaluated_storage_values=storage)

    # Run constructor if present
    if 'constructor' in actions:
        constructor = actions['constructor']
        contract_env = Environment(outer=bridge_env)

        # Set up TX context
        # NOTE(review): constructor runs with caller="system"; deploy-time
        # caller identity is not threaded through here — confirm intended.
        import time as _time
        tx_context = Map({
            String("caller"): String("system"),
            String("timestamp"): Integer(int(_time.time())),
        })
        contract_env.set("TX", tx_context)

        # Pre-populate environment with storage variables
        for sv in storage_vars:
            var_name = sv.name.value if hasattr(sv.name, 'value') else str(getattr(sv, 'name', ''))
            initial_val = contract.storage.get(var_name)
            if initial_val is not None:
                contract_env.set(var_name, initial_val)

        # Execute constructor body via the evaluator
        # (lazily creates a non-VM Evaluator, reused across contracts).
        try:
            from ..evaluator.core import Evaluator
            if self._action_evaluator is None:
                self._action_evaluator = Evaluator(use_vm=False)
            self._action_evaluator.eval_node(constructor.body, contract_env, [])
        except Exception as _ctor_err:
            # Log the error — silent failures can leave security-critical
            # storage (e.g. owner) uninitialised.
            import logging as _logging
            _logging.getLogger("zexus.vm").warning(
                "Contract '%s' constructor failed: %s", contract_name, _ctor_err)

        # Sync modified variables back to storage
        for sv in storage_vars:
            var_name = sv.name.value if hasattr(sv.name, 'value') else str(getattr(sv, 'name', ''))
            val = contract_env.get(var_name)
            if val is not None:
                contract.storage.set(var_name, val)

    return contract
|
|
2404
|
+
|
|
2405
|
+
def _invoke_callable_sync(self, fn, args):
|
|
2406
|
+
"""Synchronous callable invocation for fast dispatch."""
|
|
2407
|
+
if fn is None:
|
|
2408
|
+
return None
|
|
2409
|
+
real_fn = fn.fn if hasattr(fn, "fn") else fn
|
|
2410
|
+
ZAction, ZLambda = _get_action_types()
|
|
2411
|
+
if ZAction is not None and isinstance(real_fn, (ZAction, ZLambda)):
|
|
2412
|
+
# Try to compile to bytecode and execute in VM (fast path)
|
|
2413
|
+
action_bytecode = None
|
|
2414
|
+
try:
|
|
2415
|
+
if hasattr(real_fn, '_cached_bytecode'):
|
|
2416
|
+
action_bytecode = real_fn._cached_bytecode
|
|
2417
|
+
else:
|
|
2418
|
+
from ..evaluator.bytecode_compiler import EvaluatorBytecodeCompiler
|
|
2419
|
+
compiler = EvaluatorBytecodeCompiler(use_cache=False)
|
|
2420
|
+
action_bytecode = compiler.compile(real_fn.body, optimize=True)
|
|
2421
|
+
if action_bytecode and not compiler.errors:
|
|
2422
|
+
real_fn._cached_bytecode = action_bytecode
|
|
2423
|
+
except Exception:
|
|
2424
|
+
action_bytecode = None
|
|
2425
|
+
|
|
2426
|
+
if action_bytecode:
|
|
2427
|
+
# Execute via VM (fast)
|
|
2428
|
+
call_args = [self._wrap_for_builtin(arg) for arg in args]
|
|
2429
|
+
params = real_fn.parameters if hasattr(real_fn, 'parameters') else []
|
|
2430
|
+
local_env = {k.value if hasattr(k, 'value') else k: v for k, v in zip(params, call_args)}
|
|
2431
|
+
inner_vm = VM.create_child(parent_vm=self, env=local_env)
|
|
2432
|
+
try:
|
|
2433
|
+
result = inner_vm._run_stack_bytecode_sync(action_bytecode, debug=False)
|
|
2434
|
+
finally:
|
|
2435
|
+
self._return_vm_to_pool(inner_vm)
|
|
2436
|
+
return self._unwrap_after_builtin(result)
|
|
2437
|
+
else:
|
|
2438
|
+
# Fallback to interpreter (slow)
|
|
2439
|
+
try:
|
|
2440
|
+
from ..evaluator.core import Evaluator
|
|
2441
|
+
if self._action_evaluator is None:
|
|
2442
|
+
self._action_evaluator = Evaluator(use_vm=False)
|
|
2443
|
+
call_args = [self._wrap_for_builtin(arg) for arg in args]
|
|
2444
|
+
result = self._action_evaluator.apply_function(real_fn, call_args)
|
|
2445
|
+
return self._unwrap_after_builtin(result)
|
|
2446
|
+
except Exception:
|
|
2447
|
+
return None
|
|
2448
|
+
if callable(real_fn) and not _iscoroutinefunction(real_fn):
|
|
2449
|
+
try:
|
|
2450
|
+
wrap_args = hasattr(fn, "fn")
|
|
2451
|
+
call_args = [self._wrap_for_builtin(arg) for arg in args] if wrap_args else list(args)
|
|
2452
|
+
result = real_fn(*call_args)
|
|
2453
|
+
return self._unwrap_after_builtin(result) if wrap_args else result
|
|
2454
|
+
except Exception:
|
|
2455
|
+
return None
|
|
2456
|
+
if isinstance(fn, dict):
|
|
2457
|
+
# Function descriptor - execute bytecode
|
|
2458
|
+
bytecode = fn.get("bytecode")
|
|
2459
|
+
if bytecode:
|
|
2460
|
+
params = fn.get("parameters", [])
|
|
2461
|
+
local_env = {}
|
|
2462
|
+
for i, p in enumerate(params):
|
|
2463
|
+
pname = p.get("name") if isinstance(p, dict) else str(p)
|
|
2464
|
+
local_env[pname] = args[i] if i < len(args) else None
|
|
2465
|
+
# Share parent's gas metering so nested calls can't evade limits
|
|
2466
|
+
child_vm = VM.create_child(parent_vm=self, env=local_env)
|
|
2467
|
+
try:
|
|
2468
|
+
return child_vm._run_stack_bytecode_sync(bytecode, debug=False)
|
|
2469
|
+
finally:
|
|
2470
|
+
self._return_vm_to_pool(child_vm)
|
|
2471
|
+
# Fallback for async callables
|
|
2472
|
+
if _iscoroutinefunction(real_fn):
|
|
2473
|
+
return self._run_coroutine_sync(real_fn(*args))
|
|
2474
|
+
return None
|
|
2475
|
+
|
|
2476
|
+
# ==================== Core Execution: Stack Bytecode ====================
|
|
2477
|
+
|
|
2478
|
+
async def _run_stack_bytecode(self, bytecode, debug=False):
|
|
2479
|
+
# 0. Optional bytecode optimizations (peephole, SSA)
|
|
2480
|
+
consts = list(getattr(bytecode, "constants", []))
|
|
2481
|
+
instrs = list(getattr(bytecode, "instructions", []))
|
|
2482
|
+
|
|
2483
|
+
fast_single_shot = (
|
|
2484
|
+
self.fast_single_shot
|
|
2485
|
+
and isinstance(self.single_shot_max_instructions, int)
|
|
2486
|
+
and len(instrs) <= self.single_shot_max_instructions
|
|
2487
|
+
)
|
|
2488
|
+
|
|
2489
|
+
if not fast_single_shot and self.enable_bytecode_optimizer and self.bytecode_optimizer:
|
|
2490
|
+
try:
|
|
2491
|
+
normalized_for_opt: List[Tuple[Any, Any]] = []
|
|
2492
|
+
for instr in instrs:
|
|
2493
|
+
if instr is None:
|
|
2494
|
+
continue
|
|
2495
|
+
if isinstance(instr, tuple) and len(instr) >= 2:
|
|
2496
|
+
op = instr[0]
|
|
2497
|
+
operand = instr[1] if len(instr) == 2 else tuple(instr[1:])
|
|
2498
|
+
op_name = op.name if hasattr(op, "name") else op
|
|
2499
|
+
normalized_for_opt.append((str(op_name), operand))
|
|
2500
|
+
instrs = self.bytecode_optimizer.optimize(normalized_for_opt, consts)
|
|
2501
|
+
except Exception:
|
|
2502
|
+
pass
|
|
2503
|
+
|
|
2504
|
+
# Peephole optimization with constant pool awareness
|
|
2505
|
+
if not fast_single_shot and self.enable_peephole_optimizer and self.peephole_optimizer:
|
|
2506
|
+
try:
|
|
2507
|
+
instrs, consts = self.peephole_optimizer.optimize_bytecode(instrs, consts)
|
|
2508
|
+
except Exception:
|
|
2509
|
+
pass
|
|
2510
|
+
|
|
2511
|
+
# Normalize opcodes to names for SSA pipeline and stack dispatch
|
|
2512
|
+
normalized_instrs: List[Tuple[Any, Any]] = []
|
|
2513
|
+
for instr in instrs:
|
|
2514
|
+
if instr is None:
|
|
2515
|
+
continue
|
|
2516
|
+
if isinstance(instr, tuple) and len(instr) >= 2:
|
|
2517
|
+
op = instr[0]
|
|
2518
|
+
operand = instr[1] if len(instr) == 2 else tuple(instr[1:])
|
|
2519
|
+
op_name = op.name if hasattr(op, "name") else op
|
|
2520
|
+
normalized_instrs.append((op_name, operand))
|
|
2521
|
+
|
|
2522
|
+
instrs = normalized_instrs
|
|
2523
|
+
|
|
2524
|
+
if not self._native_jit_auto_enabled:
|
|
2525
|
+
self._opcode_exec_count += len(instrs)
|
|
2526
|
+
|
|
2527
|
+
if not fast_single_shot and self.enable_ssa and self.ssa_converter:
|
|
2528
|
+
try:
|
|
2529
|
+
ssa_program = self.ssa_converter.convert_to_ssa(instrs)
|
|
2530
|
+
ssa_instrs = destruct_ssa(ssa_program)
|
|
2531
|
+
instrs, consts = self._normalize_ssa_instructions(ssa_instrs, consts)
|
|
2532
|
+
except Exception:
|
|
2533
|
+
pass
|
|
2534
|
+
|
|
2535
|
+
# 1. JIT Check (with thread safety)
|
|
2536
|
+
if self.use_jit and self.jit_compiler:
|
|
2537
|
+
jit_function = None
|
|
2538
|
+
with self._jit_lock:
|
|
2539
|
+
bytecode_hash = self.jit_compiler._hash_bytecode(bytecode)
|
|
2540
|
+
jit_function = self.jit_compiler.compilation_cache.get(bytecode_hash)
|
|
2541
|
+
|
|
2542
|
+
if jit_function:
|
|
2543
|
+
try:
|
|
2544
|
+
start_t = time.perf_counter()
|
|
2545
|
+
stack = []
|
|
2546
|
+
result = jit_function(self, stack, self.env)
|
|
2547
|
+
with self._jit_lock:
|
|
2548
|
+
self.jit_compiler.stats.cache_hits += 1
|
|
2549
|
+
self.jit_compiler.record_execution_time(bytecode_hash, time.perf_counter() - start_t, ExecutionTier.JIT_NATIVE)
|
|
2550
|
+
if debug: print(f"[VM JIT] Executed cached function")
|
|
2551
|
+
return result
|
|
2552
|
+
except Exception as e:
|
|
2553
|
+
if debug: print(f"[VM JIT] Failed: {e}, falling back")
|
|
2554
|
+
|
|
2555
|
+
# 2. Bytecode Execution Setup
|
|
2556
|
+
ip = 0
|
|
2557
|
+
trace_interval = 0
|
|
2558
|
+
try:
|
|
2559
|
+
trace_interval = int(os.environ.get("ZEXUS_VM_TRACE_INTERVAL", "0"))
|
|
2560
|
+
except Exception:
|
|
2561
|
+
trace_interval = 0
|
|
2562
|
+
trace_counter = 0
|
|
2563
|
+
running = True
|
|
2564
|
+
return_value = None
|
|
2565
|
+
profile_flag = os.environ.get("ZEXUS_VM_PROFILE_OPS")
|
|
2566
|
+
profile_ops = profile_flag is not None and profile_flag.lower() not in ("0", "false", "off")
|
|
2567
|
+
profile_verbose_flag = os.environ.get("ZEXUS_VM_PROFILE_VERBOSE")
|
|
2568
|
+
profile_verbose = profile_verbose_flag and profile_verbose_flag.lower() not in ("0", "false", "off")
|
|
2569
|
+
opcode_counts: Optional[Dict[str, int]] = {} if profile_ops else None
|
|
2570
|
+
if profile_ops and profile_verbose:
|
|
2571
|
+
print(f"[VM DEBUG] opcode profiling enabled; instrs={len(instrs)}")
|
|
2572
|
+
gas_metering = self.gas_metering
|
|
2573
|
+
gas_light = self.enable_gas_light and gas_metering is not None
|
|
2574
|
+
gas_consume = gas_metering.consume if gas_metering else None
|
|
2575
|
+
gas_consume_light = gas_metering.consume_light if gas_metering else None
|
|
2576
|
+
gas_enabled = self.enable_gas_metering and gas_metering is not None
|
|
2577
|
+
trace_ip_range = None
|
|
2578
|
+
trace_ip_env = os.environ.get("ZEXUS_VM_TRACE_IP_RANGE")
|
|
2579
|
+
if trace_ip_env:
|
|
2580
|
+
try:
|
|
2581
|
+
parts = str(trace_ip_env).split("-", 1)
|
|
2582
|
+
if len(parts) == 2:
|
|
2583
|
+
trace_ip_range = (int(parts[0]), int(parts[1]))
|
|
2584
|
+
except Exception:
|
|
2585
|
+
trace_ip_range = None
|
|
2586
|
+
|
|
2587
|
+
trace_loads_flag = os.environ.get("ZEXUS_VM_TRACE_LOADS")
|
|
2588
|
+
trace_loads_active = trace_loads_flag and trace_loads_flag.lower() not in ("0", "false", "off")
|
|
2589
|
+
trace_calls_flag = os.environ.get("ZEXUS_VM_TRACE_CALLS")
|
|
2590
|
+
trace_calls_active = trace_calls_flag and trace_calls_flag.lower() not in ("0", "false", "off")
|
|
2591
|
+
trace_targets_flag = os.environ.get("ZEXUS_VM_TRACE_METHOD_TARGETS")
|
|
2592
|
+
trace_targets_active = trace_targets_flag and trace_targets_flag.lower() not in ("0", "false", "off")
|
|
2593
|
+
# Hoist CALL_METHOD env lookups (were computed per-call)
|
|
2594
|
+
_trace_stack_flag = os.environ.get("ZEXUS_VM_TRACE_STACK")
|
|
2595
|
+
_trace_stack_active = bool(_trace_stack_flag and _trace_stack_flag.lower() not in ("0", "false", "off"))
|
|
2596
|
+
_trace_method_ops_flag = os.environ.get("ZEXUS_VM_TRACE_METHOD_OPS")
|
|
2597
|
+
_trace_method_ops_targets = None
|
|
2598
|
+
if _trace_method_ops_flag:
|
|
2599
|
+
try:
|
|
2600
|
+
_trace_method_ops_targets = [m.strip() for m in _trace_method_ops_flag.split(",") if m.strip()]
|
|
2601
|
+
except Exception:
|
|
2602
|
+
_trace_method_ops_targets = None
|
|
2603
|
+
_verbose_active = profile_verbose
|
|
2604
|
+
# Cached local ref to iscoroutinefunction (avoids asyncio.X attribute lookup)
|
|
2605
|
+
_iscoroutinefunction_local = _iscoroutinefunction
|
|
2606
|
+
|
|
2607
|
+
# Pre-resolve security module for CALL_METHOD
|
|
2608
|
+
_cached_security = _get_security_mod()
|
|
2609
|
+
# Pre-resolve Action/Lambda types for _invoke_callable_sync
|
|
2610
|
+
_cached_ZAction, _cached_ZLambda = _get_action_types()
|
|
2611
|
+
|
|
2612
|
+
class _EvalStack:
|
|
2613
|
+
__slots__ = ("data", "sp")
|
|
2614
|
+
|
|
2615
|
+
def __init__(self, capacity: int):
|
|
2616
|
+
base = max(32, capacity)
|
|
798
2617
|
self.data = [None] * base
|
|
799
2618
|
self.sp = 0
|
|
800
2619
|
|
|
@@ -837,66 +2656,128 @@ class VM:
|
|
|
837
2656
|
return self.data[:self.sp]
|
|
838
2657
|
|
|
839
2658
|
stack = _EvalStack(len(instrs) * 2 if instrs else 32)
|
|
2659
|
+
stack_append = stack.append
|
|
2660
|
+
stack_pop = stack.pop
|
|
2661
|
+
call_cache: Dict[str, Tuple[Any, int]] = {}
|
|
2662
|
+
|
|
2663
|
+
def const(idx):
|
|
2664
|
+
if isinstance(idx, int):
|
|
2665
|
+
return consts[idx] if 0 <= idx < len(consts) else None
|
|
2666
|
+
return idx
|
|
840
2667
|
|
|
841
|
-
|
|
2668
|
+
missing = object()
|
|
2669
|
+
env_get = self.env.get
|
|
2670
|
+
closure_get = self._closure_cells.get
|
|
2671
|
+
builtins_get = self.builtins.get
|
|
2672
|
+
name_cache = self._name_cache
|
|
842
2673
|
|
|
843
2674
|
# Lexical Resolution Helper (Closures/Cells)
|
|
844
2675
|
def _resolve(name):
|
|
2676
|
+
cached = name_cache.get(name)
|
|
2677
|
+
if cached and cached[1] == self._env_version:
|
|
2678
|
+
return cached[0]
|
|
845
2679
|
# 1. Local
|
|
846
|
-
|
|
847
|
-
|
|
848
|
-
|
|
2680
|
+
val = env_get(name, missing)
|
|
2681
|
+
if val is not missing:
|
|
2682
|
+
resolved = val.value if isinstance(val, Cell) else val
|
|
2683
|
+
name_cache[name] = (resolved, self._env_version)
|
|
2684
|
+
return resolved
|
|
849
2685
|
# 2. Closure Cells (attached to VM)
|
|
850
|
-
|
|
851
|
-
|
|
2686
|
+
cell = closure_get(name)
|
|
2687
|
+
if cell is not None:
|
|
2688
|
+
resolved = cell.value
|
|
2689
|
+
name_cache[name] = (resolved, self._env_version)
|
|
2690
|
+
return resolved
|
|
852
2691
|
# 3. Parent Chain
|
|
853
2692
|
p = self._parent_env
|
|
854
2693
|
while p is not None:
|
|
855
2694
|
if isinstance(p, VM):
|
|
856
|
-
|
|
857
|
-
|
|
2695
|
+
p_val = p.env.get(name, missing)
|
|
2696
|
+
if p_val is not missing:
|
|
2697
|
+
val = p_val
|
|
858
2698
|
return val.value if isinstance(val, Cell) else val
|
|
859
|
-
|
|
860
|
-
|
|
2699
|
+
p_cell = p._closure_cells.get(name)
|
|
2700
|
+
if p_cell is not None:
|
|
2701
|
+
return p_cell.value
|
|
861
2702
|
p = p._parent_env
|
|
862
2703
|
else:
|
|
863
|
-
if name in p:
|
|
2704
|
+
if name in p:
|
|
2705
|
+
return p[name]
|
|
864
2706
|
p = None
|
|
865
2707
|
return None
|
|
866
2708
|
|
|
867
2709
|
def _store(name, value):
|
|
868
2710
|
# Update existing Cell in local env
|
|
869
2711
|
if name in self.env and isinstance(self.env[name], Cell):
|
|
870
|
-
self.env[name].value = value
|
|
2712
|
+
self.env[name].value = value
|
|
2713
|
+
self._bump_env_version(name, value)
|
|
2714
|
+
return
|
|
871
2715
|
# Update local non-cell
|
|
872
2716
|
if name in self.env:
|
|
873
|
-
self.env[name] = value
|
|
2717
|
+
self.env[name] = value
|
|
2718
|
+
self._bump_env_version(name, value)
|
|
2719
|
+
return
|
|
874
2720
|
# Update Closure Cell
|
|
875
2721
|
if name in self._closure_cells:
|
|
876
|
-
self._closure_cells[name].value = value
|
|
2722
|
+
self._closure_cells[name].value = value
|
|
2723
|
+
self._bump_env_version(name, value)
|
|
2724
|
+
return
|
|
877
2725
|
# Update Parent Chain
|
|
878
2726
|
p = self._parent_env
|
|
879
2727
|
while p is not None:
|
|
880
2728
|
if isinstance(p, VM):
|
|
881
2729
|
if name in p._closure_cells:
|
|
882
|
-
p._closure_cells[name].value = value
|
|
2730
|
+
p._closure_cells[name].value = value
|
|
2731
|
+
p._bump_env_version(name, value)
|
|
2732
|
+
self._bump_env_version(name, value)
|
|
2733
|
+
return
|
|
883
2734
|
if name in p.env:
|
|
884
|
-
p.env[name] = value
|
|
2735
|
+
p.env[name] = value
|
|
2736
|
+
p._bump_env_version(name, value)
|
|
2737
|
+
self._bump_env_version(name, value)
|
|
2738
|
+
return
|
|
885
2739
|
p = p._parent_env
|
|
886
2740
|
else:
|
|
887
2741
|
if name in p:
|
|
888
|
-
p[name] = value
|
|
2742
|
+
p[name] = value
|
|
2743
|
+
self._bump_env_version(name, value)
|
|
2744
|
+
return
|
|
889
2745
|
p = None
|
|
890
2746
|
# Default: Create local
|
|
891
2747
|
self.env[name] = value
|
|
2748
|
+
self._bump_env_version(name, value)
|
|
2749
|
+
|
|
2750
|
+
def _resolve_callable(name):
|
|
2751
|
+
cached = call_cache.get(name)
|
|
2752
|
+
if cached and cached[1] == self._env_version:
|
|
2753
|
+
return cached[0]
|
|
2754
|
+
fn = None
|
|
2755
|
+
try:
|
|
2756
|
+
fn = builtins_get(name)
|
|
2757
|
+
except Exception:
|
|
2758
|
+
fn = None
|
|
2759
|
+
if fn is None:
|
|
2760
|
+
fn = _resolve(name)
|
|
2761
|
+
call_cache[name] = (fn, self._env_version)
|
|
2762
|
+
return fn
|
|
892
2763
|
|
|
893
2764
|
def _unwrap(value):
|
|
2765
|
+
if isinstance(value, ZNull):
|
|
2766
|
+
return None
|
|
894
2767
|
return value.value if hasattr(value, 'value') else value
|
|
895
2768
|
|
|
896
2769
|
def _binary_op(func):
|
|
897
2770
|
def wrapper(_):
|
|
898
2771
|
b = _unwrap(stack.pop() if stack else 0)
|
|
899
2772
|
a = _unwrap(stack.pop() if stack else 0)
|
|
2773
|
+
if a is None: a = 0
|
|
2774
|
+
if b is None: b = 0
|
|
2775
|
+
if isinstance(a, ZEvaluationError):
|
|
2776
|
+
stack.append(a)
|
|
2777
|
+
return
|
|
2778
|
+
if isinstance(b, ZEvaluationError):
|
|
2779
|
+
stack.append(b)
|
|
2780
|
+
return
|
|
900
2781
|
try:
|
|
901
2782
|
stack.append(func(a, b))
|
|
902
2783
|
except Exception as exc:
|
|
@@ -907,26 +2788,57 @@ class VM:
|
|
|
907
2788
|
|
|
908
2789
|
def _binary_bool_op(func):
|
|
909
2790
|
def wrapper(_):
|
|
910
|
-
b = stack.pop() if stack else None
|
|
911
|
-
a = stack.pop() if stack else None
|
|
2791
|
+
b = _unwrap(stack.pop() if stack else None)
|
|
2792
|
+
a = _unwrap(stack.pop() if stack else None)
|
|
2793
|
+
if isinstance(a, ZEvaluationError):
|
|
2794
|
+
stack.append(a)
|
|
2795
|
+
return
|
|
2796
|
+
if isinstance(b, ZEvaluationError):
|
|
2797
|
+
stack.append(b)
|
|
2798
|
+
return
|
|
912
2799
|
stack.append(func(a, b))
|
|
913
2800
|
return wrapper
|
|
914
2801
|
|
|
915
2802
|
async def _op_call_name(operand):
|
|
916
2803
|
if not operand:
|
|
917
|
-
|
|
2804
|
+
stack_append(None)
|
|
918
2805
|
return
|
|
919
2806
|
name_idx, arg_count = operand
|
|
920
2807
|
func_name = const(name_idx)
|
|
921
|
-
|
|
922
|
-
|
|
2808
|
+
if arg_count:
|
|
2809
|
+
args = [stack_pop() if stack else None for _ in range(arg_count)]
|
|
2810
|
+
args.reverse()
|
|
2811
|
+
else:
|
|
2812
|
+
args = []
|
|
2813
|
+
fn = None
|
|
2814
|
+
try:
|
|
2815
|
+
fn = builtins_get(func_name)
|
|
2816
|
+
except Exception:
|
|
2817
|
+
fn = None
|
|
2818
|
+
if fn is None:
|
|
2819
|
+
fn = _resolve_callable(func_name)
|
|
2820
|
+
if fn is None:
|
|
2821
|
+
fallback_res = self._call_fallback_builtin(func_name, args)
|
|
2822
|
+
stack_append(fallback_res)
|
|
2823
|
+
return
|
|
2824
|
+
real_fn = fn.fn if hasattr(fn, "fn") else fn
|
|
2825
|
+
if callable(real_fn) and not _iscoroutinefunction_local(real_fn):
|
|
2826
|
+
res = self._invoke_callable_sync(fn, args)
|
|
2827
|
+
stack_append(res)
|
|
2828
|
+
return
|
|
923
2829
|
res = await self._invoke_callable_or_funcdesc(fn, args)
|
|
924
|
-
|
|
2830
|
+
stack_append(res)
|
|
925
2831
|
|
|
926
2832
|
async def _op_call_top(arg_count):
|
|
927
2833
|
count = arg_count or 0
|
|
928
|
-
|
|
929
|
-
|
|
2834
|
+
# Use stack_pop to avoid crash on empty stack
|
|
2835
|
+
args = [stack_pop() for _ in range(count)][::-1] if count else []
|
|
2836
|
+
fn_obj = stack_pop()
|
|
2837
|
+
real_fn = fn_obj.fn if hasattr(fn_obj, "fn") else fn_obj
|
|
2838
|
+
if callable(real_fn) and not _iscoroutinefunction_local(real_fn):
|
|
2839
|
+
res = self._invoke_callable_sync(fn_obj, args)
|
|
2840
|
+
stack.append(res)
|
|
2841
|
+
return
|
|
930
2842
|
res = await self._invoke_callable_or_funcdesc(fn_obj, args)
|
|
931
2843
|
stack.append(res)
|
|
932
2844
|
|
|
@@ -936,21 +2848,157 @@ class VM:
|
|
|
936
2848
|
return
|
|
937
2849
|
|
|
938
2850
|
method_idx, arg_count = operand
|
|
939
|
-
|
|
940
|
-
|
|
2851
|
+
if _trace_stack_active:
|
|
2852
|
+
if len(stack) < arg_count + 1:
|
|
2853
|
+
try:
|
|
2854
|
+
window = []
|
|
2855
|
+
start = max(0, ip - 12)
|
|
2856
|
+
for k in range(start, min(len(instrs), ip + 1)):
|
|
2857
|
+
instr = instrs[k]
|
|
2858
|
+
if instr is None:
|
|
2859
|
+
continue
|
|
2860
|
+
opk = instr[0] if isinstance(instr, tuple) else instr
|
|
2861
|
+
namek = opk.name if hasattr(opk, "name") else str(opk)
|
|
2862
|
+
operk = instr[1] if isinstance(instr, tuple) and len(instr) > 1 else None
|
|
2863
|
+
if namek in ("LOAD_NAME", "LOAD_CONST"):
|
|
2864
|
+
try:
|
|
2865
|
+
val = const(operk)
|
|
2866
|
+
except Exception:
|
|
2867
|
+
val = operk
|
|
2868
|
+
window.append(f"{k}:{namek}={val}")
|
|
2869
|
+
else:
|
|
2870
|
+
window.append(f"{k}:{namek}")
|
|
2871
|
+
try:
|
|
2872
|
+
tail = stack.snapshot()[-8:]
|
|
2873
|
+
except Exception:
|
|
2874
|
+
tail = "<unavailable>"
|
|
2875
|
+
print(
|
|
2876
|
+
f"[VM TRACE] stack_underflow ip={ip} method={const(method_idx)} "
|
|
2877
|
+
f"argc={arg_count} stack={len(stack)} tail={tail} ops={'|'.join(window)}"
|
|
2878
|
+
)
|
|
2879
|
+
except Exception:
|
|
2880
|
+
print(f"[VM TRACE] stack_underflow ip={ip} method={const(method_idx)} argc={arg_count} stack={len(stack)}")
|
|
2881
|
+
if len(stack) < arg_count + 1:
|
|
2882
|
+
missing = (arg_count + 1) - len(stack)
|
|
2883
|
+
for _ in range(missing):
|
|
2884
|
+
stack_append(None)
|
|
2885
|
+
args = [stack_pop() if stack else None for _ in range(arg_count)][::-1] if arg_count else []
|
|
2886
|
+
target = stack_pop() if stack else None
|
|
941
2887
|
method_name = const(method_idx)
|
|
2888
|
+
if _trace_method_ops_targets:
|
|
2889
|
+
if method_name in _trace_method_ops_targets:
|
|
2890
|
+
try:
|
|
2891
|
+
window = []
|
|
2892
|
+
start = max(0, ip - 10)
|
|
2893
|
+
for k in range(start, min(len(instrs), ip + 2)):
|
|
2894
|
+
instr = instrs[k]
|
|
2895
|
+
if instr is None:
|
|
2896
|
+
continue
|
|
2897
|
+
opk = instr[0] if isinstance(instr, tuple) else instr
|
|
2898
|
+
namek = opk.name if hasattr(opk, "name") else str(opk)
|
|
2899
|
+
operk = instr[1] if isinstance(instr, tuple) and len(instr) > 1 else None
|
|
2900
|
+
if namek in ("LOAD_NAME", "LOAD_CONST", "STORE_NAME"):
|
|
2901
|
+
try:
|
|
2902
|
+
val = const(operk)
|
|
2903
|
+
except Exception:
|
|
2904
|
+
val = operk
|
|
2905
|
+
window.append(f"{k}:{namek}={val}")
|
|
2906
|
+
else:
|
|
2907
|
+
window.append(f"{k}:{namek}")
|
|
2908
|
+
print(f"[VM TRACE] method_ops {method_name} ip={ip} ops={'|'.join(window)}")
|
|
2909
|
+
except Exception:
|
|
2910
|
+
print(f"[VM TRACE] method_ops {method_name} ip={ip}")
|
|
2911
|
+
|
|
2912
|
+
if trace_calls_active:
|
|
2913
|
+
try:
|
|
2914
|
+
interval = int(trace_calls_flag) if trace_calls_flag.isdigit() else 1000
|
|
2915
|
+
except Exception:
|
|
2916
|
+
interval = 1000
|
|
2917
|
+
self._call_method_total += 1
|
|
2918
|
+
if interval > 0 and self._call_method_total % interval == 0:
|
|
2919
|
+
target_type = type(target).__name__ if target is not None else "None"
|
|
2920
|
+
print(
|
|
2921
|
+
f"[VM TRACE] CALL_METHOD total={self._call_method_total} method={method_name} "
|
|
2922
|
+
f"argc={arg_count} target={target_type}"
|
|
2923
|
+
)
|
|
942
2924
|
|
|
943
2925
|
if target is None:
|
|
944
|
-
|
|
2926
|
+
if trace_targets_active:
|
|
2927
|
+
if method_name in ("submit_transaction_fast", "produce_single_tx_block", "produce_blocks_fast_until_empty"):
|
|
2928
|
+
if self._method_target_trace_count < 10:
|
|
2929
|
+
env_val = self.env.get("blockchain") if isinstance(self.env, dict) else None
|
|
2930
|
+
env_type = type(env_val).__name__ if env_val is not None else "None"
|
|
2931
|
+
stack_size = len(stack) if hasattr(stack, "__len__") else -1
|
|
2932
|
+
print(f"[VM TRACE] {method_name} target None; env.blockchain={env_type} arg_count={arg_count} stack={stack_size}")
|
|
2933
|
+
self._method_target_trace_count += 1
|
|
2934
|
+
stack_append(None)
|
|
945
2935
|
return
|
|
946
2936
|
|
|
947
2937
|
result = None
|
|
2938
|
+
if _verbose_active and self._call_method_trace_count < 25:
|
|
2939
|
+
target_type = type(target).__name__
|
|
2940
|
+
preview = []
|
|
2941
|
+
for item in args[:3]:
|
|
2942
|
+
try:
|
|
2943
|
+
preview.append(repr(item))
|
|
2944
|
+
except Exception:
|
|
2945
|
+
preview.append(f"<{type(item).__name__}>")
|
|
2946
|
+
print(f"[VM TRACE] CALL_METHOD {method_name} target={target_type} args={len(args)} preview={preview}")
|
|
2947
|
+
self._call_method_trace_count += 1
|
|
948
2948
|
try:
|
|
949
|
-
if
|
|
2949
|
+
if method_name == "set":
|
|
2950
|
+
if isinstance(target, ZMap):
|
|
2951
|
+
if len(args) >= 2:
|
|
2952
|
+
key = args[0]
|
|
2953
|
+
if isinstance(key, ZString):
|
|
2954
|
+
norm_key = key.value
|
|
2955
|
+
elif isinstance(key, str):
|
|
2956
|
+
norm_key = key
|
|
2957
|
+
elif hasattr(key, "inspect"):
|
|
2958
|
+
norm_key = key.inspect()
|
|
2959
|
+
else:
|
|
2960
|
+
norm_key = str(key)
|
|
2961
|
+
existing = target.pairs.get(norm_key)
|
|
2962
|
+
if existing is not None and existing.__class__.__name__ == 'SealedObject':
|
|
2963
|
+
raise ZEvaluationError(f"Cannot modify sealed map key: {key}")
|
|
2964
|
+
target.pairs[norm_key] = args[1]
|
|
2965
|
+
result = args[1]
|
|
2966
|
+
else:
|
|
2967
|
+
result = None
|
|
2968
|
+
elif isinstance(target, ZList):
|
|
2969
|
+
if len(args) >= 2:
|
|
2970
|
+
target.set(args[0], args[1])
|
|
2971
|
+
result = args[1]
|
|
2972
|
+
else:
|
|
2973
|
+
result = None
|
|
2974
|
+
elif isinstance(target, (dict, list)):
|
|
2975
|
+
if len(args) >= 2:
|
|
2976
|
+
target[args[0]] = args[1]
|
|
2977
|
+
result = args[1]
|
|
2978
|
+
else:
|
|
2979
|
+
result = None
|
|
2980
|
+
elif method_name == "get":
|
|
2981
|
+
if isinstance(target, ZMap) and args:
|
|
2982
|
+
result = target.get(args[0])
|
|
2983
|
+
elif isinstance(target, dict) and args:
|
|
2984
|
+
result = target.get(args[0])
|
|
2985
|
+
elif hasattr(target, "call_method"):
|
|
950
2986
|
wrapped_args = [self._wrap_for_builtin(arg) for arg in args]
|
|
951
|
-
|
|
2987
|
+
if _cached_security is not None:
|
|
2988
|
+
try:
|
|
2989
|
+
_cached_security._set_vm_action_context(True)
|
|
2990
|
+
except Exception:
|
|
2991
|
+
pass
|
|
2992
|
+
try:
|
|
2993
|
+
result = target.call_method(method_name, wrapped_args)
|
|
2994
|
+
finally:
|
|
2995
|
+
if _cached_security is not None:
|
|
2996
|
+
try:
|
|
2997
|
+
_cached_security._set_vm_action_context(False)
|
|
2998
|
+
except Exception:
|
|
2999
|
+
pass
|
|
952
3000
|
else:
|
|
953
|
-
attr =
|
|
3001
|
+
attr = self._get_cached_method(target, method_name)
|
|
954
3002
|
if callable(attr):
|
|
955
3003
|
result = attr(*args)
|
|
956
3004
|
elif isinstance(target, dict) and method_name in target:
|
|
@@ -958,39 +3006,93 @@ class VM:
|
|
|
958
3006
|
result = candidate(*args) if callable(candidate) else candidate
|
|
959
3007
|
else:
|
|
960
3008
|
result = attr
|
|
3009
|
+
if _verbose_active and self._call_method_trace_count <= 25:
|
|
3010
|
+
print(f"[VM TRACE] CALL_METHOD {method_name} result={result}")
|
|
3011
|
+
# Only check for coroutine/future on paths that can produce them
|
|
3012
|
+
# set/get paths are always sync; call_method/getattr may return coroutines
|
|
3013
|
+
if result is not None and (asyncio.iscoroutine(result) or isinstance(result, asyncio.Future)):
|
|
3014
|
+
if self.async_optimizer:
|
|
3015
|
+
result = await self.async_optimizer.await_optimized(result)
|
|
3016
|
+
else:
|
|
3017
|
+
result = await result
|
|
961
3018
|
except Exception as exc:
|
|
962
3019
|
if debug:
|
|
963
3020
|
print(f"[VM] CALL_METHOD failed for {method_name}: {exc}")
|
|
964
3021
|
raise
|
|
965
3022
|
|
|
966
|
-
|
|
3023
|
+
stack_append(self._unwrap_after_builtin(result))
|
|
3024
|
+
|
|
3025
|
+
stack_append = stack.append
|
|
3026
|
+
stack_pop = stack.pop
|
|
967
3027
|
|
|
968
3028
|
def _op_load_const(idx):
|
|
969
|
-
|
|
3029
|
+
value = const(idx)
|
|
3030
|
+
if self.integer_pool and isinstance(value, int):
|
|
3031
|
+
value = self.integer_pool.get(value)
|
|
3032
|
+
elif self.string_pool and isinstance(value, str):
|
|
3033
|
+
value = self.string_pool.get(value)
|
|
3034
|
+
stack_append(value)
|
|
3035
|
+
if trace_loads_active:
|
|
3036
|
+
if value is None:
|
|
3037
|
+
try:
|
|
3038
|
+
print(f"[VM TRACE] LOAD_CONST None ip={ip - 1} stack={len(stack)}")
|
|
3039
|
+
except Exception:
|
|
3040
|
+
print(f"[VM TRACE] LOAD_CONST None ip={ip - 1}")
|
|
970
3041
|
|
|
971
3042
|
def _op_load_name(idx):
|
|
972
3043
|
name = const(idx)
|
|
973
|
-
|
|
3044
|
+
stack_append(_resolve(name))
|
|
3045
|
+
if trace_loads_active:
|
|
3046
|
+
if name in ("blockchain", "sender"):
|
|
3047
|
+
try:
|
|
3048
|
+
print(f"[VM TRACE] LOAD_NAME {name} ip={ip - 1} stack={len(stack)}")
|
|
3049
|
+
except Exception:
|
|
3050
|
+
print(f"[VM TRACE] LOAD_NAME {name} ip={ip - 1}")
|
|
974
3051
|
|
|
975
3052
|
def _op_store_name(idx):
|
|
976
3053
|
name = const(idx)
|
|
977
|
-
val =
|
|
978
|
-
|
|
3054
|
+
val = stack_pop() if stack else None
|
|
3055
|
+
existing = env_get(name, missing)
|
|
3056
|
+
if existing is not missing and not isinstance(existing, Cell):
|
|
3057
|
+
self.env[name] = val
|
|
3058
|
+
self._bump_env_version(name, val)
|
|
3059
|
+
else:
|
|
3060
|
+
cell = closure_get(name)
|
|
3061
|
+
if cell is not None:
|
|
3062
|
+
cell.value = val
|
|
3063
|
+
self._bump_env_version(name, val)
|
|
3064
|
+
else:
|
|
3065
|
+
_store(name, val)
|
|
979
3066
|
if self.use_memory_manager and val is not None:
|
|
980
|
-
self._allocate_managed(val, name=name)
|
|
981
|
-
|
|
3067
|
+
self._allocate_managed(val, name=name, root=True)
|
|
3068
|
+
return
|
|
982
3069
|
def _op_pop(_):
|
|
983
3070
|
if stack:
|
|
984
|
-
|
|
3071
|
+
stack_pop()
|
|
985
3072
|
|
|
986
3073
|
def _op_dup(_):
|
|
987
3074
|
if stack:
|
|
988
|
-
stack.
|
|
3075
|
+
stack_append(stack.peek())
|
|
989
3076
|
|
|
990
3077
|
def _op_neg(_):
|
|
991
3078
|
a = _unwrap(stack.pop() if stack else 0)
|
|
992
3079
|
stack.append(-a)
|
|
993
3080
|
|
|
3081
|
+
def _op_add(_):
|
|
3082
|
+
b = _unwrap(stack_pop() if stack else 0)
|
|
3083
|
+
a = _unwrap(stack_pop() if stack else 0)
|
|
3084
|
+
if a is None:
|
|
3085
|
+
a = 0
|
|
3086
|
+
if b is None:
|
|
3087
|
+
b = 0
|
|
3088
|
+
if isinstance(a, ZEvaluationError):
|
|
3089
|
+
stack_append(a)
|
|
3090
|
+
return
|
|
3091
|
+
if isinstance(b, ZEvaluationError):
|
|
3092
|
+
stack_append(b)
|
|
3093
|
+
return
|
|
3094
|
+
stack_append(a + b)
|
|
3095
|
+
|
|
994
3096
|
def _op_not(_):
|
|
995
3097
|
a = stack.pop() if stack else False
|
|
996
3098
|
stack.append(not a)
|
|
@@ -1002,7 +3104,8 @@ class VM:
|
|
|
1002
3104
|
def _op_jump_if_false(target):
|
|
1003
3105
|
nonlocal ip
|
|
1004
3106
|
cond = stack.pop() if stack else None
|
|
1005
|
-
|
|
3107
|
+
cond_val = _unwrap(cond)
|
|
3108
|
+
if not cond_val:
|
|
1006
3109
|
ip = target
|
|
1007
3110
|
|
|
1008
3111
|
def _op_return(_):
|
|
@@ -1012,29 +3115,84 @@ class VM:
|
|
|
1012
3115
|
|
|
1013
3116
|
def _op_build_list(count):
|
|
1014
3117
|
total = count if count is not None else 0
|
|
1015
|
-
|
|
1016
|
-
|
|
3118
|
+
if self.list_pool:
|
|
3119
|
+
lst = self.allocate_list(total)
|
|
3120
|
+
if total > 0:
|
|
3121
|
+
for i in range(total - 1, -1, -1):
|
|
3122
|
+
lst[i] = stack.pop() if stack else None
|
|
3123
|
+
stack.append(lst)
|
|
3124
|
+
else:
|
|
3125
|
+
elements = [None] * total
|
|
3126
|
+
for i in range(total - 1, -1, -1):
|
|
3127
|
+
elements[i] = stack.pop() if stack else None
|
|
3128
|
+
stack.append(elements)
|
|
1017
3129
|
|
|
1018
3130
|
def _op_build_map(count):
|
|
1019
3131
|
total = count if count is not None else 0
|
|
1020
3132
|
result = {}
|
|
1021
3133
|
for _ in range(total):
|
|
1022
|
-
val =
|
|
3134
|
+
val = stack_pop() if stack else None
|
|
3135
|
+
key = stack_pop() if stack else None
|
|
1023
3136
|
result[key] = val
|
|
1024
|
-
|
|
3137
|
+
stack_append(result)
|
|
1025
3138
|
|
|
1026
3139
|
def _op_index(_):
|
|
1027
|
-
idx = stack.pop()
|
|
3140
|
+
idx = stack.pop() if stack else None
|
|
3141
|
+
obj = stack.pop() if stack else None
|
|
1028
3142
|
try:
|
|
1029
|
-
|
|
3143
|
+
if isinstance(obj, ZList):
|
|
3144
|
+
stack.append(obj.get(idx))
|
|
3145
|
+
elif isinstance(obj, ZMap):
|
|
3146
|
+
key = idx
|
|
3147
|
+
if isinstance(key, str):
|
|
3148
|
+
key = ZString(key)
|
|
3149
|
+
stack.append(obj.get(key))
|
|
3150
|
+
elif isinstance(obj, ZString):
|
|
3151
|
+
stack.append(obj[idx])
|
|
3152
|
+
else:
|
|
3153
|
+
# Fallback
|
|
3154
|
+
if obj is None:
|
|
3155
|
+
stack.append(None)
|
|
3156
|
+
else:
|
|
3157
|
+
raw_idx = idx.value if hasattr(idx, "value") else idx
|
|
3158
|
+
try:
|
|
3159
|
+
stack.append(obj[raw_idx])
|
|
3160
|
+
except Exception:
|
|
3161
|
+
stack.append(None)
|
|
1030
3162
|
except (IndexError, KeyError, TypeError):
|
|
1031
3163
|
stack.append(None)
|
|
1032
3164
|
|
|
3165
|
+
def _op_get_attr(_):
|
|
3166
|
+
attr = stack_pop() if stack else None
|
|
3167
|
+
obj = stack_pop() if stack else None
|
|
3168
|
+
if obj is None:
|
|
3169
|
+
stack_append(None)
|
|
3170
|
+
return
|
|
3171
|
+
attr_name = _unwrap(attr)
|
|
3172
|
+
try:
|
|
3173
|
+
if isinstance(obj, ZMap):
|
|
3174
|
+
key = attr_name
|
|
3175
|
+
if isinstance(key, str):
|
|
3176
|
+
key = ZString(key)
|
|
3177
|
+
stack_append(obj.get(key))
|
|
3178
|
+
elif isinstance(obj, dict):
|
|
3179
|
+
stack_append(obj.get(attr_name))
|
|
3180
|
+
else:
|
|
3181
|
+
stack_append(getattr(obj, attr_name, None))
|
|
3182
|
+
except Exception:
|
|
3183
|
+
stack_append(None)
|
|
3184
|
+
|
|
1033
3185
|
def _op_get_length(_):
|
|
1034
|
-
obj = stack.pop()
|
|
3186
|
+
obj = stack.pop() if stack else None
|
|
1035
3187
|
try:
|
|
1036
3188
|
if obj is None:
|
|
1037
3189
|
stack.append(0)
|
|
3190
|
+
elif isinstance(obj, ZList):
|
|
3191
|
+
stack.append(len(obj.elements))
|
|
3192
|
+
elif isinstance(obj, ZMap):
|
|
3193
|
+
stack.append(len(obj.pairs))
|
|
3194
|
+
elif isinstance(obj, ZString):
|
|
3195
|
+
stack.append(len(obj.value))
|
|
1038
3196
|
elif hasattr(obj, '__len__'):
|
|
1039
3197
|
stack.append(len(obj))
|
|
1040
3198
|
else:
|
|
@@ -1042,6 +3200,28 @@ class VM:
|
|
|
1042
3200
|
except (TypeError, AttributeError):
|
|
1043
3201
|
stack.append(0)
|
|
1044
3202
|
|
|
3203
|
+
def _op_read(_):
|
|
3204
|
+
path = stack.pop() if stack else None
|
|
3205
|
+
try:
|
|
3206
|
+
import os
|
|
3207
|
+
if path and os.path.exists(path):
|
|
3208
|
+
with open(path, 'r') as f:
|
|
3209
|
+
stack.append(f.read())
|
|
3210
|
+
else:
|
|
3211
|
+
stack.append(None)
|
|
3212
|
+
except:
|
|
3213
|
+
stack.append(None)
|
|
3214
|
+
|
|
3215
|
+
def _op_store_func(operand):
|
|
3216
|
+
name_idx, func_idx = operand
|
|
3217
|
+
name = const(name_idx)
|
|
3218
|
+
func = const(func_idx)
|
|
3219
|
+
_store(name, func)
|
|
3220
|
+
|
|
3221
|
+
def _op_print(_):
|
|
3222
|
+
val = stack_pop() if stack else None
|
|
3223
|
+
print(self._format_print_value(val))
|
|
3224
|
+
|
|
1045
3225
|
dispatch_table: Dict[str, Callable[[Any], Any]] = {
|
|
1046
3226
|
"LOAD_CONST": _op_load_const,
|
|
1047
3227
|
"LOAD_NAME": _op_load_name,
|
|
@@ -1051,7 +3231,7 @@ class VM:
|
|
|
1051
3231
|
"CALL_NAME": _op_call_name,
|
|
1052
3232
|
"CALL_TOP": _op_call_top,
|
|
1053
3233
|
"CALL_METHOD": _op_call_method,
|
|
1054
|
-
"ADD":
|
|
3234
|
+
"ADD": _op_add,
|
|
1055
3235
|
"SUB": _binary_op(lambda a, b: a - b),
|
|
1056
3236
|
"MUL": _binary_op(lambda a, b: a * b),
|
|
1057
3237
|
"DIV": _binary_op(lambda a, b: a / b if b != 0 else 0),
|
|
@@ -1071,424 +3251,1218 @@ class VM:
|
|
|
1071
3251
|
"BUILD_LIST": _op_build_list,
|
|
1072
3252
|
"BUILD_MAP": _op_build_map,
|
|
1073
3253
|
"INDEX": _op_index,
|
|
3254
|
+
"GET_ATTR": _op_get_attr,
|
|
1074
3255
|
"GET_LENGTH": _op_get_length,
|
|
3256
|
+
"READ": _op_read,
|
|
3257
|
+
"STORE_FUNC": _op_store_func,
|
|
3258
|
+
"PRINT": _op_print,
|
|
1075
3259
|
}
|
|
1076
3260
|
async_dispatch_ops = {"CALL_NAME", "CALL_TOP", "CALL_METHOD"}
|
|
3261
|
+
gas_kwarg_ops = {"BUILD_LIST", "BUILD_MAP", "MERKLE_ROOT", "CALL_NAME", "CALL_TOP", "CALL_METHOD", "CALL_BUILTIN"}
|
|
3262
|
+
|
|
3263
|
+
prepared_instrs: List[Tuple[str, Any, Optional[Callable[[Any], Any]], bool, int]] = []
|
|
3264
|
+
has_async_ops = False
|
|
3265
|
+
has_loop_ops = False
|
|
3266
|
+
loop_ops = {"JUMP", "JUMP_IF_FALSE", "JUMP_IF_TRUE", "FOR_ITER"}
|
|
3267
|
+
for instr in instrs:
|
|
3268
|
+
op_name, operand = instr
|
|
3269
|
+
handler = dispatch_table.get(op_name)
|
|
3270
|
+
is_async = op_name in async_dispatch_ops
|
|
3271
|
+
if is_async:
|
|
3272
|
+
has_async_ops = True
|
|
3273
|
+
if op_name in loop_ops:
|
|
3274
|
+
has_loop_ops = True
|
|
3275
|
+
gas_kind = 0
|
|
3276
|
+
if op_name in gas_kwarg_ops:
|
|
3277
|
+
if op_name in ("BUILD_LIST", "BUILD_MAP"):
|
|
3278
|
+
gas_kind = 1
|
|
3279
|
+
elif op_name == "MERKLE_ROOT":
|
|
3280
|
+
gas_kind = 2
|
|
3281
|
+
else:
|
|
3282
|
+
gas_kind = 3
|
|
3283
|
+
prepared_instrs.append((op_name, operand, handler, is_async, gas_kind))
|
|
1077
3284
|
|
|
1078
3285
|
# 3. Execution Loop
|
|
1079
3286
|
prev_ip = None
|
|
1080
|
-
|
|
1081
|
-
|
|
1082
|
-
|
|
1083
|
-
|
|
1084
|
-
|
|
1085
|
-
|
|
1086
|
-
|
|
1087
|
-
|
|
1088
|
-
|
|
3287
|
+
try_stack: List[int] = []
|
|
3288
|
+
auto_fast_loop = (
|
|
3289
|
+
len(prepared_instrs) >= getattr(self, "fast_loop_threshold", 512)
|
|
3290
|
+
or has_loop_ops
|
|
3291
|
+
)
|
|
3292
|
+
fast_loop_allowed = (
|
|
3293
|
+
(self.enable_fast_loop or auto_fast_loop)
|
|
3294
|
+
and not profile_ops
|
|
3295
|
+
and not gas_enabled # Never skip gas metering (including gas_light)
|
|
3296
|
+
and not trace_interval
|
|
3297
|
+
and trace_ip_range is None
|
|
3298
|
+
and not trace_loads_active
|
|
3299
|
+
and not trace_calls_active
|
|
3300
|
+
and not trace_targets_active
|
|
3301
|
+
and not self.enable_profiling
|
|
3302
|
+
)
|
|
3303
|
+
missing_handlers = any(handler is None for _, _, handler, _, _ in prepared_instrs)
|
|
3304
|
+
if self.enable_fast_loop or auto_fast_loop:
|
|
3305
|
+
reasons = []
|
|
3306
|
+
if auto_fast_loop and not self.enable_fast_loop:
|
|
3307
|
+
reasons.append("auto")
|
|
1089
3308
|
if profile_ops:
|
|
1090
|
-
|
|
1091
|
-
|
|
1092
|
-
|
|
1093
|
-
|
|
1094
|
-
|
|
1095
|
-
|
|
1096
|
-
|
|
1097
|
-
|
|
1098
|
-
|
|
1099
|
-
|
|
1100
|
-
|
|
1101
|
-
|
|
1102
|
-
|
|
1103
|
-
|
|
1104
|
-
|
|
1105
|
-
|
|
1106
|
-
|
|
1107
|
-
|
|
1108
|
-
|
|
1109
|
-
if
|
|
1110
|
-
|
|
1111
|
-
|
|
1112
|
-
|
|
1113
|
-
|
|
1114
|
-
if op_name == "CALL_NAME":
|
|
1115
|
-
gas_kwargs['arg_count'] = operand[1] if isinstance(operand, tuple) else 0
|
|
1116
|
-
elif op_name == "CALL_TOP":
|
|
1117
|
-
gas_kwargs['arg_count'] = operand if operand is not None else 0
|
|
1118
|
-
else:
|
|
1119
|
-
gas_kwargs['arg_count'] = operand[1] if isinstance(operand, tuple) else 0
|
|
1120
|
-
|
|
1121
|
-
# Consume gas for operation
|
|
1122
|
-
if not self.gas_metering.consume(op_name, **gas_kwargs):
|
|
1123
|
-
# Out of gas!
|
|
1124
|
-
if self.gas_metering.operation_count > self.gas_metering.max_operations:
|
|
1125
|
-
raise OperationLimitExceededError(
|
|
1126
|
-
self.gas_metering.operation_count,
|
|
1127
|
-
self.gas_metering.max_operations
|
|
1128
|
-
)
|
|
1129
|
-
else:
|
|
1130
|
-
raise OutOfGasError(
|
|
1131
|
-
self.gas_metering.gas_used,
|
|
1132
|
-
self.gas_metering.gas_limit,
|
|
1133
|
-
op_name
|
|
1134
|
-
)
|
|
3309
|
+
reasons.append("opcode_profile")
|
|
3310
|
+
if gas_enabled and not gas_light:
|
|
3311
|
+
reasons.append("gas")
|
|
3312
|
+
if trace_interval:
|
|
3313
|
+
reasons.append("trace_interval")
|
|
3314
|
+
if trace_ip_range is not None:
|
|
3315
|
+
reasons.append("trace_ip_range")
|
|
3316
|
+
if trace_loads_active:
|
|
3317
|
+
reasons.append("trace_loads")
|
|
3318
|
+
if trace_calls_active:
|
|
3319
|
+
reasons.append("trace_calls")
|
|
3320
|
+
if trace_targets_active:
|
|
3321
|
+
reasons.append("trace_targets")
|
|
3322
|
+
if self.enable_profiling:
|
|
3323
|
+
reasons.append("profiler")
|
|
3324
|
+
if missing_handlers:
|
|
3325
|
+
reasons.append("missing_handlers")
|
|
3326
|
+
self._fast_loop_stats = {
|
|
3327
|
+
"used": False,
|
|
3328
|
+
"reason": ",".join(reasons) if reasons else "conditions",
|
|
3329
|
+
"auto": auto_fast_loop,
|
|
3330
|
+
"threshold": getattr(self, "fast_loop_threshold", 512),
|
|
3331
|
+
"instr_count": len(prepared_instrs),
|
|
3332
|
+
}
|
|
1135
3333
|
|
|
1136
|
-
|
|
1137
|
-
|
|
1138
|
-
|
|
3334
|
+
if fast_loop_allowed and not missing_handlers:
|
|
3335
|
+
self._fast_loop_stats = {
|
|
3336
|
+
"used": True,
|
|
3337
|
+
"reason": "",
|
|
3338
|
+
"auto": auto_fast_loop,
|
|
3339
|
+
"threshold": getattr(self, "fast_loop_threshold", 512),
|
|
3340
|
+
"instr_count": len(prepared_instrs),
|
|
3341
|
+
}
|
|
3342
|
+
local_prepared = prepared_instrs
|
|
3343
|
+
while running and ip < len(local_prepared):
|
|
3344
|
+
op_name, operand, handler, is_async, _gas_kind = local_prepared[ip]
|
|
3345
|
+
prev_ip = ip
|
|
3346
|
+
ip += 1
|
|
3347
|
+
if is_async:
|
|
1139
3348
|
await handler(operand)
|
|
1140
3349
|
else:
|
|
1141
3350
|
handler(operand)
|
|
1142
3351
|
if not running:
|
|
1143
3352
|
break
|
|
1144
|
-
|
|
1145
|
-
|
|
1146
|
-
|
|
1147
|
-
|
|
1148
|
-
|
|
1149
|
-
|
|
1150
|
-
|
|
1151
|
-
|
|
1152
|
-
|
|
1153
|
-
|
|
1154
|
-
|
|
1155
|
-
|
|
1156
|
-
|
|
1157
|
-
|
|
1158
|
-
|
|
1159
|
-
|
|
1160
|
-
|
|
1161
|
-
|
|
1162
|
-
|
|
1163
|
-
|
|
1164
|
-
|
|
1165
|
-
|
|
1166
|
-
|
|
1167
|
-
|
|
1168
|
-
|
|
1169
|
-
|
|
1170
|
-
|
|
1171
|
-
|
|
1172
|
-
|
|
1173
|
-
|
|
1174
|
-
|
|
1175
|
-
|
|
1176
|
-
|
|
1177
|
-
|
|
1178
|
-
|
|
1179
|
-
|
|
1180
|
-
|
|
1181
|
-
|
|
1182
|
-
|
|
1183
|
-
|
|
1184
|
-
|
|
1185
|
-
|
|
1186
|
-
|
|
1187
|
-
|
|
1188
|
-
|
|
1189
|
-
|
|
1190
|
-
|
|
1191
|
-
|
|
1192
|
-
|
|
1193
|
-
|
|
1194
|
-
|
|
1195
|
-
|
|
1196
|
-
|
|
1197
|
-
|
|
1198
|
-
|
|
1199
|
-
|
|
1200
|
-
|
|
1201
|
-
if hasattr(b, 'value'): b = b.value
|
|
1202
|
-
stack.append(a - b)
|
|
1203
|
-
elif op_name == "MUL":
|
|
1204
|
-
b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
|
|
1205
|
-
if hasattr(a, 'value'): a = a.value
|
|
1206
|
-
if hasattr(b, 'value'): b = b.value
|
|
1207
|
-
stack.append(a * b)
|
|
1208
|
-
elif op_name == "DIV":
|
|
1209
|
-
b = stack.pop() if stack else 1; a = stack.pop() if stack else 0
|
|
1210
|
-
if hasattr(a, 'value'): a = a.value
|
|
1211
|
-
if hasattr(b, 'value'): b = b.value
|
|
1212
|
-
stack.append(a / b if b != 0 else 0)
|
|
1213
|
-
elif op_name == "MOD":
|
|
1214
|
-
b = stack.pop() if stack else 1; a = stack.pop() if stack else 0
|
|
1215
|
-
stack.append(a % b if b != 0 else 0)
|
|
1216
|
-
elif op_name == "POW":
|
|
1217
|
-
b = stack.pop() if stack else 1; a = stack.pop() if stack else 0
|
|
1218
|
-
stack.append(a ** b)
|
|
1219
|
-
elif op_name == "NEG":
|
|
1220
|
-
a = stack.pop() if stack else 0
|
|
1221
|
-
stack.append(-a)
|
|
1222
|
-
elif op_name == "EQ":
|
|
1223
|
-
b = stack.pop() if stack else None; a = stack.pop() if stack else None
|
|
1224
|
-
stack.append(a == b)
|
|
1225
|
-
elif op_name == "NEQ":
|
|
1226
|
-
b = stack.pop() if stack else None; a = stack.pop() if stack else None
|
|
1227
|
-
stack.append(a != b)
|
|
1228
|
-
elif op_name == "LT":
|
|
1229
|
-
b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
|
|
1230
|
-
stack.append(a < b)
|
|
1231
|
-
elif op_name == "GT":
|
|
1232
|
-
b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
|
|
1233
|
-
stack.append(a > b)
|
|
1234
|
-
elif op_name == "LTE":
|
|
1235
|
-
b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
|
|
1236
|
-
stack.append(a <= b)
|
|
1237
|
-
elif op_name == "GTE":
|
|
1238
|
-
b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
|
|
1239
|
-
stack.append(a >= b)
|
|
1240
|
-
elif op_name == "NOT":
|
|
1241
|
-
a = stack.pop() if stack else False
|
|
1242
|
-
stack.append(not a)
|
|
1243
|
-
|
|
1244
|
-
# --- Control Flow ---
|
|
1245
|
-
elif op_name == "JUMP":
|
|
1246
|
-
ip = operand
|
|
1247
|
-
elif op_name == "JUMP_IF_FALSE":
|
|
1248
|
-
cond = stack.pop() if stack else None
|
|
1249
|
-
if not cond: ip = operand
|
|
1250
|
-
elif op_name == "RETURN":
|
|
1251
|
-
return stack.pop() if stack else None
|
|
1252
|
-
|
|
1253
|
-
# --- Collections ---
|
|
1254
|
-
elif op_name == "BUILD_LIST":
|
|
1255
|
-
count = operand if operand is not None else 0
|
|
1256
|
-
elements = [stack.pop() for _ in range(count)][::-1]
|
|
1257
|
-
stack.append(elements)
|
|
1258
|
-
elif op == "BUILD_MAP":
|
|
1259
|
-
count = operand if operand is not None else 0
|
|
1260
|
-
result = {}
|
|
1261
|
-
for _ in range(count):
|
|
1262
|
-
val = stack.pop(); key = stack.pop()
|
|
1263
|
-
result[key] = val
|
|
1264
|
-
stack.append(result)
|
|
1265
|
-
elif op == "INDEX":
|
|
1266
|
-
idx = stack.pop(); obj = stack.pop()
|
|
1267
|
-
try: stack.append(obj[idx] if obj is not None else None)
|
|
1268
|
-
except (IndexError, KeyError, TypeError): stack.append(None)
|
|
1269
|
-
elif op == "GET_LENGTH":
|
|
1270
|
-
obj = stack.pop()
|
|
1271
|
-
try:
|
|
1272
|
-
if obj is None:
|
|
1273
|
-
stack.append(0)
|
|
1274
|
-
elif hasattr(obj, '__len__'):
|
|
1275
|
-
stack.append(len(obj))
|
|
3353
|
+
if not running:
|
|
3354
|
+
return return_value
|
|
3355
|
+
while running and ip < len(prepared_instrs):
|
|
3356
|
+
try:
|
|
3357
|
+
current_ip = ip
|
|
3358
|
+
op_name, operand, handler, is_async, gas_kind = prepared_instrs[current_ip]
|
|
3359
|
+
|
|
3360
|
+
if profile_ops:
|
|
3361
|
+
opcode_counts[op_name] = opcode_counts.get(op_name, 0) + 1
|
|
3362
|
+
|
|
3363
|
+
if debug: print(f"[VM SL] ip={ip} op={op} operand={operand} stack={stack.snapshot()}")
|
|
3364
|
+
|
|
3365
|
+
# Profile instruction (if enabled) - start timing
|
|
3366
|
+
instr_start_time = None
|
|
3367
|
+
if self.enable_profiling and self.profiler and self.profiler.enabled:
|
|
3368
|
+
if self.profiler.level in (ProfilingLevel.DETAILED, ProfilingLevel.FULL):
|
|
3369
|
+
instr_start_time = time.perf_counter()
|
|
3370
|
+
# OPTIMIZATION: Use stack.sp instead of len(stack) to avoid 500k function calls
|
|
3371
|
+
self.profiler.record_instruction(current_ip, op_name, operand, prev_ip, stack.sp)
|
|
3372
|
+
|
|
3373
|
+
prev_ip = current_ip
|
|
3374
|
+
ip += 1
|
|
3375
|
+
|
|
3376
|
+
if trace_interval > 0:
|
|
3377
|
+
trace_counter += 1
|
|
3378
|
+
if trace_counter % trace_interval == 0:
|
|
3379
|
+
try:
|
|
3380
|
+
stack_size = stack.sp # OPTIMIZATION: Direct attribute access
|
|
3381
|
+
except Exception:
|
|
3382
|
+
stack_size = -1
|
|
3383
|
+
print(f"[VM TRACE] async ip={current_ip} op={op_name} stack={stack_size}")
|
|
3384
|
+
|
|
3385
|
+
# === GAS METERING ===
|
|
3386
|
+
if gas_enabled:
|
|
3387
|
+
if gas_kind == 1:
|
|
3388
|
+
count = operand if operand is not None else 0
|
|
3389
|
+
if gas_light:
|
|
3390
|
+
ok = gas_consume_light(self.gas_light_cost)
|
|
3391
|
+
else:
|
|
3392
|
+
ok = gas_consume(op_name, count=count)
|
|
3393
|
+
elif gas_kind == 2:
|
|
3394
|
+
leaf_count = operand if operand is not None else 0
|
|
3395
|
+
if gas_light:
|
|
3396
|
+
ok = gas_consume_light(self.gas_light_cost)
|
|
3397
|
+
else:
|
|
3398
|
+
ok = gas_consume(op_name, leaf_count=leaf_count)
|
|
3399
|
+
elif gas_kind == 3:
|
|
3400
|
+
if op_name == "CALL_NAME":
|
|
3401
|
+
arg_count = operand[1] if isinstance(operand, tuple) else 0
|
|
3402
|
+
elif op_name == "CALL_TOP":
|
|
3403
|
+
arg_count = operand if operand is not None else 0
|
|
3404
|
+
else:
|
|
3405
|
+
arg_count = operand[1] if isinstance(operand, tuple) else 0
|
|
3406
|
+
if gas_light:
|
|
3407
|
+
ok = gas_consume_light(self.gas_light_cost)
|
|
3408
|
+
else:
|
|
3409
|
+
ok = gas_consume(op_name, arg_count=arg_count)
|
|
1276
3410
|
else:
|
|
1277
|
-
|
|
1278
|
-
|
|
1279
|
-
|
|
1280
|
-
|
|
1281
|
-
|
|
1282
|
-
|
|
1283
|
-
|
|
1284
|
-
|
|
1285
|
-
|
|
1286
|
-
|
|
1287
|
-
|
|
1288
|
-
|
|
1289
|
-
|
|
1290
|
-
|
|
1291
|
-
|
|
1292
|
-
|
|
1293
|
-
|
|
3411
|
+
if gas_light:
|
|
3412
|
+
ok = gas_consume_light(self.gas_light_cost)
|
|
3413
|
+
else:
|
|
3414
|
+
ok = gas_consume(op_name)
|
|
3415
|
+
|
|
3416
|
+
# Consume gas for operation
|
|
3417
|
+
if not ok:
|
|
3418
|
+
# Out of gas!
|
|
3419
|
+
if gas_metering.operation_count > gas_metering.max_operations:
|
|
3420
|
+
raise OperationLimitExceededError(
|
|
3421
|
+
gas_metering.operation_count,
|
|
3422
|
+
gas_metering.max_operations
|
|
3423
|
+
)
|
|
3424
|
+
else:
|
|
3425
|
+
raise OutOfGasError(
|
|
3426
|
+
gas_metering.gas_used,
|
|
3427
|
+
gas_metering.gas_limit,
|
|
3428
|
+
op_name
|
|
3429
|
+
)
|
|
3430
|
+
|
|
3431
|
+
if trace_ip_range and trace_ip_range[0] <= current_ip <= trace_ip_range[1]:
|
|
3432
|
+
op_detail = op_name
|
|
3433
|
+
if op_name in ("LOAD_NAME", "STORE_NAME"):
|
|
3434
|
+
try:
|
|
3435
|
+
op_detail = f"{op_name}({const(operand)})"
|
|
3436
|
+
except Exception:
|
|
3437
|
+
op_detail = op_name
|
|
3438
|
+
elif op_name == "LOAD_CONST":
|
|
3439
|
+
try:
|
|
3440
|
+
op_detail = f"{op_name}({const(operand)})"
|
|
3441
|
+
except Exception:
|
|
3442
|
+
op_detail = op_name
|
|
3443
|
+
print(f"[VM TRACE] ip={current_ip} op={op_detail} pre_stack={len(stack)}")
|
|
3444
|
+
if handler is not None:
|
|
3445
|
+
if is_async:
|
|
3446
|
+
await handler(operand)
|
|
3447
|
+
else:
|
|
3448
|
+
handler(operand)
|
|
3449
|
+
if trace_ip_range and trace_ip_range[0] <= current_ip <= trace_ip_range[1]:
|
|
3450
|
+
print(f"[VM TRACE] ip={current_ip} op={op_detail} post_stack={len(stack)}")
|
|
3451
|
+
if not running:
|
|
3452
|
+
break
|
|
3453
|
+
continue
|
|
3454
|
+
|
|
3455
|
+
# --- Basic Stack Ops ---
|
|
3456
|
+
if op_name == "LOAD_CONST":
|
|
3457
|
+
stack.append(const(operand))
|
|
3458
|
+
elif op_name == "LOAD_NAME":
|
|
3459
|
+
name = const(operand)
|
|
3460
|
+
stack.append(_resolve(name))
|
|
3461
|
+
elif op_name == "STORE_NAME":
|
|
3462
|
+
name = const(operand)
|
|
3463
|
+
val = stack.pop() if stack else None
|
|
3464
|
+
_store(name, val)
|
|
3465
|
+
if self.use_memory_manager and val is not None:
|
|
3466
|
+
self._allocate_managed(val, name=name)
|
|
3467
|
+
elif op_name == "POP":
|
|
3468
|
+
if stack: stack.pop()
|
|
3469
|
+
elif op_name == "DUP":
|
|
3470
|
+
if stack: stack.append(stack[-1])
|
|
3471
|
+
elif op_name == "PRINT":
|
|
3472
|
+
val = stack.pop() if stack else None
|
|
3473
|
+
print(self._format_print_value(val))
|
|
3474
|
+
|
|
3475
|
+
# --- Function/Closure Ops ---
|
|
3476
|
+
elif op_name == "STORE_FUNC":
|
|
3477
|
+
name_idx, func_idx = operand
|
|
3478
|
+
name = const(name_idx)
|
|
3479
|
+
func_desc = const(func_idx)
|
|
3480
|
+
# Create func descriptor, capturing current VM as parent
|
|
3481
|
+
func_desc_copy = dict(func_desc) if isinstance(func_desc, dict) else {"bytecode": func_desc}
|
|
3482
|
+
closure_snapshot = {}
|
|
3483
|
+
# Snapshot current environment (excluding internal keys)
|
|
3484
|
+
for key, value in self.env.items():
|
|
3485
|
+
if isinstance(key, str) and key.startswith("_"):
|
|
3486
|
+
continue
|
|
3487
|
+
closure_snapshot[key] = value
|
|
3488
|
+
# Include existing closure cells if present
|
|
3489
|
+
for key, cell in self._closure_cells.items():
|
|
3490
|
+
if key not in closure_snapshot:
|
|
3491
|
+
closure_snapshot[key] = cell.value
|
|
3492
|
+
if closure_snapshot:
|
|
3493
|
+
func_desc_copy["closure_snapshot"] = closure_snapshot
|
|
3494
|
+
func_desc_copy["parent_vm"] = self
|
|
3495
|
+
self.env[name] = func_desc_copy
|
|
3496
|
+
self._bump_env_version(name, func_desc_copy)
|
|
3497
|
+
|
|
3498
|
+
elif op_name == "LOAD_REG":
|
|
3499
|
+
reg, const_idx = operand
|
|
3500
|
+
value = const(const_idx)
|
|
3501
|
+
if not hasattr(self, "_jit_registers"):
|
|
3502
|
+
self._jit_registers = {}
|
|
3503
|
+
self._jit_registers[reg] = value
|
|
3504
|
+
|
|
3505
|
+
elif op_name == "LOAD_VAR_REG":
|
|
3506
|
+
reg, name_idx = operand
|
|
3507
|
+
name = const(name_idx)
|
|
3508
|
+
value = _resolve(name)
|
|
3509
|
+
if not hasattr(self, "_jit_registers"):
|
|
3510
|
+
self._jit_registers = {}
|
|
3511
|
+
self._jit_registers[reg] = value
|
|
3512
|
+
|
|
3513
|
+
elif op_name == "STORE_REG":
|
|
3514
|
+
reg, name_idx = operand
|
|
3515
|
+
name = const(name_idx)
|
|
3516
|
+
value = getattr(self, "_jit_registers", {}).get(reg)
|
|
3517
|
+
_store(name, value)
|
|
3518
|
+
|
|
3519
|
+
elif op_name == "MOV_REG":
|
|
3520
|
+
dest, src = operand
|
|
3521
|
+
if not hasattr(self, "_jit_registers"):
|
|
3522
|
+
self._jit_registers = {}
|
|
3523
|
+
self._jit_registers[dest] = self._jit_registers.get(src)
|
|
3524
|
+
|
|
3525
|
+
elif op_name == "PUSH_REG":
|
|
3526
|
+
reg = operand if not isinstance(operand, (list, tuple)) else operand[0]
|
|
3527
|
+
value = getattr(self, "_jit_registers", {}).get(reg)
|
|
3528
|
+
stack.append(value)
|
|
3529
|
+
|
|
3530
|
+
elif op_name in ("ADD_REG", "SUB_REG", "MUL_REG", "DIV_REG", "MOD_REG"):
|
|
3531
|
+
dest, src1, src2 = operand
|
|
3532
|
+
regs = getattr(self, "_jit_registers", {})
|
|
3533
|
+
v1 = regs.get(src1)
|
|
3534
|
+
v2 = regs.get(src2)
|
|
3535
|
+
if op_name == "ADD_REG":
|
|
3536
|
+
res = v1 + v2
|
|
3537
|
+
elif op_name == "SUB_REG":
|
|
3538
|
+
res = v1 - v2
|
|
3539
|
+
elif op_name == "MUL_REG":
|
|
3540
|
+
res = v1 * v2
|
|
3541
|
+
elif op_name == "DIV_REG":
|
|
3542
|
+
res = v1 / v2 if v2 != 0 else 0
|
|
3543
|
+
else:
|
|
3544
|
+
res = v1 % v2 if v2 != 0 else 0
|
|
3545
|
+
if not hasattr(self, "_jit_registers"):
|
|
3546
|
+
self._jit_registers = {}
|
|
3547
|
+
self._jit_registers[dest] = res
|
|
3548
|
+
|
|
3549
|
+
elif op_name == "POW_REG":
|
|
3550
|
+
dest, src1, src2 = operand
|
|
3551
|
+
regs = getattr(self, "_jit_registers", {})
|
|
3552
|
+
v1 = regs.get(src1)
|
|
3553
|
+
v2 = regs.get(src2)
|
|
3554
|
+
res = v1 ** v2
|
|
3555
|
+
if not hasattr(self, "_jit_registers"):
|
|
3556
|
+
self._jit_registers = {}
|
|
3557
|
+
self._jit_registers[dest] = res
|
|
3558
|
+
|
|
3559
|
+
elif op_name == "NEG_REG":
|
|
3560
|
+
dest, src = operand
|
|
3561
|
+
regs = getattr(self, "_jit_registers", {})
|
|
3562
|
+
v1 = regs.get(src)
|
|
3563
|
+
res = -v1
|
|
3564
|
+
if not hasattr(self, "_jit_registers"):
|
|
3565
|
+
self._jit_registers = {}
|
|
3566
|
+
self._jit_registers[dest] = res
|
|
3567
|
+
|
|
3568
|
+
elif op_name in ("EQ_REG", "NEQ_REG", "LT_REG"):
|
|
3569
|
+
dest, src1, src2 = operand
|
|
3570
|
+
regs = getattr(self, "_jit_registers", {})
|
|
3571
|
+
v1 = regs.get(src1)
|
|
3572
|
+
v2 = regs.get(src2)
|
|
3573
|
+
if op_name == "EQ_REG":
|
|
3574
|
+
res = v1 == v2
|
|
3575
|
+
elif op_name == "NEQ_REG":
|
|
3576
|
+
res = v1 != v2
|
|
3577
|
+
else:
|
|
3578
|
+
res = v1 < v2
|
|
3579
|
+
if not hasattr(self, "_jit_registers"):
|
|
3580
|
+
self._jit_registers = {}
|
|
3581
|
+
self._jit_registers[dest] = res
|
|
3582
|
+
|
|
3583
|
+
elif op_name in ("GT_REG", "LTE_REG", "GTE_REG"):
|
|
3584
|
+
dest, src1, src2 = operand
|
|
3585
|
+
regs = getattr(self, "_jit_registers", {})
|
|
3586
|
+
v1 = regs.get(src1)
|
|
3587
|
+
v2 = regs.get(src2)
|
|
3588
|
+
if op_name == "GT_REG":
|
|
3589
|
+
res = v1 > v2
|
|
3590
|
+
elif op_name == "LTE_REG":
|
|
3591
|
+
res = v1 <= v2
|
|
3592
|
+
else:
|
|
3593
|
+
res = v1 >= v2
|
|
3594
|
+
if not hasattr(self, "_jit_registers"):
|
|
3595
|
+
self._jit_registers = {}
|
|
3596
|
+
self._jit_registers[dest] = res
|
|
3597
|
+
|
|
3598
|
+
elif op_name in ("AND_REG", "OR_REG"):
|
|
3599
|
+
dest, src1, src2 = operand
|
|
3600
|
+
regs = getattr(self, "_jit_registers", {})
|
|
3601
|
+
v1 = regs.get(src1)
|
|
3602
|
+
v2 = regs.get(src2)
|
|
3603
|
+
res = v1 and v2 if op_name == "AND_REG" else v1 or v2
|
|
3604
|
+
if not hasattr(self, "_jit_registers"):
|
|
3605
|
+
self._jit_registers = {}
|
|
3606
|
+
self._jit_registers[dest] = res
|
|
3607
|
+
|
|
3608
|
+
elif op_name == "NOT_REG":
|
|
3609
|
+
dest, src = operand
|
|
3610
|
+
regs = getattr(self, "_jit_registers", {})
|
|
3611
|
+
v1 = regs.get(src)
|
|
3612
|
+
res = not v1
|
|
3613
|
+
if not hasattr(self, "_jit_registers"):
|
|
3614
|
+
self._jit_registers = {}
|
|
3615
|
+
self._jit_registers[dest] = res
|
|
3616
|
+
|
|
3617
|
+
elif op_name == "POP_REG":
|
|
3618
|
+
reg = operand if not isinstance(operand, (list, tuple)) else operand[0]
|
|
3619
|
+
value = stack.pop() if stack else None
|
|
3620
|
+
if not hasattr(self, "_jit_registers"):
|
|
3621
|
+
self._jit_registers = {}
|
|
3622
|
+
self._jit_registers[reg] = value
|
|
3623
|
+
|
|
3624
|
+
elif op_name == "SPAWN_TASK":
|
|
3625
|
+
task_handle = None
|
|
3626
|
+
if isinstance(operand, tuple) and operand[0] == "CALL":
|
|
3627
|
+
fn_name = operand[1]; arg_count = operand[2]
|
|
3628
|
+
args = [stack.pop() if stack else None for _ in range(arg_count)][::-1]
|
|
3629
|
+
fn = self.builtins.get(fn_name) or self.env.get(fn_name)
|
|
3630
|
+
coro = self._to_coro(fn, args)
|
|
3631
|
+
if self.async_optimizer:
|
|
3632
|
+
coro = self.async_optimizer.spawn(coro)
|
|
1294
3633
|
task = asyncio.create_task(coro)
|
|
3634
|
+
self._task_counter += 1
|
|
3635
|
+
tid = f"task_{self._task_counter}"
|
|
3636
|
+
self._tasks[tid] = task
|
|
3637
|
+
task_handle = tid
|
|
1295
3638
|
else:
|
|
3639
|
+
arg_count = int(operand) if operand is not None else 0
|
|
3640
|
+
args = [stack.pop() if stack else None for _ in range(arg_count)][::-1] if arg_count else []
|
|
3641
|
+
callable_obj = stack.pop() if stack else None
|
|
3642
|
+
coro = self._to_coro(callable_obj, args)
|
|
3643
|
+
if self.async_optimizer:
|
|
3644
|
+
coro = self.async_optimizer.spawn(coro)
|
|
1296
3645
|
task = asyncio.create_task(coro)
|
|
1297
|
-
|
|
1298
|
-
|
|
1299
|
-
|
|
1300
|
-
|
|
1301
|
-
task_handle
|
|
1302
|
-
|
|
1303
|
-
|
|
1304
|
-
|
|
1305
|
-
|
|
1306
|
-
|
|
1307
|
-
|
|
3646
|
+
self._task_counter += 1
|
|
3647
|
+
tid = f"task_{self._task_counter}"
|
|
3648
|
+
self._tasks[tid] = task
|
|
3649
|
+
task_handle = tid
|
|
3650
|
+
stack.append(task_handle)
|
|
3651
|
+
|
|
3652
|
+
elif op_name == "TASK_JOIN":
|
|
3653
|
+
task_ref = stack.pop() if stack else None
|
|
3654
|
+
if isinstance(task_ref, str) and task_ref in self._tasks:
|
|
3655
|
+
res = await self._tasks[task_ref]
|
|
3656
|
+
stack.append(res)
|
|
3657
|
+
elif asyncio.iscoroutine(task_ref) or isinstance(task_ref, asyncio.Future):
|
|
3658
|
+
res = await task_ref
|
|
3659
|
+
stack.append(res)
|
|
3660
|
+
else:
|
|
3661
|
+
stack.append(task_ref)
|
|
3662
|
+
|
|
3663
|
+
elif op_name == "TASK_RESULT":
|
|
3664
|
+
task_ref = stack.pop() if stack else None
|
|
3665
|
+
if isinstance(task_ref, str) and task_ref in self._tasks:
|
|
3666
|
+
res = await self._tasks[task_ref]
|
|
3667
|
+
stack.append(res)
|
|
3668
|
+
elif asyncio.iscoroutine(task_ref) or isinstance(task_ref, asyncio.Future):
|
|
3669
|
+
res = await task_ref
|
|
3670
|
+
stack.append(res)
|
|
3671
|
+
else:
|
|
3672
|
+
stack.append(task_ref)
|
|
3673
|
+
|
|
3674
|
+
elif op_name == "LOCK_ACQUIRE":
|
|
3675
|
+
if not hasattr(self, "_locks"):
|
|
3676
|
+
self._locks = {}
|
|
3677
|
+
key = const(operand) if operand is not None else (stack.pop() if stack else None)
|
|
3678
|
+
key = _unwrap(key)
|
|
3679
|
+
lock = self._locks.get(key)
|
|
3680
|
+
if lock is None:
|
|
3681
|
+
import threading
|
|
3682
|
+
lock = threading.Lock()
|
|
3683
|
+
self._locks[key] = lock
|
|
3684
|
+
lock.acquire()
|
|
3685
|
+
|
|
3686
|
+
elif op_name == "LOCK_RELEASE":
|
|
3687
|
+
if not hasattr(self, "_locks"):
|
|
3688
|
+
self._locks = {}
|
|
3689
|
+
key = const(operand) if operand is not None else (stack.pop() if stack else None)
|
|
3690
|
+
key = _unwrap(key)
|
|
3691
|
+
lock = self._locks.get(key)
|
|
3692
|
+
if lock:
|
|
3693
|
+
lock.release()
|
|
3694
|
+
|
|
3695
|
+
elif op_name == "BARRIER":
|
|
3696
|
+
barrier_obj = stack.pop() if stack else None
|
|
3697
|
+
timeout = const(operand) if operand is not None else None
|
|
3698
|
+
if hasattr(barrier_obj, "wait"):
|
|
3699
|
+
try:
|
|
3700
|
+
res = barrier_obj.wait(timeout=timeout) if timeout is not None else barrier_obj.wait()
|
|
3701
|
+
except Exception as exc:
|
|
3702
|
+
res = exc
|
|
3703
|
+
stack.append(res)
|
|
3704
|
+
else:
|
|
3705
|
+
stack.append(None)
|
|
3706
|
+
|
|
3707
|
+
elif op_name == "ATOMIC_ADD":
|
|
3708
|
+
delta = stack.pop() if stack else 0
|
|
3709
|
+
key = stack.pop() if operand is None else const(operand)
|
|
3710
|
+
key = _unwrap(key)
|
|
3711
|
+
if not hasattr(self, "_atomic_lock"):
|
|
3712
|
+
import threading
|
|
3713
|
+
self._atomic_lock = threading.Lock()
|
|
3714
|
+
if "_atomic_state" not in self.env:
|
|
3715
|
+
self.env["_atomic_state"] = {}
|
|
3716
|
+
with self._atomic_lock:
|
|
3717
|
+
current = self.env["_atomic_state"].get(key, 0)
|
|
3718
|
+
new_val = current + delta
|
|
3719
|
+
self.env["_atomic_state"][key] = new_val
|
|
3720
|
+
stack.append(new_val)
|
|
3721
|
+
|
|
3722
|
+
elif op_name == "ATOMIC_CAS":
|
|
3723
|
+
new_val = stack.pop() if stack else None
|
|
3724
|
+
expected = stack.pop() if stack else None
|
|
3725
|
+
key = stack.pop() if operand is None else const(operand)
|
|
3726
|
+
key = _unwrap(key)
|
|
3727
|
+
if not hasattr(self, "_atomic_lock"):
|
|
3728
|
+
import threading
|
|
3729
|
+
self._atomic_lock = threading.Lock()
|
|
3730
|
+
if "_atomic_state" not in self.env:
|
|
3731
|
+
self.env["_atomic_state"] = {}
|
|
3732
|
+
with self._atomic_lock:
|
|
3733
|
+
current = self.env["_atomic_state"].get(key, None)
|
|
3734
|
+
ok = current == expected
|
|
3735
|
+
if ok:
|
|
3736
|
+
self.env["_atomic_state"][key] = new_val
|
|
3737
|
+
stack.append(ok)
|
|
3738
|
+
|
|
3739
|
+
elif op_name == "FOR_ITER":
|
|
3740
|
+
target = int(operand) if operand is not None else ip
|
|
3741
|
+
it = stack.pop() if stack else None
|
|
3742
|
+
if it is None:
|
|
3743
|
+
ip = target
|
|
3744
|
+
else:
|
|
3745
|
+
try:
|
|
3746
|
+
iterator = iter(it)
|
|
3747
|
+
value = next(iterator)
|
|
3748
|
+
stack.append(iterator)
|
|
3749
|
+
stack.append(value)
|
|
3750
|
+
except StopIteration:
|
|
3751
|
+
ip = target
|
|
3752
|
+
|
|
3753
|
+
elif op_name == "CALL_NAME":
|
|
3754
|
+
name_idx, arg_count = operand
|
|
3755
|
+
func_name = const(name_idx)
|
|
3756
|
+
args = [stack.pop() if stack else None for _ in range(arg_count)][::-1] if arg_count else []
|
|
3757
|
+
fn = _resolve(func_name) or self.builtins.get(func_name)
|
|
3758
|
+
if fn is None:
|
|
3759
|
+
res = self._call_fallback_builtin(func_name, args)
|
|
3760
|
+
else:
|
|
3761
|
+
res = await self._invoke_callable_or_funcdesc(fn, args)
|
|
3762
|
+
stack.append(res)
|
|
3763
|
+
|
|
3764
|
+
elif op_name == "CALL_BUILTIN":
|
|
3765
|
+
name_idx, arg_count = operand if isinstance(operand, (list, tuple)) else (operand, 0)
|
|
3766
|
+
func_name = const(name_idx)
|
|
3767
|
+
args = [stack.pop() if stack else None for _ in range(arg_count)][::-1] if arg_count else []
|
|
3768
|
+
fn = self.builtins.get(func_name)
|
|
3769
|
+
if fn is None:
|
|
3770
|
+
res = self._call_fallback_builtin(func_name, args)
|
|
3771
|
+
else:
|
|
3772
|
+
res = await self._invoke_callable_or_funcdesc(fn, args)
|
|
3773
|
+
stack.append(res)
|
|
3774
|
+
|
|
3775
|
+
elif op_name == "CALL_FUNC_CONST":
|
|
3776
|
+
if isinstance(operand, (list, tuple)):
|
|
3777
|
+
func_idx = operand[0]
|
|
3778
|
+
arg_count = operand[1] if len(operand) > 1 else 0
|
|
3779
|
+
else:
|
|
3780
|
+
func_idx = operand
|
|
3781
|
+
arg_count = 0
|
|
3782
|
+
func_desc = const(func_idx)
|
|
3783
|
+
args = [stack.pop() if stack else None for _ in range(arg_count)][::-1] if arg_count else []
|
|
3784
|
+
res = await self._invoke_callable_or_funcdesc(func_desc, args, is_constant=True)
|
|
3785
|
+
stack.append(res)
|
|
1308
3786
|
|
|
1309
|
-
|
|
1310
|
-
|
|
1311
|
-
|
|
1312
|
-
|
|
1313
|
-
|
|
1314
|
-
|
|
1315
|
-
|
|
3787
|
+
elif op_name == "CALL_TOP":
|
|
3788
|
+
arg_count = operand
|
|
3789
|
+
args = [stack.pop() if stack else None for _ in range(arg_count)][::-1] if arg_count else []
|
|
3790
|
+
fn_obj = stack.pop() if stack else None
|
|
3791
|
+
res = await self._invoke_callable_or_funcdesc(fn_obj, args)
|
|
3792
|
+
stack.append(res)
|
|
3793
|
+
|
|
3794
|
+
# --- Arithmetic & Logic ---
|
|
3795
|
+
elif op_name == "ADD":
|
|
3796
|
+
b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
|
|
3797
|
+
# Auto-unwrap evaluator objects
|
|
3798
|
+
if hasattr(a, 'value'): a = a.value
|
|
3799
|
+
if hasattr(b, 'value'): b = b.value
|
|
3800
|
+
stack.append(a + b)
|
|
3801
|
+
elif op_name == "SUB":
|
|
3802
|
+
b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
|
|
3803
|
+
if hasattr(a, 'value'): a = a.value
|
|
3804
|
+
if hasattr(b, 'value'): b = b.value
|
|
3805
|
+
stack.append(a - b)
|
|
3806
|
+
elif op_name == "MUL":
|
|
3807
|
+
b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
|
|
3808
|
+
if hasattr(a, 'value'): a = a.value
|
|
3809
|
+
if hasattr(b, 'value'): b = b.value
|
|
3810
|
+
stack.append(a * b)
|
|
3811
|
+
elif op_name == "DIV":
|
|
3812
|
+
b = stack.pop() if stack else 1; a = stack.pop() if stack else 0
|
|
3813
|
+
if hasattr(a, 'value'): a = a.value
|
|
3814
|
+
if hasattr(b, 'value'): b = b.value
|
|
3815
|
+
stack.append(a / b if b != 0 else 0)
|
|
3816
|
+
elif op_name == "MOD":
|
|
3817
|
+
b = stack.pop() if stack else 1; a = stack.pop() if stack else 0
|
|
3818
|
+
stack.append(a % b if b != 0 else 0)
|
|
3819
|
+
elif op_name == "POW":
|
|
3820
|
+
b = stack.pop() if stack else 1; a = stack.pop() if stack else 0
|
|
3821
|
+
stack.append(a ** b)
|
|
3822
|
+
elif op_name == "NEG":
|
|
3823
|
+
a = stack.pop() if stack else 0
|
|
3824
|
+
stack.append(-a)
|
|
3825
|
+
elif op_name == "EQ":
|
|
3826
|
+
b = stack.pop() if stack else None; a = stack.pop() if stack else None
|
|
3827
|
+
stack.append(a == b)
|
|
3828
|
+
elif op_name == "NEQ":
|
|
3829
|
+
b = stack.pop() if stack else None; a = stack.pop() if stack else None
|
|
3830
|
+
stack.append(a != b)
|
|
3831
|
+
elif op_name == "LT":
|
|
3832
|
+
b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
|
|
3833
|
+
stack.append(a < b)
|
|
3834
|
+
elif op_name == "GT":
|
|
3835
|
+
b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
|
|
3836
|
+
stack.append(a > b)
|
|
3837
|
+
elif op_name == "LTE":
|
|
3838
|
+
b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
|
|
3839
|
+
stack.append(a <= b)
|
|
3840
|
+
elif op_name == "GTE":
|
|
3841
|
+
b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
|
|
3842
|
+
stack.append(a >= b)
|
|
3843
|
+
elif op_name == "NOT":
|
|
3844
|
+
a = stack.pop() if stack else False
|
|
3845
|
+
stack.append(not a)
|
|
3846
|
+
|
|
3847
|
+
# --- Control Flow ---
|
|
3848
|
+
elif op_name == "JUMP":
|
|
3849
|
+
ip = operand
|
|
3850
|
+
elif op_name == "JUMP_IF_FALSE":
|
|
3851
|
+
cond = stack.pop() if stack else None
|
|
3852
|
+
if not cond: ip = operand
|
|
3853
|
+
elif op_name == "RETURN":
|
|
3854
|
+
return stack.pop() if stack else None
|
|
3855
|
+
|
|
3856
|
+
# --- Collections ---
|
|
3857
|
+
elif op_name == "BUILD_LIST":
|
|
3858
|
+
count = operand if operand is not None else 0
|
|
3859
|
+
elements = [None] * count
|
|
3860
|
+
for i in range(count - 1, -1, -1):
|
|
3861
|
+
elements[i] = stack.pop() if stack else None
|
|
3862
|
+
stack.append(elements)
|
|
3863
|
+
elif op_name == "BUILD_MAP":
|
|
3864
|
+
count = operand if operand is not None else 0
|
|
3865
|
+
result = {}
|
|
3866
|
+
for _ in range(count):
|
|
3867
|
+
val = stack.pop() if stack else None
|
|
3868
|
+
key = stack.pop() if stack else None
|
|
3869
|
+
result[key] = val
|
|
3870
|
+
stack.append(result)
|
|
3871
|
+
elif op_name == "BUILD_SET":
|
|
3872
|
+
count = operand if operand is not None else 0
|
|
3873
|
+
elements = [stack_pop() for _ in range(count)][::-1]
|
|
3874
|
+
stack.append(set(elements))
|
|
3875
|
+
elif op_name == "INDEX":
|
|
3876
|
+
idx = stack.pop() if stack else None
|
|
3877
|
+
obj = stack.pop() if stack else None
|
|
3878
|
+
try:
|
|
3879
|
+
if isinstance(obj, ZList):
|
|
3880
|
+
stack.append(obj.get(idx))
|
|
3881
|
+
elif isinstance(obj, ZMap):
|
|
3882
|
+
stack.append(obj.get(idx))
|
|
3883
|
+
elif isinstance(obj, ZString):
|
|
3884
|
+
stack.append(obj[idx])
|
|
1316
3885
|
else:
|
|
1317
|
-
|
|
1318
|
-
# Push back any non-task values we skipped
|
|
1319
|
-
for val in reversed(temp_stack):
|
|
3886
|
+
val = obj[idx] if obj is not None and idx is not None else None
|
|
1320
3887
|
stack.append(val)
|
|
1321
|
-
|
|
1322
|
-
|
|
1323
|
-
|
|
3888
|
+
except (IndexError, KeyError, TypeError):
|
|
3889
|
+
stack.append(None)
|
|
3890
|
+
elif op_name == "SLICE":
|
|
3891
|
+
end = _unwrap(stack.pop() if stack else None)
|
|
3892
|
+
start = _unwrap(stack.pop() if stack else None)
|
|
3893
|
+
obj = stack.pop() if stack else None
|
|
3894
|
+
try:
|
|
3895
|
+
if isinstance(obj, ZList):
|
|
3896
|
+
stack.append(ZList(obj.elements[start:end]))
|
|
3897
|
+
elif isinstance(obj, ZString):
|
|
3898
|
+
stack.append(ZString(obj.value[start:end]))
|
|
3899
|
+
else:
|
|
3900
|
+
stack.append(obj[start:end] if obj is not None else None)
|
|
3901
|
+
except Exception:
|
|
3902
|
+
stack.append(None)
|
|
3903
|
+
elif op_name == "GET_LENGTH":
|
|
3904
|
+
obj = stack.pop() if stack else None
|
|
3905
|
+
try:
|
|
3906
|
+
if obj is None:
|
|
3907
|
+
stack.append(0)
|
|
3908
|
+
elif isinstance(obj, ZList):
|
|
3909
|
+
stack.append(len(obj.elements))
|
|
3910
|
+
elif isinstance(obj, ZMap):
|
|
3911
|
+
stack.append(len(obj.pairs))
|
|
3912
|
+
elif isinstance(obj, ZString):
|
|
3913
|
+
stack.append(len(obj.value))
|
|
3914
|
+
elif hasattr(obj, '__len__'):
|
|
3915
|
+
stack.append(len(obj))
|
|
3916
|
+
else:
|
|
3917
|
+
stack.append(0)
|
|
3918
|
+
except (TypeError, AttributeError):
|
|
3919
|
+
stack.append(0)
|
|
3920
|
+
|
|
3921
|
+
# --- Async & Events ---
|
|
3922
|
+
elif op_name == "SPAWN":
|
|
3923
|
+
# operand: tuple ("CALL", func_name, arg_count) OR index
|
|
3924
|
+
task_handle = None
|
|
3925
|
+
if isinstance(operand, tuple) and operand[0] == "CALL":
|
|
3926
|
+
fn_name = operand[1]; arg_count = operand[2]
|
|
3927
|
+
args = [stack.pop() if stack else None for _ in range(arg_count)][::-1]
|
|
3928
|
+
fn = self.builtins.get(fn_name) or self.env.get(fn_name)
|
|
3929
|
+
coro = self._to_coro(fn, args)
|
|
3930
|
+
|
|
1324
3931
|
# Use async optimizer if available
|
|
1325
3932
|
if self.async_optimizer:
|
|
1326
|
-
|
|
3933
|
+
coro = self.async_optimizer.spawn(coro)
|
|
3934
|
+
task = asyncio.create_task(coro)
|
|
1327
3935
|
else:
|
|
1328
|
-
|
|
1329
|
-
|
|
3936
|
+
task = asyncio.create_task(coro)
|
|
3937
|
+
|
|
3938
|
+
self._task_counter += 1
|
|
3939
|
+
tid = f"task_{self._task_counter}"
|
|
3940
|
+
self._tasks[tid] = task
|
|
3941
|
+
task_handle = tid
|
|
3942
|
+
stack.append(task_handle)
|
|
3943
|
+
|
|
3944
|
+
elif op_name == "SPAWN_CALL":
|
|
3945
|
+
task_handle = None
|
|
3946
|
+
if isinstance(operand, (list, tuple)) and operand:
|
|
3947
|
+
name_idx = operand[0]
|
|
3948
|
+
arg_count = operand[1] if len(operand) > 1 else 0
|
|
3949
|
+
fn_name = const(name_idx)
|
|
3950
|
+
args = [stack.pop() if stack else None for _ in range(arg_count)][::-1] if arg_count else []
|
|
3951
|
+
fn = self.builtins.get(fn_name) or self.env.get(fn_name)
|
|
3952
|
+
coro = self._to_coro(fn, args)
|
|
3953
|
+
if self.async_optimizer:
|
|
3954
|
+
coro = self.async_optimizer.spawn(coro)
|
|
3955
|
+
task = asyncio.create_task(coro)
|
|
3956
|
+
self._task_counter += 1
|
|
3957
|
+
tid = f"task_{self._task_counter}"
|
|
3958
|
+
self._tasks[tid] = task
|
|
3959
|
+
task_handle = tid
|
|
3960
|
+
stack.append(task_handle)
|
|
3961
|
+
|
|
3962
|
+
elif op_name == "AWAIT":
|
|
3963
|
+
# Keep popping until we find a task to await
|
|
3964
|
+
result_found = False
|
|
3965
|
+
temp_stack = self.allocate_list(0)
|
|
3966
|
+
|
|
3967
|
+
while stack and not result_found:
|
|
3968
|
+
top = stack.pop()
|
|
3969
|
+
|
|
3970
|
+
if isinstance(top, str) and top in self._tasks:
|
|
3971
|
+
# Use async optimizer if available
|
|
3972
|
+
if self.async_optimizer:
|
|
3973
|
+
res = await self.async_optimizer.await_optimized(self._tasks[top])
|
|
3974
|
+
else:
|
|
3975
|
+
res = await self._tasks[top]
|
|
3976
|
+
# Push back any non-task values we skipped
|
|
3977
|
+
for val in reversed(temp_stack):
|
|
3978
|
+
stack.append(val)
|
|
3979
|
+
stack.append(res)
|
|
3980
|
+
result_found = True
|
|
3981
|
+
elif asyncio.iscoroutine(top) or isinstance(top, asyncio.Future):
|
|
3982
|
+
# Use async optimizer if available
|
|
3983
|
+
if self.async_optimizer:
|
|
3984
|
+
res = await self.async_optimizer.await_optimized(top)
|
|
3985
|
+
else:
|
|
3986
|
+
res = await top
|
|
3987
|
+
# Push back any non-task values we skipped
|
|
3988
|
+
for val in reversed(temp_stack):
|
|
3989
|
+
stack.append(val)
|
|
3990
|
+
stack.append(res)
|
|
3991
|
+
result_found = True
|
|
3992
|
+
else:
|
|
3993
|
+
# Not a task, save it and keep looking
|
|
3994
|
+
temp_stack.append(top)
|
|
3995
|
+
|
|
3996
|
+
# If no task was found, put everything back
|
|
3997
|
+
if not result_found:
|
|
1330
3998
|
for val in reversed(temp_stack):
|
|
1331
3999
|
stack.append(val)
|
|
1332
|
-
|
|
1333
|
-
|
|
1334
|
-
|
|
1335
|
-
|
|
1336
|
-
|
|
1337
|
-
|
|
1338
|
-
|
|
1339
|
-
|
|
1340
|
-
|
|
1341
|
-
|
|
1342
|
-
|
|
1343
|
-
|
|
1344
|
-
|
|
1345
|
-
|
|
1346
|
-
|
|
1347
|
-
|
|
1348
|
-
|
|
1349
|
-
|
|
1350
|
-
|
|
1351
|
-
|
|
1352
|
-
|
|
1353
|
-
|
|
1354
|
-
|
|
1355
|
-
|
|
1356
|
-
|
|
1357
|
-
|
|
1358
|
-
|
|
1359
|
-
|
|
1360
|
-
|
|
1361
|
-
|
|
1362
|
-
|
|
1363
|
-
|
|
1364
|
-
|
|
1365
|
-
self.
|
|
1366
|
-
|
|
1367
|
-
|
|
1368
|
-
|
|
1369
|
-
|
|
1370
|
-
|
|
1371
|
-
|
|
1372
|
-
|
|
1373
|
-
|
|
1374
|
-
|
|
1375
|
-
|
|
1376
|
-
|
|
1377
|
-
|
|
1378
|
-
|
|
1379
|
-
|
|
1380
|
-
|
|
1381
|
-
|
|
1382
|
-
|
|
1383
|
-
|
|
1384
|
-
|
|
1385
|
-
|
|
1386
|
-
|
|
1387
|
-
|
|
1388
|
-
|
|
1389
|
-
|
|
1390
|
-
|
|
1391
|
-
|
|
1392
|
-
|
|
1393
|
-
|
|
1394
|
-
|
|
1395
|
-
|
|
1396
|
-
|
|
1397
|
-
pk = stack.pop(); msg = stack.pop(); sig = stack.pop()
|
|
1398
|
-
verify_fn = self.builtins.get("verify_sig") or self.env.get("verify_sig")
|
|
1399
|
-
if verify_fn:
|
|
1400
|
-
res = await self._invoke_callable_or_funcdesc(verify_fn, [sig, msg, pk])
|
|
1401
|
-
stack.append(res)
|
|
4000
|
+
|
|
4001
|
+
if temp_stack:
|
|
4002
|
+
temp_stack.clear()
|
|
4003
|
+
self.release_list(temp_stack)
|
|
4004
|
+
|
|
4005
|
+
elif op_name == "REGISTER_EVENT":
|
|
4006
|
+
event_parts = operand if isinstance(operand, (list, tuple)) else (operand,)
|
|
4007
|
+
event_name = const(event_parts[0]) if event_parts else None
|
|
4008
|
+
handler = const(event_parts[1]) if len(event_parts) > 1 else None
|
|
4009
|
+
if event_name is not None:
|
|
4010
|
+
handlers = self._events.setdefault(event_name, [])
|
|
4011
|
+
if handler and handler not in handlers:
|
|
4012
|
+
handlers.append(handler)
|
|
4013
|
+
|
|
4014
|
+
elif op_name == "EMIT_EVENT":
|
|
4015
|
+
event_ref = const(operand[0]) if operand else None
|
|
4016
|
+
payload_ref = None
|
|
4017
|
+
event_name = event_ref
|
|
4018
|
+
if isinstance(event_ref, (list, tuple)) and event_ref:
|
|
4019
|
+
event_name = event_ref[0]
|
|
4020
|
+
if len(event_ref) > 1:
|
|
4021
|
+
payload_ref = event_ref[1]
|
|
4022
|
+
|
|
4023
|
+
payload = None
|
|
4024
|
+
if stack:
|
|
4025
|
+
payload = stack.pop()
|
|
4026
|
+
elif payload_ref is not None:
|
|
4027
|
+
payload = const(payload_ref)
|
|
4028
|
+
elif isinstance(operand, (list, tuple)) and len(operand) > 1:
|
|
4029
|
+
payload = const(operand[1])
|
|
4030
|
+
|
|
4031
|
+
payload = _unwrap(payload)
|
|
4032
|
+
|
|
4033
|
+
handlers = self._events.get(event_name, [])
|
|
4034
|
+
for h in handlers:
|
|
4035
|
+
fn = self.builtins.get(h) or self.env.get(h)
|
|
4036
|
+
if fn is None:
|
|
4037
|
+
continue
|
|
4038
|
+
await self._call_builtin_async_obj(fn, [payload], wrap_args=False)
|
|
4039
|
+
|
|
4040
|
+
elif op_name == "IMPORT":
|
|
4041
|
+
mod_name = const(operand[0])
|
|
4042
|
+
alias = const(operand[1]) if isinstance(operand, (list,tuple)) and len(operand) > 1 else ""
|
|
4043
|
+
names = const(operand[2]) if isinstance(operand, (list, tuple)) and len(operand) > 2 else []
|
|
4044
|
+
is_named = const(operand[3]) if isinstance(operand, (list, tuple)) and len(operand) > 3 else False
|
|
4045
|
+
self._execute_import(mod_name, alias=alias or "", names=names, is_named=bool(is_named))
|
|
4046
|
+
|
|
4047
|
+
elif op_name == "EXPORT":
|
|
4048
|
+
name = None
|
|
4049
|
+
value = None
|
|
4050
|
+
if isinstance(operand, (list, tuple)) and operand:
|
|
4051
|
+
name = const(operand[0])
|
|
4052
|
+
if len(operand) > 1:
|
|
4053
|
+
value = const(operand[1])
|
|
4054
|
+
if name is None:
|
|
4055
|
+
name = stack.pop() if stack else None
|
|
4056
|
+
if value is None:
|
|
4057
|
+
value = stack.pop() if stack else None
|
|
4058
|
+
|
|
4059
|
+
export_fn = getattr(self.env, "export", None)
|
|
4060
|
+
if callable(export_fn):
|
|
4061
|
+
try:
|
|
4062
|
+
export_fn(name, value)
|
|
4063
|
+
except Exception:
|
|
4064
|
+
pass
|
|
1402
4065
|
else:
|
|
1403
|
-
|
|
1404
|
-
|
|
1405
|
-
stack.append(sig == expected)
|
|
1406
|
-
else:
|
|
1407
|
-
stack.append(False)
|
|
4066
|
+
self.env[name] = value
|
|
4067
|
+
self._bump_env_version(name, value)
|
|
1408
4068
|
|
|
1409
|
-
|
|
1410
|
-
|
|
1411
|
-
|
|
1412
|
-
|
|
1413
|
-
|
|
1414
|
-
|
|
1415
|
-
|
|
1416
|
-
|
|
1417
|
-
|
|
1418
|
-
|
|
1419
|
-
|
|
1420
|
-
|
|
1421
|
-
|
|
1422
|
-
|
|
1423
|
-
|
|
1424
|
-
if len(hashes) % 2 != 0: hashes.append(hashes[-1])
|
|
1425
|
-
new_hashes = []
|
|
1426
|
-
for i in range(0, len(hashes), 2):
|
|
1427
|
-
combined = (hashes[i] + hashes[i+1]).encode('utf-8')
|
|
1428
|
-
new_hashes.append(hashlib.sha256(combined).hexdigest())
|
|
1429
|
-
hashes = new_hashes
|
|
1430
|
-
stack.append(hashes[0] if hashes else "")
|
|
1431
|
-
|
|
1432
|
-
elif op_name == "STATE_READ":
|
|
1433
|
-
key = const(operand)
|
|
1434
|
-
stack.append(self.env.setdefault("_blockchain_state", {}).get(key))
|
|
1435
|
-
|
|
1436
|
-
elif op_name == "STATE_WRITE":
|
|
1437
|
-
key = const(operand)
|
|
1438
|
-
val = stack.pop() if stack else None
|
|
1439
|
-
if self.env.get("_in_transaction", False):
|
|
1440
|
-
self.env.setdefault("_tx_pending_state", {})[key] = val
|
|
1441
|
-
else:
|
|
1442
|
-
self.env.setdefault("_blockchain_state", {})[key] = val
|
|
4069
|
+
elif op_name == "WRITE":
|
|
4070
|
+
payload = stack.pop() if stack else None
|
|
4071
|
+
path = stack.pop() if stack else None
|
|
4072
|
+
try:
|
|
4073
|
+
if path is not None:
|
|
4074
|
+
with open(path, "w") as f:
|
|
4075
|
+
if isinstance(payload, bytes):
|
|
4076
|
+
f.write(payload.decode("utf-8"))
|
|
4077
|
+
else:
|
|
4078
|
+
f.write(str(payload) if payload is not None else "")
|
|
4079
|
+
stack.append(True)
|
|
4080
|
+
else:
|
|
4081
|
+
stack.append(False)
|
|
4082
|
+
except Exception:
|
|
4083
|
+
stack.append(False)
|
|
1443
4084
|
|
|
1444
|
-
|
|
1445
|
-
|
|
1446
|
-
|
|
1447
|
-
|
|
1448
|
-
|
|
4085
|
+
elif op_name == "DEFINE_SCREEN":
|
|
4086
|
+
if isinstance(operand, (list, tuple)) and len(operand) >= 2:
|
|
4087
|
+
name = const(operand[0])
|
|
4088
|
+
props = const(operand[1])
|
|
4089
|
+
else:
|
|
4090
|
+
props = stack.pop() if stack else None
|
|
4091
|
+
name = stack.pop() if stack else None
|
|
4092
|
+
if _BACKEND_AVAILABLE:
|
|
4093
|
+
_BACKEND.define_screen(name, props)
|
|
4094
|
+
else:
|
|
4095
|
+
key = _unwrap(name)
|
|
4096
|
+
self.env.setdefault("screens", {})[key] = props
|
|
1449
4097
|
|
|
1450
|
-
|
|
1451
|
-
|
|
1452
|
-
|
|
1453
|
-
|
|
1454
|
-
|
|
1455
|
-
|
|
4098
|
+
elif op_name == "DEFINE_COMPONENT":
|
|
4099
|
+
if isinstance(operand, (list, tuple)) and len(operand) >= 2:
|
|
4100
|
+
name = const(operand[0])
|
|
4101
|
+
props = const(operand[1])
|
|
4102
|
+
else:
|
|
4103
|
+
props = stack.pop() if stack else None
|
|
4104
|
+
name = stack.pop() if stack else None
|
|
4105
|
+
if _BACKEND_AVAILABLE:
|
|
4106
|
+
_BACKEND.define_component(name, props)
|
|
4107
|
+
else:
|
|
4108
|
+
key = _unwrap(name)
|
|
4109
|
+
self.env.setdefault("components", {})[key] = props
|
|
1456
4110
|
|
|
1457
|
-
|
|
1458
|
-
|
|
1459
|
-
|
|
1460
|
-
|
|
4111
|
+
elif op_name == "DEFINE_THEME":
|
|
4112
|
+
if isinstance(operand, (list, tuple)) and len(operand) >= 2:
|
|
4113
|
+
name = const(operand[0])
|
|
4114
|
+
props = const(operand[1])
|
|
4115
|
+
else:
|
|
4116
|
+
props = stack.pop() if stack else None
|
|
4117
|
+
name = stack.pop() if stack else None
|
|
4118
|
+
key = _unwrap(name)
|
|
4119
|
+
self.env.setdefault("themes", {})[key] = props
|
|
4120
|
+
|
|
4121
|
+
elif op_name == "DEFINE_ENUM":
|
|
4122
|
+
enum_name = _unwrap(const(operand[0]))
|
|
4123
|
+
enum_map = const(operand[1])
|
|
4124
|
+
self.env.setdefault("enums", {})[enum_name] = enum_map
|
|
4125
|
+
self.env[enum_name] = enum_map
|
|
4126
|
+
self._bump_env_version(enum_name, enum_map)
|
|
4127
|
+
|
|
4128
|
+
elif op_name == "DEFINE_PROTOCOL":
|
|
4129
|
+
proto_name = _unwrap(const(operand[0]))
|
|
4130
|
+
proto_spec = const(operand[1])
|
|
4131
|
+
self.env.setdefault("protocols", {})[proto_name] = proto_spec
|
|
4132
|
+
self.env[proto_name] = proto_spec
|
|
4133
|
+
self._bump_env_version(proto_name, proto_spec)
|
|
4134
|
+
|
|
4135
|
+
elif op_name == "ASSERT_PROTOCOL":
|
|
4136
|
+
obj_name = const(operand[0])
|
|
4137
|
+
spec = const(operand[1])
|
|
4138
|
+
obj = self.env.get(obj_name)
|
|
4139
|
+
ok = True
|
|
4140
|
+
missing = []
|
|
4141
|
+
for m in spec.get("methods", []):
|
|
4142
|
+
if not hasattr(obj, m):
|
|
4143
|
+
ok = False; missing.append(m)
|
|
4144
|
+
stack.append((ok, missing))
|
|
4145
|
+
|
|
4146
|
+
# --- Blockchain Specific Opcodes ---
|
|
4147
|
+
|
|
4148
|
+
elif op_name == "HASH_BLOCK":
|
|
4149
|
+
block_data = stack.pop() if stack else ""
|
|
4150
|
+
if isinstance(block_data, dict):
|
|
4151
|
+
import json; block_data = json.dumps(block_data, sort_keys=True)
|
|
4152
|
+
if not isinstance(block_data, (bytes, str)): block_data = str(block_data)
|
|
4153
|
+
if isinstance(block_data, str): block_data = block_data.encode('utf-8')
|
|
4154
|
+
try:
|
|
4155
|
+
from Crypto.Hash import keccak as _keccak_mod
|
|
4156
|
+
h = _keccak_mod.new(digest_bits=256, data=block_data)
|
|
4157
|
+
stack.append(h.hexdigest())
|
|
4158
|
+
except ImportError:
|
|
4159
|
+
stack.append(hashlib.sha256(block_data).hexdigest())
|
|
4160
|
+
|
|
4161
|
+
elif op_name == "VERIFY_SIGNATURE":
|
|
4162
|
+
if len(stack) >= 3:
|
|
4163
|
+
pk = stack.pop(); msg = stack.pop(); sig = stack.pop()
|
|
4164
|
+
verify_fn = self.builtins.get("verify_sig") or self.env.get("verify_sig")
|
|
4165
|
+
if verify_fn:
|
|
4166
|
+
res = await self._invoke_callable_or_funcdesc(verify_fn, [sig, msg, pk])
|
|
4167
|
+
stack.append(res)
|
|
4168
|
+
else:
|
|
4169
|
+
# Use real CryptoPlugin verification when available.
|
|
4170
|
+
# No SHA-256 fallback — forging a SHA-256 hash is trivial.
|
|
4171
|
+
try:
|
|
4172
|
+
from ..blockchain.crypto import CryptoPlugin
|
|
4173
|
+
sig_str = sig.value if hasattr(sig, 'value') else str(sig)
|
|
4174
|
+
msg_str = msg.value if hasattr(msg, 'value') else str(msg)
|
|
4175
|
+
pk_str = pk.value if hasattr(pk, 'value') else str(pk)
|
|
4176
|
+
stack.append(CryptoPlugin.verify_signature(msg_str, sig_str, pk_str))
|
|
4177
|
+
except ImportError:
|
|
4178
|
+
# CryptoPlugin unavailable — always reject
|
|
4179
|
+
stack.append(False)
|
|
4180
|
+
else:
|
|
4181
|
+
stack.append(False)
|
|
4182
|
+
|
|
4183
|
+
elif op_name == "MERKLE_ROOT":
|
|
4184
|
+
leaf_count = operand if operand is not None else 0
|
|
4185
|
+
if leaf_count <= 0 or len(stack) < leaf_count:
|
|
4186
|
+
stack.append("")
|
|
4187
|
+
else:
|
|
4188
|
+
leaves = [stack.pop() for _ in range(leaf_count)][::-1] if len(stack) >= leaf_count else []
|
|
4189
|
+
hashes = []
|
|
4190
|
+
for leaf in leaves:
|
|
4191
|
+
if isinstance(leaf, dict):
|
|
4192
|
+
import json; leaf = json.dumps(leaf, sort_keys=True)
|
|
4193
|
+
if not isinstance(leaf, (str, bytes)): leaf = str(leaf)
|
|
4194
|
+
if isinstance(leaf, str): leaf = leaf.encode('utf-8')
|
|
4195
|
+
hashes.append(hashlib.sha256(leaf).hexdigest())
|
|
4196
|
+
|
|
4197
|
+
while len(hashes) > 1:
|
|
4198
|
+
if len(hashes) % 2 != 0: hashes.append(hashes[-1])
|
|
4199
|
+
new_hashes = []
|
|
4200
|
+
for i in range(0, len(hashes), 2):
|
|
4201
|
+
combined = (hashes[i] + hashes[i+1]).encode('utf-8')
|
|
4202
|
+
new_hashes.append(hashlib.sha256(combined).hexdigest())
|
|
4203
|
+
hashes = new_hashes
|
|
4204
|
+
stack.append(hashes[0] if hashes else "")
|
|
4205
|
+
|
|
4206
|
+
elif op_name == "STATE_READ":
|
|
4207
|
+
if operand is None:
|
|
4208
|
+
key = _unwrap(stack.pop() if stack else None)
|
|
4209
|
+
else:
|
|
4210
|
+
key = const(operand)
|
|
4211
|
+
stack.append(self.env.setdefault("_blockchain_state", {}).get(key))
|
|
4212
|
+
|
|
4213
|
+
elif op_name == "STATE_WRITE":
|
|
4214
|
+
val = _unwrap(stack.pop() if stack else None)
|
|
4215
|
+
if operand is None:
|
|
4216
|
+
key = _unwrap(stack.pop() if stack else None)
|
|
4217
|
+
else:
|
|
4218
|
+
key = const(operand)
|
|
4219
|
+
if self.env.get("_in_transaction", False):
|
|
4220
|
+
self.env.setdefault("_tx_pending_state", {})[key] = val
|
|
4221
|
+
else:
|
|
4222
|
+
self.env.setdefault("_blockchain_state", {})[key] = val
|
|
4223
|
+
|
|
4224
|
+
elif op_name == "TX_BEGIN":
|
|
4225
|
+
# Support nested transactions via a stack
|
|
4226
|
+
tx_stack = self.env.setdefault("_tx_stack", [])
|
|
4227
|
+
tx_stack.append({
|
|
4228
|
+
"snapshot": dict(self.env.get("_blockchain_state", {})),
|
|
4229
|
+
"pending": dict(self.env.get("_tx_pending_state", {})),
|
|
4230
|
+
})
|
|
4231
|
+
self.env["_in_transaction"] = True
|
|
1461
4232
|
self.env["_tx_pending_state"] = {}
|
|
1462
|
-
|
|
1463
|
-
|
|
1464
|
-
|
|
1465
|
-
|
|
1466
|
-
|
|
1467
|
-
|
|
1468
|
-
|
|
1469
|
-
|
|
1470
|
-
if
|
|
1471
|
-
|
|
4233
|
+
self.env["_tx_snapshot"] = dict(self.env.get("_blockchain_state", {}))
|
|
4234
|
+
if self.use_memory_manager: self.env["_tx_memory_snapshot"] = dict(self._managed_objects)
|
|
4235
|
+
|
|
4236
|
+
elif op_name == "SETUP_TRY":
|
|
4237
|
+
handler = int(operand) if operand is not None else ip
|
|
4238
|
+
try_stack.append(handler)
|
|
4239
|
+
|
|
4240
|
+
elif op_name == "POP_TRY":
|
|
4241
|
+
if try_stack:
|
|
4242
|
+
try_stack.pop()
|
|
4243
|
+
|
|
4244
|
+
elif op_name == "THROW":
|
|
4245
|
+
exc = stack.pop() if stack else None
|
|
4246
|
+
if try_stack:
|
|
4247
|
+
handler = try_stack.pop()
|
|
4248
|
+
stack.append(exc)
|
|
4249
|
+
ip = handler
|
|
4250
|
+
else:
|
|
4251
|
+
msg = exc.value if hasattr(exc, "value") else exc
|
|
4252
|
+
raise ZEvaluationError(str(msg))
|
|
4253
|
+
|
|
4254
|
+
elif op_name == "TX_COMMIT":
|
|
4255
|
+
if self.env.get("_in_transaction", False):
|
|
4256
|
+
self.env.setdefault("_blockchain_state", {}).update(self.env.get("_tx_pending_state", {}))
|
|
4257
|
+
self.env["_tx_pending_state"] = {}
|
|
4258
|
+
self.env.pop("_tx_snapshot", None)
|
|
4259
|
+
if "_tx_memory_snapshot" in self.env: del self.env["_tx_memory_snapshot"]
|
|
4260
|
+
# Restore outer TX context if nested
|
|
4261
|
+
tx_stack = self.env.get("_tx_stack", [])
|
|
4262
|
+
if tx_stack:
|
|
4263
|
+
tx_stack.pop()
|
|
4264
|
+
self.env["_in_transaction"] = bool(tx_stack)
|
|
4265
|
+
|
|
4266
|
+
elif op_name == "TX_REVERT":
|
|
4267
|
+
if self.env.get("_in_transaction", False):
|
|
4268
|
+
self.env["_blockchain_state"] = dict(self.env.get("_tx_snapshot", {}))
|
|
4269
|
+
self.env["_tx_pending_state"] = {}
|
|
4270
|
+
self.env.pop("_tx_snapshot", None)
|
|
4271
|
+
if self.use_memory_manager and "_tx_memory_snapshot" in self.env:
|
|
4272
|
+
self._managed_objects = dict(self.env["_tx_memory_snapshot"])
|
|
4273
|
+
del self.env["_tx_memory_snapshot"]
|
|
4274
|
+
# Restore outer TX context if nested
|
|
4275
|
+
tx_stack = self.env.get("_tx_stack", [])
|
|
4276
|
+
if tx_stack:
|
|
4277
|
+
outer = tx_stack.pop()
|
|
4278
|
+
if tx_stack:
|
|
4279
|
+
self.env["_tx_snapshot"] = outer["snapshot"]
|
|
4280
|
+
self.env["_tx_pending_state"] = outer["pending"]
|
|
4281
|
+
self.env["_in_transaction"] = bool(self.env.get("_tx_stack", []))
|
|
4282
|
+
|
|
4283
|
+
elif op_name == "ENABLE_ERROR_MODE":
|
|
4284
|
+
self.env["_continue_on_error"] = True
|
|
4285
|
+
if self.debug: print("[VM] Error Recovery Mode ENABLED")
|
|
4286
|
+
|
|
4287
|
+
elif op_name == "GAS_CHARGE":
|
|
4288
|
+
amount = operand if operand is not None else 0
|
|
4289
|
+
# Delegate to the unified GasMetering system when available
|
|
4290
|
+
if self.gas_metering is not None:
|
|
4291
|
+
if not self.gas_metering.consume("GAS_CHARGE", amount=amount):
|
|
4292
|
+
if self.env.get("_in_transaction", False):
|
|
4293
|
+
self.env["_blockchain_state"] = dict(self.env.get("_tx_snapshot", {}))
|
|
4294
|
+
self.env["_in_transaction"] = False
|
|
4295
|
+
self.env.pop("_tx_snapshot", None)
|
|
4296
|
+
raise OutOfGasError(
|
|
4297
|
+
self.gas_metering.gas_used,
|
|
4298
|
+
self.gas_metering.gas_limit,
|
|
4299
|
+
"GAS_CHARGE"
|
|
4300
|
+
)
|
|
4301
|
+
# Sync env-based counter for backward compat
|
|
4302
|
+
if "_gas_remaining" in self.env:
|
|
4303
|
+
self.env["_gas_remaining"] = max(0, self.env["_gas_remaining"] - amount)
|
|
4304
|
+
else:
|
|
4305
|
+
# Fallback to env-based tracking when no GasMetering
|
|
4306
|
+
current = self.env.get("_gas_remaining", float('inf'))
|
|
4307
|
+
if current != float('inf'):
|
|
4308
|
+
new_gas = current - amount
|
|
4309
|
+
if new_gas < 0:
|
|
4310
|
+
if self.env.get("_in_transaction", False):
|
|
4311
|
+
self.env["_blockchain_state"] = dict(self.env.get("_tx_snapshot", {}))
|
|
4312
|
+
self.env["_in_transaction"] = False
|
|
4313
|
+
raise ZEvaluationError(
|
|
4314
|
+
f"Out of gas: required {amount}, remaining {current}")
|
|
4315
|
+
self.env["_gas_remaining"] = new_gas
|
|
4316
|
+
|
|
4317
|
+
elif op_name == "REQUIRE":
|
|
4318
|
+
message = stack.pop() if stack else "Requirement failed"
|
|
4319
|
+
if hasattr(message, 'value'): message = message.value
|
|
4320
|
+
condition = stack.pop() if stack else False
|
|
4321
|
+
cond_val = condition.value if hasattr(condition, 'value') else condition
|
|
4322
|
+
|
|
4323
|
+
if not cond_val:
|
|
1472
4324
|
if self.env.get("_in_transaction", False):
|
|
1473
4325
|
self.env["_blockchain_state"] = dict(self.env.get("_tx_snapshot", {}))
|
|
1474
4326
|
self.env["_in_transaction"] = False
|
|
1475
|
-
|
|
1476
|
-
|
|
1477
|
-
|
|
1478
|
-
|
|
1479
|
-
|
|
1480
|
-
|
|
1481
|
-
|
|
1482
|
-
|
|
1483
|
-
|
|
1484
|
-
|
|
1485
|
-
|
|
1486
|
-
|
|
1487
|
-
|
|
1488
|
-
|
|
1489
|
-
|
|
1490
|
-
|
|
1491
|
-
|
|
4327
|
+
self.env["_tx_pending_state"] = {}
|
|
4328
|
+
self.env.pop("_tx_snapshot", None)
|
|
4329
|
+
self.env.pop("_tx_memory_snapshot", None)
|
|
4330
|
+
# Clean up TX stack
|
|
4331
|
+
tx_stack = self.env.get("_tx_stack", [])
|
|
4332
|
+
if tx_stack: tx_stack.pop()
|
|
4333
|
+
self.env["_in_transaction"] = bool(tx_stack)
|
|
4334
|
+
raise ZEvaluationError(f"Requirement failed: {message}")
|
|
4335
|
+
|
|
4336
|
+
elif op_name == "DEFINE_CONTRACT":
|
|
4337
|
+
contract_obj = self._build_smart_contract(
|
|
4338
|
+
operand, stack, lambda: stack.pop() if stack else None,
|
|
4339
|
+
lambda idx: consts[idx] if isinstance(idx, int) and 0 <= idx < len(consts) else idx,
|
|
4340
|
+
self.env
|
|
4341
|
+
)
|
|
4342
|
+
stack.append(contract_obj)
|
|
4343
|
+
|
|
4344
|
+
elif op_name == "DEFINE_ENTITY":
|
|
4345
|
+
member_count = operand
|
|
4346
|
+
members = {}
|
|
4347
|
+
for _ in range(member_count):
|
|
4348
|
+
key_obj = stack.pop() if stack else None
|
|
4349
|
+
val_obj = stack.pop() if stack else None
|
|
4350
|
+
key_str = key_obj.value if hasattr(key_obj, 'value') else str(key_obj)
|
|
4351
|
+
members[key_str] = val_obj
|
|
4352
|
+
|
|
4353
|
+
name_obj = stack.pop() if stack else None
|
|
4354
|
+
# Create Entity (using Map for now, can be specialized Entity class later)
|
|
4355
|
+
members['_type'] = 'entity'
|
|
4356
|
+
members['_name'] = name_obj.value if hasattr(name_obj, 'value') else str(name_obj)
|
|
4357
|
+
stack.append(ZMap(members))
|
|
4358
|
+
|
|
4359
|
+
elif op_name == "DEFINE_CAPABILITY":
|
|
4360
|
+
name = stack.pop() if stack else None
|
|
4361
|
+
definition = stack.pop() if stack else {}
|
|
4362
|
+
if hasattr(name, 'value'): name = name.value
|
|
4363
|
+
self.env.setdefault("_capabilities", {})[name] = definition
|
|
4364
|
+
|
|
4365
|
+
elif op_name == "GRANT_CAPABILITY":
|
|
4366
|
+
count = operand
|
|
4367
|
+
caps = [stack.pop() for _ in range(count)][::-1]
|
|
4368
|
+
entity_name = stack.pop() if stack else None
|
|
4369
|
+
if hasattr(entity_name, 'value'): entity_name = entity_name.value
|
|
4370
|
+
|
|
4371
|
+
grants = self.env.setdefault("_grants", {})
|
|
4372
|
+
entity_grants = grants.setdefault(entity_name, set())
|
|
4373
|
+
|
|
4374
|
+
for cap in caps:
|
|
4375
|
+
c_val = cap.value if hasattr(cap, 'value') else str(cap)
|
|
4376
|
+
entity_grants.add(c_val)
|
|
4377
|
+
|
|
4378
|
+
elif op_name == "REVOKE_CAPABILITY":
|
|
4379
|
+
count = operand
|
|
4380
|
+
caps = [stack.pop() for _ in range(count)][::-1]
|
|
4381
|
+
entity_name = stack.pop() if stack else None
|
|
4382
|
+
if hasattr(entity_name, 'value'): entity_name = entity_name.value
|
|
4383
|
+
|
|
4384
|
+
if "_grants" in self.env and entity_name in self.env["_grants"]:
|
|
4385
|
+
entity_grants = self.env["_grants"][entity_name]
|
|
4386
|
+
for cap in caps:
|
|
4387
|
+
c_val = cap.value if hasattr(cap, 'value') else str(cap)
|
|
4388
|
+
if c_val in entity_grants:
|
|
4389
|
+
entity_grants.remove(c_val)
|
|
4390
|
+
|
|
4391
|
+
elif op_name == "AUDIT_LOG":
|
|
4392
|
+
ts = stack_pop()
|
|
4393
|
+
action = stack_pop()
|
|
4394
|
+
data = stack_pop()
|
|
4395
|
+
# Unwrap
|
|
4396
|
+
ts = ts.value if hasattr(ts, 'value') else ts
|
|
4397
|
+
action = action.value if hasattr(action, 'value') else action
|
|
4398
|
+
data = data.value if hasattr(data, 'value') else data
|
|
4399
|
+
|
|
4400
|
+
entry = {"timestamp": ts, "action": action, "data": data}
|
|
4401
|
+
self.env.setdefault("_audit_log", []).append(entry)
|
|
4402
|
+
if self.debug: print(f"[AUDIT] {entry}")
|
|
4403
|
+
|
|
4404
|
+
elif op_name == "RESTRICT_ACCESS":
|
|
4405
|
+
restriction = stack_pop()
|
|
4406
|
+
prop = stack_pop()
|
|
4407
|
+
obj = stack_pop()
|
|
4408
|
+
|
|
4409
|
+
# Enforce access restrictions for smart contract actions.
|
|
4410
|
+
# ``restriction`` can be:
|
|
4411
|
+
# - a string like "owner_only" — compared against TX.caller
|
|
4412
|
+
# - a list of allowed addresses
|
|
4413
|
+
# - a callable predicate
|
|
4414
|
+
r_key = f"{obj}.{prop}" if prop else str(obj)
|
|
4415
|
+
self.env.setdefault("_restrictions", {})[r_key] = restriction
|
|
4416
|
+
|
|
4417
|
+
# Real enforcement: check if the current caller matches
|
|
4418
|
+
caller = None
|
|
4419
|
+
tx_obj = self.env.get("TX")
|
|
4420
|
+
if tx_obj is not None:
|
|
4421
|
+
if hasattr(tx_obj, 'get'):
|
|
4422
|
+
caller_val = tx_obj.get(ZString("caller")) if hasattr(tx_obj, 'get') else None
|
|
4423
|
+
if caller_val is not None:
|
|
4424
|
+
caller = caller_val.value if hasattr(caller_val, 'value') else str(caller_val)
|
|
4425
|
+
|
|
4426
|
+
restriction_val = restriction.value if hasattr(restriction, 'value') else restriction
|
|
4427
|
+
if isinstance(restriction_val, str) and restriction_val == "owner_only":
|
|
4428
|
+
owner = self.env.get("owner")
|
|
4429
|
+
if owner is not None:
|
|
4430
|
+
owner_val = owner.value if hasattr(owner, 'value') else str(owner)
|
|
4431
|
+
if caller and caller != owner_val:
|
|
4432
|
+
raise ZEvaluationError(
|
|
4433
|
+
f"Access denied: '{r_key}' restricted to owner only"
|
|
4434
|
+
)
|
|
4435
|
+
elif isinstance(restriction_val, (list, tuple)):
|
|
4436
|
+
allowed = [a.value if hasattr(a, 'value') else str(a) for a in restriction_val]
|
|
4437
|
+
if caller and caller not in allowed:
|
|
4438
|
+
raise ZEvaluationError(
|
|
4439
|
+
f"Access denied: '{r_key}' restricted to allowed addresses"
|
|
4440
|
+
)
|
|
4441
|
+
|
|
4442
|
+
elif op_name == "LEDGER_APPEND":
|
|
4443
|
+
entry = stack.pop() if stack else None
|
|
4444
|
+
if isinstance(entry, dict) and "timestamp" not in entry:
|
|
4445
|
+
entry["timestamp"] = time.time()
|
|
4446
|
+
self.env.setdefault("_ledger", []).append(entry)
|
|
4447
|
+
|
|
4448
|
+
elif op_name in ("PARALLEL_START", "PARALLEL_END"):
|
|
4449
|
+
# Marker ops for parallel execution - no-op in stack VM
|
|
4450
|
+
pass
|
|
4451
|
+
|
|
4452
|
+
else:
|
|
4453
|
+
if debug: print(f"[VM] Unknown Opcode: {op}")
|
|
4454
|
+
|
|
4455
|
+
# Record instruction timing (if profiling enabled)
|
|
4456
|
+
if instr_start_time is not None and self.profiler:
|
|
4457
|
+
elapsed = time.perf_counter() - instr_start_time
|
|
4458
|
+
self.profiler.measure_instruction(current_ip, elapsed)
|
|
4459
|
+
except Exception as e:
|
|
4460
|
+
if self.env.get("_continue_on_error", False):
|
|
4461
|
+
# Error Recovery Mode
|
|
4462
|
+
if debug: print(f"[VM ERROR RECOVERY] {e}")
|
|
4463
|
+
self.env.setdefault("_errors", []).append(str(e))
|
|
4464
|
+
else:
|
|
4465
|
+
raise
|
|
1492
4466
|
|
|
1493
4467
|
if profile_ops and opcode_counts is not None:
|
|
1494
4468
|
self._last_opcode_profile = sorted(opcode_counts.items(), key=lambda item: item[1], reverse=True)
|
|
@@ -1512,20 +4486,26 @@ class VM:
|
|
|
1512
4486
|
|
|
1513
4487
|
local_env = {k: v for k, v in zip(params, args)}
|
|
1514
4488
|
|
|
1515
|
-
inner_vm = VM(
|
|
1516
|
-
|
|
1517
|
-
env=local_env
|
|
1518
|
-
parent_env=parent_env,
|
|
1519
|
-
# Inherit configuration
|
|
1520
|
-
use_jit=self.use_jit,
|
|
1521
|
-
use_memory_manager=self.use_memory_manager
|
|
4489
|
+
inner_vm = VM.create_child(
|
|
4490
|
+
parent_vm=parent_env if isinstance(parent_env, VM) else self,
|
|
4491
|
+
env=local_env
|
|
1522
4492
|
)
|
|
1523
|
-
|
|
4493
|
+
if not isinstance(parent_env, VM):
|
|
4494
|
+
inner_vm._parent_env = parent_env
|
|
4495
|
+
|
|
4496
|
+
snapshot = fn.get("closure_snapshot")
|
|
4497
|
+
if snapshot:
|
|
4498
|
+
for key, value in snapshot.items():
|
|
4499
|
+
inner_vm._closure_cells[key] = Cell(value)
|
|
4500
|
+
try:
|
|
4501
|
+
return await inner_vm._run_stack_bytecode(func_bc, debug=False)
|
|
4502
|
+
finally:
|
|
4503
|
+
self._return_vm_to_pool(inner_vm)
|
|
1524
4504
|
|
|
1525
4505
|
# 2. Python Callable / Builtin Wrapper
|
|
1526
4506
|
return await self._call_builtin_async_obj(fn, args)
|
|
1527
4507
|
|
|
1528
|
-
async def _call_builtin_async(self, name: str, args: List[Any]):
|
|
4508
|
+
async def _call_builtin_async(self, name: str, args: List[Any], wrap_args: bool = True):
|
|
1529
4509
|
target = self.builtins.get(name) or self.env.get(name)
|
|
1530
4510
|
|
|
1531
4511
|
# Check Renderer Backend
|
|
@@ -1533,30 +4513,84 @@ class VM:
|
|
|
1533
4513
|
fn = getattr(_BACKEND, name)
|
|
1534
4514
|
if asyncio.iscoroutinefunction(fn): return await fn(*args)
|
|
1535
4515
|
return fn(*args)
|
|
1536
|
-
|
|
1537
|
-
|
|
4516
|
+
|
|
4517
|
+
if target is None:
|
|
4518
|
+
return self._call_fallback_builtin(name, args)
|
|
1538
4519
|
|
|
1539
|
-
|
|
4520
|
+
return await self._call_builtin_async_obj(target, args, wrap_args=wrap_args)
|
|
4521
|
+
|
|
4522
|
+
async def _call_builtin_async_obj(self, fn_obj, args: List[Any], wrap_args: bool = True):
    """Invoke a builtin/callable object asynchronously and return its result.

    Dispatch order:
      1. Zexus Action / LambdaFunction objects: compile the body to bytecode
         (cached on the object as ``_cached_bytecode``) and run it in a child
         VM (fast path); otherwise fall back to the tree-walking Evaluator.
      2. Anything else: unwrap a ``.fn`` attribute if present, call the raw
         callable, and await the result when it is a coroutine/Future.

    Conventions visible in this body:
      - Exceptions are *returned* as values, not raised (outer try/except).
      - ``wrap_args`` controls whether arguments pass through
        ``_wrap_for_builtin`` before the call.
      - ``ZEXUS_VM_PROFILE_VERBOSE`` / ``ZEXUS_VM_TRACE_ERRORS`` env vars
        gate debug printing.
    """
    try:
        if fn_obj is None: return None

        # Extract .fn if it's a wrapper
        real_fn = fn_obj.fn if hasattr(fn_obj, "fn") else fn_obj

        # Execute Zexus Action/LambdaFunction via VM if possible, fallback to evaluator
        try:
            # NOTE(review): _get_action_types() presumably lazily imports the
            # Action/Lambda classes; returns (None, None) when unavailable — confirm.
            ZAction, ZLambda = _get_action_types()
            if ZAction is not None and isinstance(real_fn, (ZAction, ZLambda)):
                # Try to compile to bytecode and execute in VM (fast path)
                action_bytecode = None
                try:
                    if hasattr(real_fn, '_cached_bytecode'):
                        action_bytecode = real_fn._cached_bytecode
                    else:
                        from ..evaluator.bytecode_compiler import EvaluatorBytecodeCompiler
                        compiler = EvaluatorBytecodeCompiler(use_cache=False)
                        action_bytecode = compiler.compile(real_fn.body, optimize=True)
                        # Only cache a clean compile; errored bytecode is discarded.
                        if action_bytecode and not compiler.errors:
                            real_fn._cached_bytecode = action_bytecode
                except Exception:
                    # Compilation failure is non-fatal: drop to the slow path.
                    action_bytecode = None

                if action_bytecode:
                    # Execute via VM (fast)
                    call_args = [self._wrap_for_builtin(arg) for arg in args] if wrap_args else list(args)
                    params = real_fn.parameters if hasattr(real_fn, 'parameters') else []
                    # Parameters may be token-like objects (.value) or plain strings.
                    local_env = {k.value if hasattr(k, 'value') else k: v for k, v in zip(params, call_args)}
                    inner_vm = VM.create_child(parent_vm=self, env=local_env)
                    try:
                        result = inner_vm._run_stack_bytecode_sync(action_bytecode, debug=False)
                    finally:
                        # Always recycle the child VM, even if execution raised.
                        self._return_vm_to_pool(inner_vm)
                    return self._unwrap_after_builtin(result)
                else:
                    # Fallback to interpreter (slow)
                    from ..evaluator.core import Evaluator
                    # Lazily create and reuse a single non-VM evaluator instance.
                    if self._action_evaluator is None:
                        self._action_evaluator = Evaluator(use_vm=False)
                    call_args = [self._wrap_for_builtin(arg) for arg in args] if wrap_args else list(args)
                    result = self._action_evaluator.apply_function(real_fn, call_args)
                    trace_errors = os.environ.get("ZEXUS_VM_TRACE_ERRORS")
                    if trace_errors and trace_errors.lower() not in ("0", "false", "off"):
                        if isinstance(result, ZEvaluationError):
                            print(f"[VM TRACE] action error: {result.message}")
                    return self._unwrap_after_builtin(result)
        except Exception:
            # Any failure in the Action fast-path falls through to the
            # generic callable handling below (best-effort dispatch).
            pass

        # Non-callables are returned as-is (e.g. plain values stored in env).
        if not callable(real_fn): return real_fn

        call_args = [self._wrap_for_builtin(arg) for arg in args] if wrap_args else list(args)
        verbose_flag = os.environ.get("ZEXUS_VM_PROFILE_VERBOSE")
        verbose_active = verbose_flag and verbose_flag.lower() not in ("0", "false", "off")
        if verbose_active:
            fn_name = getattr(fn_obj, "name", getattr(real_fn, "__name__", "<callable>"))
            print(f"[VM DEBUG] calling builtin {fn_name} args={[type(a).__name__ for a in call_args]}")
        res = real_fn(*call_args)
        trace_errors = os.environ.get("ZEXUS_VM_TRACE_ERRORS")
        if trace_errors and trace_errors.lower() not in ("0", "false", "off"):
            if isinstance(res, ZEvaluationError):
                print(f"[VM TRACE] builtin error: {res.message}")
        if verbose_active and res is None:
            fn_name = getattr(fn_obj, "name", getattr(real_fn, "__name__", "<callable>"))
            print(f"[VM DEBUG] builtin {fn_name} returned None args={call_args}")
        if asyncio.iscoroutine(res) or isinstance(res, asyncio.Future):
            # Prefer the async optimizer's await when one is configured.
            if self.async_optimizer:
                res = await self.async_optimizer.await_optimized(res)
            else:
                res = await res
        return self._unwrap_after_builtin(res)
    except Exception as e:
        # VM convention: surface errors as return values, never raise here.
        return e
|
|
@@ -1569,21 +4603,37 @@ class VM:
|
|
|
1569
4603
|
return None
|
|
1570
4604
|
return _wrap()
|
|
1571
4605
|
|
|
4606
|
+
def _call_fallback_builtin(self, name: str, args: List[Any]):
    """Run the registered pure-Python fallback for builtin *name*.

    Returns None when no fallback is registered in _FALLBACK_BUILTINS.
    A failing handler is reported (when self.debug is set) and converted
    into a ZEvaluationError value — not raised — matching the VM's
    return-errors convention.
    """
    handler = _FALLBACK_BUILTINS.get(name)
    if not handler:
        return None
    try:
        result = handler(args)
    except Exception as exc:
        if self.debug:
            print(f"[VM] fallback builtin '{name}' failed: {exc}")
        return ZEvaluationError(f"Builtin '{name}' failed: {exc}")
    return result
|
|
4616
|
+
|
|
1572
4617
|
def profile_execution(self, bytecode, iterations: int = 1000) -> Dict[str, Any]:
    """Benchmark *bytecode* across the available execution backends.

    Times *iterations* runs of the stack interpreter (always) and the
    register interpreter (when self._register_vm is configured) with
    timeit, reporting total and average wall time per mode plus the
    stack-vs-register speedup when it can be computed.
    """
    import timeit

    report: Dict[str, Any] = {'iterations': iterations, 'modes': {}}

    # Stack interpreter — drive the coroutine synchronously each run.
    stack_total = timeit.timeit(
        lambda: self._run_coroutine_sync(self._execute_stack(bytecode)),
        number=iterations,
    )
    report['modes']['stack'] = {
        'total': stack_total,
        'avg': stack_total / iterations if iterations else 0.0,
    }

    # Register interpreter — optional backend.
    if self._register_vm:
        reg_total = timeit.timeit(
            lambda: self._execute_register(bytecode),
            number=iterations,
        )
        mode_info: Dict[str, Any] = {
            'total': reg_total,
            'avg': reg_total / iterations if iterations else 0.0,
        }
        # Speedup is only meaningful with a non-zero register timing.
        if reg_total > 0:
            mode_info['speedup'] = stack_total / reg_total if stack_total > 0 else float('inf')
        report['modes']['register'] = mode_info

    return report
|
|
1589
4639
|
|