zexus 1.6.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +0 -0
- package/README.md +2513 -0
- package/bin/zexus +2 -0
- package/bin/zpics +2 -0
- package/bin/zpm +2 -0
- package/bin/zx +2 -0
- package/bin/zx-deploy +2 -0
- package/bin/zx-dev +2 -0
- package/bin/zx-run +2 -0
- package/package.json +66 -0
- package/scripts/README.md +24 -0
- package/scripts/postinstall.js +44 -0
- package/shared_config.json +24 -0
- package/src/README.md +1525 -0
- package/src/tests/run_zexus_tests.py +117 -0
- package/src/tests/test_all_phases.zx +346 -0
- package/src/tests/test_blockchain_features.zx +306 -0
- package/src/tests/test_complexity_features.zx +321 -0
- package/src/tests/test_core_integration.py +185 -0
- package/src/tests/test_phase10_ecosystem.zx +177 -0
- package/src/tests/test_phase1_modifiers.zx +87 -0
- package/src/tests/test_phase2_plugins.zx +80 -0
- package/src/tests/test_phase3_security.zx +97 -0
- package/src/tests/test_phase4_vfs.zx +116 -0
- package/src/tests/test_phase5_types.zx +117 -0
- package/src/tests/test_phase6_metaprogramming.zx +125 -0
- package/src/tests/test_phase7_optimization.zx +132 -0
- package/src/tests/test_phase9_advanced_types.zx +157 -0
- package/src/tests/test_security_features.py +419 -0
- package/src/tests/test_security_features.zx +276 -0
- package/src/tests/test_simple_zx.zx +1 -0
- package/src/tests/test_verification_simple.zx +69 -0
- package/src/zexus/__init__.py +28 -0
- package/src/zexus/__main__.py +5 -0
- package/src/zexus/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/advanced_types.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/builtin_modules.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/capability_system.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/complexity_system.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/concurrency_system.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/config.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/dependency_injection.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/ecosystem.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/environment.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/error_reporter.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/hybrid_orchestrator.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/lexer.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/metaprogramming.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/module_cache.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/object.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/optimization.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/plugin_system.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/policy_engine.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/security.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/stdlib_integration.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/strategy_recovery.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/syntax_validator.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/type_system.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/virtual_filesystem.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/zexus_ast.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/zexus_token.cpython-312.pyc +0 -0
- package/src/zexus/advanced_types.py +401 -0
- package/src/zexus/blockchain/__init__.py +40 -0
- package/src/zexus/blockchain/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/blockchain/__pycache__/crypto.cpython-312.pyc +0 -0
- package/src/zexus/blockchain/__pycache__/ledger.cpython-312.pyc +0 -0
- package/src/zexus/blockchain/__pycache__/transaction.cpython-312.pyc +0 -0
- package/src/zexus/blockchain/crypto.py +463 -0
- package/src/zexus/blockchain/ledger.py +255 -0
- package/src/zexus/blockchain/transaction.py +267 -0
- package/src/zexus/builtin_modules.py +284 -0
- package/src/zexus/builtin_plugins.py +317 -0
- package/src/zexus/capability_system.py +372 -0
- package/src/zexus/cli/__init__.py +2 -0
- package/src/zexus/cli/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/cli/__pycache__/main.cpython-312.pyc +0 -0
- package/src/zexus/cli/main.py +707 -0
- package/src/zexus/cli/zpm.py +203 -0
- package/src/zexus/compare_interpreter_compiler.py +146 -0
- package/src/zexus/compiler/__init__.py +169 -0
- package/src/zexus/compiler/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/compiler/__pycache__/lexer.cpython-312.pyc +0 -0
- package/src/zexus/compiler/__pycache__/parser.cpython-312.pyc +0 -0
- package/src/zexus/compiler/__pycache__/zexus_ast.cpython-312.pyc +0 -0
- package/src/zexus/compiler/bytecode.py +266 -0
- package/src/zexus/compiler/compat_runtime.py +277 -0
- package/src/zexus/compiler/lexer.py +257 -0
- package/src/zexus/compiler/parser.py +779 -0
- package/src/zexus/compiler/semantic.py +118 -0
- package/src/zexus/compiler/zexus_ast.py +454 -0
- package/src/zexus/complexity_system.py +575 -0
- package/src/zexus/concurrency_system.py +493 -0
- package/src/zexus/config.py +201 -0
- package/src/zexus/crypto_bridge.py +19 -0
- package/src/zexus/dependency_injection.py +423 -0
- package/src/zexus/ecosystem.py +434 -0
- package/src/zexus/environment.py +101 -0
- package/src/zexus/environment_manager.py +119 -0
- package/src/zexus/error_reporter.py +314 -0
- package/src/zexus/evaluator/__init__.py +12 -0
- package/src/zexus/evaluator/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/bytecode_compiler.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/core.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/expressions.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/functions.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/integration.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/statements.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/utils.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/bytecode_compiler.py +700 -0
- package/src/zexus/evaluator/core.py +891 -0
- package/src/zexus/evaluator/expressions.py +827 -0
- package/src/zexus/evaluator/functions.py +3989 -0
- package/src/zexus/evaluator/integration.py +396 -0
- package/src/zexus/evaluator/statements.py +4303 -0
- package/src/zexus/evaluator/utils.py +126 -0
- package/src/zexus/evaluator_original.py +2041 -0
- package/src/zexus/external_bridge.py +16 -0
- package/src/zexus/find_affected_imports.sh +155 -0
- package/src/zexus/hybrid_orchestrator.py +152 -0
- package/src/zexus/input_validation.py +259 -0
- package/src/zexus/lexer.py +571 -0
- package/src/zexus/logging.py +89 -0
- package/src/zexus/lsp/__init__.py +9 -0
- package/src/zexus/lsp/completion_provider.py +207 -0
- package/src/zexus/lsp/definition_provider.py +22 -0
- package/src/zexus/lsp/hover_provider.py +71 -0
- package/src/zexus/lsp/server.py +269 -0
- package/src/zexus/lsp/symbol_provider.py +31 -0
- package/src/zexus/metaprogramming.py +321 -0
- package/src/zexus/module_cache.py +89 -0
- package/src/zexus/module_manager.py +107 -0
- package/src/zexus/object.py +973 -0
- package/src/zexus/optimization.py +424 -0
- package/src/zexus/parser/__init__.py +31 -0
- package/src/zexus/parser/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/parser/__pycache__/parser.cpython-312.pyc +0 -0
- package/src/zexus/parser/__pycache__/strategy_context.cpython-312.pyc +0 -0
- package/src/zexus/parser/__pycache__/strategy_structural.cpython-312.pyc +0 -0
- package/src/zexus/parser/integration.py +86 -0
- package/src/zexus/parser/parser.py +3977 -0
- package/src/zexus/parser/strategy_context.py +7254 -0
- package/src/zexus/parser/strategy_structural.py +1033 -0
- package/src/zexus/persistence.py +391 -0
- package/src/zexus/plugin_system.py +290 -0
- package/src/zexus/policy_engine.py +365 -0
- package/src/zexus/profiler/__init__.py +5 -0
- package/src/zexus/profiler/profiler.py +233 -0
- package/src/zexus/purity_system.py +398 -0
- package/src/zexus/runtime/__init__.py +20 -0
- package/src/zexus/runtime/async_runtime.py +324 -0
- package/src/zexus/search_old_imports.sh +65 -0
- package/src/zexus/security.py +1407 -0
- package/src/zexus/stack_trace.py +233 -0
- package/src/zexus/stdlib/__init__.py +27 -0
- package/src/zexus/stdlib/blockchain.py +341 -0
- package/src/zexus/stdlib/compression.py +167 -0
- package/src/zexus/stdlib/crypto.py +124 -0
- package/src/zexus/stdlib/datetime.py +163 -0
- package/src/zexus/stdlib/db_mongo.py +199 -0
- package/src/zexus/stdlib/db_mysql.py +162 -0
- package/src/zexus/stdlib/db_postgres.py +163 -0
- package/src/zexus/stdlib/db_sqlite.py +133 -0
- package/src/zexus/stdlib/encoding.py +230 -0
- package/src/zexus/stdlib/fs.py +195 -0
- package/src/zexus/stdlib/http.py +219 -0
- package/src/zexus/stdlib/http_server.py +248 -0
- package/src/zexus/stdlib/json_module.py +61 -0
- package/src/zexus/stdlib/math.py +360 -0
- package/src/zexus/stdlib/os_module.py +265 -0
- package/src/zexus/stdlib/regex.py +148 -0
- package/src/zexus/stdlib/sockets.py +253 -0
- package/src/zexus/stdlib/test_framework.zx +208 -0
- package/src/zexus/stdlib/test_runner.zx +119 -0
- package/src/zexus/stdlib_integration.py +341 -0
- package/src/zexus/strategy_recovery.py +256 -0
- package/src/zexus/syntax_validator.py +356 -0
- package/src/zexus/testing/zpics.py +407 -0
- package/src/zexus/testing/zpics_runtime.py +369 -0
- package/src/zexus/type_system.py +374 -0
- package/src/zexus/validation_system.py +569 -0
- package/src/zexus/virtual_filesystem.py +355 -0
- package/src/zexus/vm/__init__.py +8 -0
- package/src/zexus/vm/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/async_optimizer.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/bytecode.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/cache.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/jit.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/memory_manager.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/memory_pool.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/optimizer.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/parallel_vm.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/peephole_optimizer.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/profiler.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/register_allocator.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/register_vm.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/ssa_converter.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/vm.cpython-312.pyc +0 -0
- package/src/zexus/vm/async_optimizer.py +420 -0
- package/src/zexus/vm/bytecode.py +428 -0
- package/src/zexus/vm/bytecode_converter.py +297 -0
- package/src/zexus/vm/cache.py +532 -0
- package/src/zexus/vm/jit.py +720 -0
- package/src/zexus/vm/memory_manager.py +520 -0
- package/src/zexus/vm/memory_pool.py +511 -0
- package/src/zexus/vm/optimizer.py +478 -0
- package/src/zexus/vm/parallel_vm.py +899 -0
- package/src/zexus/vm/peephole_optimizer.py +452 -0
- package/src/zexus/vm/profiler.py +527 -0
- package/src/zexus/vm/register_allocator.py +462 -0
- package/src/zexus/vm/register_vm.py +520 -0
- package/src/zexus/vm/ssa_converter.py +757 -0
- package/src/zexus/vm/vm.py +1392 -0
- package/src/zexus/zexus_ast.py +1782 -0
- package/src/zexus/zexus_token.py +253 -0
- package/src/zexus/zpm/__init__.py +15 -0
- package/src/zexus/zpm/installer.py +116 -0
- package/src/zexus/zpm/package_manager.py +208 -0
- package/src/zexus/zpm/publisher.py +98 -0
- package/src/zexus/zpm/registry.py +110 -0
- package/src/zexus.egg-info/PKG-INFO +2235 -0
- package/src/zexus.egg-info/SOURCES.txt +876 -0
- package/src/zexus.egg-info/dependency_links.txt +1 -0
- package/src/zexus.egg-info/entry_points.txt +3 -0
- package/src/zexus.egg-info/not-zip-safe +1 -0
- package/src/zexus.egg-info/requires.txt +14 -0
- package/src/zexus.egg-info/top_level.txt +2 -0
- package/zexus.json +14 -0
|
@@ -0,0 +1,1392 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Integrated Extended VM for Zexus.
|
|
3
|
+
|
|
4
|
+
Capabilities:
|
|
5
|
+
- Architecture: Stack, Register, and Parallel execution modes.
|
|
6
|
+
- Compilation: Tiered compilation with JIT (Hot path detection).
|
|
7
|
+
- Memory: Managed memory with Garbage Collection.
|
|
8
|
+
- Formats: High-level ops list and Low-level Bytecode.
|
|
9
|
+
- Features: Async primitives (SPAWN/AWAIT), Event System, Module Imports.
|
|
10
|
+
- Blockchain: Ziver-Chain specific opcodes (Merkle, Hash, State, Gas).
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import sys
|
|
14
|
+
import time
|
|
15
|
+
import asyncio
|
|
16
|
+
import importlib
|
|
17
|
+
import hashlib
|
|
18
|
+
import types
|
|
19
|
+
from typing import List, Any, Dict, Tuple, Optional, Union, Callable
|
|
20
|
+
from enum import Enum
|
|
21
|
+
|
|
22
|
+
# ==================== Backend / Optional Imports ====================
|
|
23
|
+
|
|
24
|
+
# JIT Compiler
|
|
25
|
+
try:
|
|
26
|
+
from .jit import JITCompiler, ExecutionTier
|
|
27
|
+
_JIT_AVAILABLE = True
|
|
28
|
+
except ImportError:
|
|
29
|
+
_JIT_AVAILABLE = False
|
|
30
|
+
JITCompiler = None
|
|
31
|
+
ExecutionTier = Enum('ExecutionTier', ['INTERPRETED', 'BYTECODE', 'JIT_NATIVE'])
|
|
32
|
+
|
|
33
|
+
# Memory Manager
|
|
34
|
+
try:
|
|
35
|
+
from .memory_manager import create_memory_manager
|
|
36
|
+
_MEMORY_MANAGER_AVAILABLE = True
|
|
37
|
+
except ImportError:
|
|
38
|
+
_MEMORY_MANAGER_AVAILABLE = False
|
|
39
|
+
|
|
40
|
+
# Register VM (Phase 5)
|
|
41
|
+
try:
|
|
42
|
+
from .register_vm import RegisterVM
|
|
43
|
+
_REGISTER_VM_AVAILABLE = True
|
|
44
|
+
except ImportError:
|
|
45
|
+
_REGISTER_VM_AVAILABLE = False
|
|
46
|
+
|
|
47
|
+
# Parallel VM (Phase 6)
|
|
48
|
+
try:
|
|
49
|
+
from .parallel_vm import ParallelVM, ExecutionMode
|
|
50
|
+
_PARALLEL_VM_AVAILABLE = True
|
|
51
|
+
except ImportError:
|
|
52
|
+
_PARALLEL_VM_AVAILABLE = False
|
|
53
|
+
|
|
54
|
+
# Profiler (Phase 8)
|
|
55
|
+
try:
|
|
56
|
+
from .profiler import InstructionProfiler, ProfilingLevel
|
|
57
|
+
_PROFILER_AVAILABLE = True
|
|
58
|
+
except ImportError:
|
|
59
|
+
_PROFILER_AVAILABLE = False
|
|
60
|
+
InstructionProfiler = None
|
|
61
|
+
ProfilingLevel = None
|
|
62
|
+
|
|
63
|
+
# Memory Pool (Phase 8)
|
|
64
|
+
try:
|
|
65
|
+
from .memory_pool import IntegerPool, StringPool, ListPool
|
|
66
|
+
_MEMORY_POOL_AVAILABLE = True
|
|
67
|
+
except ImportError:
|
|
68
|
+
_MEMORY_POOL_AVAILABLE = False
|
|
69
|
+
IntegerPool = None
|
|
70
|
+
StringPool = None
|
|
71
|
+
ListPool = None
|
|
72
|
+
|
|
73
|
+
# Peephole Optimizer (Phase 8)
|
|
74
|
+
try:
|
|
75
|
+
from .peephole_optimizer import PeepholeOptimizer, OptimizationLevel
|
|
76
|
+
_PEEPHOLE_OPTIMIZER_AVAILABLE = True
|
|
77
|
+
except ImportError:
|
|
78
|
+
_PEEPHOLE_OPTIMIZER_AVAILABLE = False
|
|
79
|
+
PeepholeOptimizer = None
|
|
80
|
+
OptimizationLevel = None
|
|
81
|
+
|
|
82
|
+
# Async Optimizer (Phase 8)
|
|
83
|
+
try:
|
|
84
|
+
from .async_optimizer import AsyncOptimizer, AsyncOptimizationLevel
|
|
85
|
+
_ASYNC_OPTIMIZER_AVAILABLE = True
|
|
86
|
+
except ImportError:
|
|
87
|
+
_ASYNC_OPTIMIZER_AVAILABLE = False
|
|
88
|
+
AsyncOptimizer = None
|
|
89
|
+
AsyncOptimizationLevel = None
|
|
90
|
+
|
|
91
|
+
# SSA Converter & Register Allocator (Phase 8.5)
|
|
92
|
+
try:
|
|
93
|
+
from .ssa_converter import SSAConverter, SSAProgram, destruct_ssa
|
|
94
|
+
from .register_allocator import RegisterAllocator, compute_live_ranges, AllocationResult
|
|
95
|
+
_SSA_AVAILABLE = True
|
|
96
|
+
except ImportError:
|
|
97
|
+
_SSA_AVAILABLE = False
|
|
98
|
+
SSAConverter = None
|
|
99
|
+
RegisterAllocator = None
|
|
100
|
+
|
|
101
|
+
# Renderer Backend
|
|
102
|
+
try:
|
|
103
|
+
from renderer import backend as _BACKEND
|
|
104
|
+
_BACKEND_AVAILABLE = True
|
|
105
|
+
except Exception:
|
|
106
|
+
_BACKEND_AVAILABLE = False
|
|
107
|
+
_BACKEND = None
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
# ==================== Core Definitions ====================
|
|
111
|
+
|
|
112
|
+
class VMMode(Enum):
    """Available execution strategies for the VM."""
    STACK = "stack"        # classic stack machine (standard/default)
    REGISTER = "register"  # register machine (optimized)
    PARALLEL = "parallel"  # multi-core execution
    AUTO = "auto"          # heuristic selection per program
|
|
118
|
+
|
|
119
|
+
class Cell:
    """Mutable box holding a single value.

    Used to implement proper closure-capture semantics: closures share one
    Cell so writes through one reference are visible through the others.
    """

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return "<Cell {!r}>".format(self.value)
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
class VM:
|
|
129
|
+
"""
|
|
130
|
+
Main Virtual Machine integrating advanced architecture with rich feature set.
|
|
131
|
+
"""
|
|
132
|
+
|
|
133
|
+
    def __init__(
        self,
        builtins: Optional[Dict[str, Any]] = None,
        env: Optional[Dict[str, Any]] = None,
        parent_env: Optional[Dict[str, Any]] = None,
        use_jit: bool = True,
        jit_threshold: int = 100,
        use_memory_manager: bool = False,
        max_heap_mb: int = 100,
        mode: VMMode = VMMode.AUTO,
        worker_count: Optional[int] = None,
        chunk_size: int = 50,
        num_registers: int = 16,
        hybrid_mode: bool = True,
        debug: bool = False,
        enable_profiling: bool = False,
        profiling_level: str = "DETAILED",
        enable_memory_pool: bool = True,
        pool_max_size: int = 1000,
        enable_peephole_optimizer: bool = True,
        optimization_level: str = "MODERATE",
        enable_async_optimizer: bool = True,
        async_optimization_level: str = "MODERATE",
        enable_ssa: bool = False,
        enable_register_allocation: bool = False,
        num_allocator_registers: int = 16
    ):
        """
        Initialize the enhanced VM.

        Each optional subsystem (JIT, memory manager, profiler, memory pools,
        peephole/async optimizers, SSA converter, register allocator) is
        enabled only when its flag is set AND its backing module imported
        successfully (the ``_*_AVAILABLE`` module flags); initialization
        failures degrade gracefully by disabling that subsystem.

        Args:
            builtins: Built-in name bindings available to executed code.
            env: Initial variable environment (mutated during execution).
            parent_env: Enclosing environment for scope-chain lookups.
            use_jit: Track hot paths and JIT-compile them when available.
            jit_threshold: Executions before a path is considered hot.
            use_memory_manager: Enable the managed heap with GC.
            max_heap_mb: Managed heap size limit in megabytes.
            mode: Execution mode; AUTO chooses per-program heuristically.
            worker_count: Parallel VM worker count (default: CPU count).
            chunk_size: Instruction chunk size for parallel execution.
            num_registers: Register count for the register VM.
            hybrid_mode: Allow the register VM to use hybrid execution.
            debug: Print subsystem initialization/diagnostic messages.
            enable_profiling: Enable the instruction profiler.
            profiling_level: Profiler level name (falls back to DETAILED).
            enable_memory_pool: Enable integer/string/list object pools.
            pool_max_size: Max size for each object pool.
            enable_peephole_optimizer: Enable the bytecode peephole pass.
            optimization_level: Peephole level name (falls back to MODERATE).
            enable_async_optimizer: Enable the async-operation optimizer.
            async_optimization_level: Async level name (falls back to MODERATE).
            enable_ssa: Convert bytecode to SSA form before optimization.
            enable_register_allocation: Run the register allocator.
            num_allocator_registers: Registers available to the allocator.
        """
        # --- Environment Setup ---
        self.builtins = builtins or {}
        self.env = env or {}
        self._parent_env = parent_env
        self.debug = debug

        # --- State Tracking ---
        self._events: Dict[str, List[Any]] = {}          # Event registry
        self._tasks: Dict[str, asyncio.Task] = {}        # Async tasks
        self._task_counter = 0
        self._closure_cells: Dict[str, Cell] = {}        # Closure storage
        self._execution_count = 0
        self._total_execution_time = 0.0
        self._mode_usage = {m.value: 0 for m in VMMode}  # per-mode run counters

        # --- JIT Compilation (Phase 2) ---
        self.use_jit = use_jit and _JIT_AVAILABLE
        self._jit_lock = None  # Thread lock for JIT compilation
        if self.use_jit:
            import threading
            self._jit_lock = threading.Lock()
            self.jit_compiler = JITCompiler(
                hot_threshold=jit_threshold,
                optimization_level=1,
                debug=debug
            )
            self._jit_execution_stats: Dict[str, List[float]] = {}
            self._execution_times: Dict[str, float] = {}
        else:
            self.jit_compiler = None

        # --- Memory Management (Phase 7) ---
        self.use_memory_manager = use_memory_manager and _MEMORY_MANAGER_AVAILABLE
        self.memory_manager = None
        self._managed_objects: Dict[str, int] = {}
        self._memory_lock = None  # Thread lock for memory operations
        if self.use_memory_manager:
            import threading
            self._memory_lock = threading.Lock()
            self.memory_manager = create_memory_manager(
                max_heap_mb=max_heap_mb,
                gc_threshold=1000
            )

        # --- Profiler (Phase 8) ---
        self.enable_profiling = enable_profiling and _PROFILER_AVAILABLE
        self.profiler = None
        if self.enable_profiling:
            try:
                # Unknown level names silently fall back to DETAILED.
                level = getattr(ProfilingLevel, profiling_level, ProfilingLevel.DETAILED)
                self.profiler = InstructionProfiler(level=level)
                if debug:
                    print(f"[VM] Profiler enabled: {profiling_level}")
            except Exception as e:
                if debug:
                    print(f"[VM] Failed to enable profiler: {e}")
                self.enable_profiling = False

        # --- Memory Pool (Phase 8) ---
        self.enable_memory_pool = enable_memory_pool and _MEMORY_POOL_AVAILABLE
        self.integer_pool = None
        self.string_pool = None
        self.list_pool = None
        if self.enable_memory_pool:
            try:
                self.integer_pool = IntegerPool(max_size=pool_max_size)
                self.string_pool = StringPool(max_size=pool_max_size)
                self.list_pool = ListPool(max_pool_size=pool_max_size)
                if debug:
                    print(f"[VM] Memory pools enabled: max_size={pool_max_size}")
            except Exception as e:
                if debug:
                    print(f"[VM] Failed to enable memory pools: {e}")
                self.enable_memory_pool = False

        # --- Peephole Optimizer (Phase 8) ---
        self.enable_peephole_optimizer = enable_peephole_optimizer and _PEEPHOLE_OPTIMIZER_AVAILABLE
        self.peephole_optimizer = None
        if self.enable_peephole_optimizer:
            try:
                level = getattr(OptimizationLevel, optimization_level, OptimizationLevel.MODERATE)
                self.peephole_optimizer = PeepholeOptimizer(level=level)
                if debug:
                    print(f"[VM] Peephole optimizer enabled: {optimization_level}")
            except Exception as e:
                if debug:
                    print(f"[VM] Failed to enable peephole optimizer: {e}")
                self.enable_peephole_optimizer = False

        # --- Async Optimizer (Phase 8) ---
        self.enable_async_optimizer = enable_async_optimizer and _ASYNC_OPTIMIZER_AVAILABLE
        self.async_optimizer = None
        if self.enable_async_optimizer:
            try:
                level = getattr(AsyncOptimizationLevel, async_optimization_level, AsyncOptimizationLevel.MODERATE)
                self.async_optimizer = AsyncOptimizer(level=level, pool_size=pool_max_size)
                if debug:
                    print(f"[VM] Async optimizer enabled: {async_optimization_level}")
            except Exception as e:
                if debug:
                    print(f"[VM] Failed to enable async optimizer: {e}")
                self.enable_async_optimizer = False

        # --- SSA Converter & Register Allocator (Phase 8.5) ---
        self.enable_ssa = enable_ssa and _SSA_AVAILABLE
        self.enable_register_allocation = enable_register_allocation and _SSA_AVAILABLE
        self.ssa_converter = None
        self.register_allocator = None

        if self.enable_ssa:
            try:
                self.ssa_converter = SSAConverter(optimize=True)
                if debug:
                    print("[VM] SSA converter enabled")
            except Exception as e:
                if debug:
                    print(f"[VM] Failed to enable SSA converter: {e}")
                self.enable_ssa = False

        if self.enable_register_allocation:
            try:
                self.register_allocator = RegisterAllocator(
                    num_registers=num_allocator_registers,
                    num_temp_registers=8
                )
                if debug:
                    print(f"[VM] Register allocator enabled: {num_allocator_registers} registers")
            except Exception as e:
                if debug:
                    print(f"[VM] Failed to enable register allocator: {e}")
                self.enable_register_allocation = False

        # --- Execution Mode Configuration ---
        self.mode = mode
        self.worker_count = worker_count
        self.chunk_size = chunk_size
        self.num_registers = num_registers
        self.hybrid_mode = hybrid_mode

        # Initialize specialized VMs
        self._register_vm = None
        self._parallel_vm = None

        if _REGISTER_VM_AVAILABLE and (mode == VMMode.REGISTER or mode == VMMode.AUTO):
            self._register_vm = RegisterVM(
                num_registers=num_registers,
                hybrid_mode=hybrid_mode
            )

        if _PARALLEL_VM_AVAILABLE and (mode == VMMode.PARALLEL or mode == VMMode.AUTO):
            self._parallel_vm = ParallelVM(
                worker_count=worker_count or self._get_cpu_count(),
                chunk_size=chunk_size
            )

        if debug:
            print(f"[VM] Initialized | Mode: {mode.value} | JIT: {self.use_jit} | MemMgr: {self.use_memory_manager}")
|
|
320
|
+
|
|
321
|
+
def _get_cpu_count(self) -> int:
|
|
322
|
+
import os
|
|
323
|
+
try:
|
|
324
|
+
return len(os.sched_getaffinity(0))
|
|
325
|
+
except AttributeError:
|
|
326
|
+
return os.cpu_count() or 1
|
|
327
|
+
|
|
328
|
+
# ==================== Public Execution API ====================
|
|
329
|
+
|
|
330
|
+
def execute(self, code: Union[List[Tuple], Any], debug: bool = False) -> Any:
|
|
331
|
+
"""
|
|
332
|
+
Execute code (High-level ops or Bytecode) using optimal execution mode.
|
|
333
|
+
Blocks until completion (wraps async execution).
|
|
334
|
+
"""
|
|
335
|
+
start_time = time.perf_counter()
|
|
336
|
+
self._execution_count += 1
|
|
337
|
+
|
|
338
|
+
# Handle High-Level Ops (List format)
|
|
339
|
+
if isinstance(code, list) and not hasattr(code, "instructions"):
|
|
340
|
+
if debug or self.debug:
|
|
341
|
+
print("[VM] Executing High-Level Ops")
|
|
342
|
+
try:
|
|
343
|
+
# Run purely async internally, execute blocks
|
|
344
|
+
return asyncio.run(self._run_high_level_ops(code, debug or self.debug))
|
|
345
|
+
except Exception as e:
|
|
346
|
+
if debug or self.debug: print(f"[VM HL Error] {e}")
|
|
347
|
+
raise e
|
|
348
|
+
|
|
349
|
+
# Handle Low-Level Bytecode (Bytecode Object)
|
|
350
|
+
try:
|
|
351
|
+
execution_mode = self._select_execution_mode(code)
|
|
352
|
+
self._mode_usage[execution_mode.value] += 1
|
|
353
|
+
|
|
354
|
+
if debug or self.debug:
|
|
355
|
+
print(f"[VM] Executing Bytecode | Mode: {execution_mode.value}")
|
|
356
|
+
|
|
357
|
+
# 1. Register Mode (Optimized)
|
|
358
|
+
if execution_mode == VMMode.REGISTER and self._register_vm:
|
|
359
|
+
result = self._execute_register(code, debug)
|
|
360
|
+
|
|
361
|
+
# 2. Parallel Mode (Multi-core)
|
|
362
|
+
elif execution_mode == VMMode.PARALLEL and self._parallel_vm:
|
|
363
|
+
result = self._execute_parallel(code, debug)
|
|
364
|
+
|
|
365
|
+
# 3. Stack Mode (Standard/Fallback + Async Support)
|
|
366
|
+
else:
|
|
367
|
+
result = asyncio.run(self._execute_stack(code, debug))
|
|
368
|
+
|
|
369
|
+
# JIT Tracking
|
|
370
|
+
if self.use_jit and hasattr(code, 'instructions'):
|
|
371
|
+
execution_time = time.perf_counter() - start_time
|
|
372
|
+
self._track_execution_for_jit(code, execution_time, execution_mode)
|
|
373
|
+
|
|
374
|
+
return result
|
|
375
|
+
|
|
376
|
+
finally:
|
|
377
|
+
self._total_execution_time += (time.perf_counter() - start_time)
|
|
378
|
+
|
|
379
|
+
def _select_execution_mode(self, code) -> VMMode:
|
|
380
|
+
if self.mode != VMMode.AUTO:
|
|
381
|
+
return self.mode
|
|
382
|
+
|
|
383
|
+
if hasattr(code, 'instructions'):
|
|
384
|
+
instructions = code.instructions
|
|
385
|
+
if self._parallel_vm and self._is_parallelizable(instructions):
|
|
386
|
+
return VMMode.PARALLEL
|
|
387
|
+
if self._register_vm and self._is_register_friendly(instructions):
|
|
388
|
+
return VMMode.REGISTER
|
|
389
|
+
|
|
390
|
+
return VMMode.STACK
|
|
391
|
+
|
|
392
|
+
# ==================== Specialized Execution Methods ====================
|
|
393
|
+
|
|
394
|
+
async def _execute_stack(self, code, debug: bool = False):
|
|
395
|
+
"""Async wrapper for the core stack VM"""
|
|
396
|
+
if hasattr(code, "instructions"):
|
|
397
|
+
return await self._run_stack_bytecode(code, debug)
|
|
398
|
+
return None
|
|
399
|
+
|
|
400
|
+
def _execute_register(self, bytecode, debug: bool = False):
|
|
401
|
+
"""Execute using register-based VM"""
|
|
402
|
+
try:
|
|
403
|
+
# Ensure register VM has current environment and builtins
|
|
404
|
+
self._register_vm.env = self.env.copy()
|
|
405
|
+
self._register_vm.builtins = self.builtins.copy()
|
|
406
|
+
if hasattr(self._register_vm, '_parent_env'):
|
|
407
|
+
self._register_vm._parent_env = self._parent_env
|
|
408
|
+
|
|
409
|
+
result = self._register_vm.execute(bytecode)
|
|
410
|
+
|
|
411
|
+
# Sync back environment changes
|
|
412
|
+
self.env.update(self._register_vm.env)
|
|
413
|
+
|
|
414
|
+
return result
|
|
415
|
+
except Exception as e:
|
|
416
|
+
if debug: print(f"[VM Register] Failed: {e}, falling back to stack")
|
|
417
|
+
return asyncio.run(self._run_stack_bytecode(bytecode, debug))
|
|
418
|
+
|
|
419
|
+
def _execute_parallel(self, bytecode, debug: bool = False):
|
|
420
|
+
"""Execute using parallel VM"""
|
|
421
|
+
try:
|
|
422
|
+
return self._parallel_vm.execute_parallel(
|
|
423
|
+
bytecode,
|
|
424
|
+
initial_state={"env": self.env.copy(), "builtins": self.builtins.copy(), "parent_env": self._parent_env}
|
|
425
|
+
)
|
|
426
|
+
except Exception as e:
|
|
427
|
+
if debug: print(f"[VM Parallel] Failed: {e}, falling back to stack")
|
|
428
|
+
return asyncio.run(self._run_stack_bytecode(bytecode, debug))
|
|
429
|
+
|
|
430
|
+
# ==================== JIT & Optimization Heuristics ====================
|
|
431
|
+
|
|
432
|
+
def _is_parallelizable(self, instructions) -> bool:
|
|
433
|
+
if len(instructions) < 100: return False
|
|
434
|
+
independent_ops = sum(1 for op, _ in instructions if op in ['LOAD_CONST', 'ADD', 'SUB', 'MUL', 'HASH_BLOCK'])
|
|
435
|
+
return independent_ops / len(instructions) > 0.3
|
|
436
|
+
|
|
437
|
+
def _is_register_friendly(self, instructions) -> bool:
|
|
438
|
+
arith_ops = sum(1 for op, _ in instructions if op in ['ADD', 'SUB', 'MUL', 'DIV', 'EQ', 'LT'])
|
|
439
|
+
return arith_ops / max(len(instructions), 1) > 0.4
|
|
440
|
+
|
|
441
|
+
    def _track_execution_for_jit(self, bytecode, execution_time: float, execution_mode: VMMode):
        """Record one execution of *bytecode* and JIT-compile it once it is hot.

        Stats mutation happens under the JIT lock; the hot-path compilation
        itself is deliberately triggered outside the lock (with a locked
        double-check) so other threads are not blocked while compiling.
        No-op when JIT support is disabled or unavailable.
        """
        if not self.use_jit or not self.jit_compiler: return

        with self._jit_lock:
            hot_path_info = self.jit_compiler.track_execution(bytecode, execution_time)
            # Prefer the hash the tracker computed; re-hash only if absent.
            bytecode_hash = getattr(hot_path_info, 'bytecode_hash', None) or self.jit_compiler._hash_bytecode(bytecode)

            if bytecode_hash not in self._jit_execution_stats:
                self._jit_execution_stats[bytecode_hash] = []
            self._jit_execution_stats[bytecode_hash].append(execution_time)

        # Check if should compile (outside lock to avoid holding during compilation)
        should_compile = self.jit_compiler.should_compile(bytecode_hash)

        # Compile outside the lock to prevent blocking other executions
        if should_compile:
            if self.debug: print(f"[VM JIT] Compiling hot path: {bytecode_hash[:8]}")
            with self._jit_lock:
                # Double-check it hasn't been compiled by another thread
                if self.jit_compiler.should_compile(bytecode_hash):
                    self.jit_compiler.compile_hot_path(bytecode)
|
|
462
|
+
|
|
463
|
+
def get_jit_stats(self) -> Dict[str, Any]:
    """Return JIT compiler statistics, or a disabled marker when JIT is off."""
    if not (self.use_jit and self.jit_compiler):
        return {'jit_enabled': False}
    report = self.jit_compiler.get_stats()
    report.update(
        vm_hot_paths_tracked=len(self._jit_execution_stats),
        jit_enabled=True,
    )
    return report
|
|
470
|
+
|
|
471
|
+
def clear_jit_cache(self):
    """Drop every compiled hot path and the VM-side execution history."""
    if not self.use_jit or not self.jit_compiler:
        return
    with self._jit_lock:
        self.jit_compiler.clear_cache()
        self._jit_execution_stats.clear()
|
|
476
|
+
|
|
477
|
+
def get_stats(self) -> Dict[str, Any]:
    """Get comprehensive VM statistics.

    Always reports execution counters and feature flags; JIT and memory
    sub-reports are attached only when the feature is enabled.
    """
    summary: Dict[str, Any] = dict(
        execution_count=self._execution_count,
        total_execution_time=self._total_execution_time,
        mode_usage=self._mode_usage.copy(),
        jit_enabled=self.use_jit,
        memory_manager_enabled=self.use_memory_manager,
    )
    if self.use_jit:
        summary['jit_stats'] = self.get_jit_stats()
    if self.use_memory_manager:
        summary['memory_stats'] = self.get_memory_stats()
    return summary
|
|
494
|
+
|
|
495
|
+
# ==================== Memory Management API ====================
|
|
496
|
+
|
|
497
|
+
def get_memory_stats(self) -> Dict[str, Any]:
    """Return memory-manager statistics plus the VM-tracked object count,
    or a disabled marker when memory management is off.
    """
    if not (self.use_memory_manager and self.memory_manager):
        return {'memory_manager_enabled': False}
    with self._memory_lock:
        snapshot = self.memory_manager.get_stats()
        snapshot['managed_objects_count'] = len(self._managed_objects)
        return snapshot
|
|
504
|
+
|
|
505
|
+
def get_memory_report(self) -> str:
    """Get detailed memory report.

    Returns a human-readable multi-line summary of the memory manager's
    state, or a short notice when memory management is disabled.

    Fixes: the header used an f-string with no placeholders, and the
    report was built with repeated ``+=`` string concatenation; it is now
    assembled with a single ``"\\n".join`` (same output, trailing newline kept).
    """
    if not (self.use_memory_manager and self.memory_manager):
        return "Memory manager disabled"
    stats = self.get_memory_stats()
    lines = [
        "Memory Manager Report:",
        f"  Managed Objects: {stats.get('managed_objects_count', 0)}",
        f"  Total Allocations: {stats.get('total_allocations', 0)}",
        f"  Active Objects: {stats.get('active_objects', 0)}",
    ]
    # Original ended every line (incl. the last) with '\n'.
    return "\n".join(lines) + "\n"
|
|
515
|
+
|
|
516
|
+
def collect_garbage(self, force: bool = False) -> Dict[str, Any]:
    """Run garbage collection.

    With a memory manager: delegate to it, then drop local name->id
    mappings for objects it reported as collected; returns
    ``{'collected': n, 'gc_time': t}``.

    Without one, and only when *force* is true: clear every env variable
    that is not underscore-prefixed and not a builtin; returns the count
    cleared. Otherwise reports nothing collected.

    Fixes: replaced the manual append loop with comprehensions (PERF401)
    and dropped the redundant ``.keys()`` materialization.
    """
    if self.use_memory_manager and self.memory_manager:
        collected, gc_time = self.memory_manager.collect_garbage(force=force)
        # Cleanup local references to collected objects
        collected_ids = getattr(self.memory_manager, '_last_collected_ids', set())
        doomed = [name for name, obj_id in list(self._managed_objects.items())
                  if obj_id in collected_ids]
        for name in doomed:
            del self._managed_objects[name]
        return {'collected': collected, 'gc_time': gc_time}

    # Fallback: manual environment cleanup for non-managed memory.
    if force:
        initial_count = len(self.env)
        # Keep underscore-prefixed (internal) keys and builtins; drop the rest.
        keys_to_remove = [key for key in list(self.env)
                          if not key.startswith('_') and key not in self.builtins]
        for key in keys_to_remove:
            del self.env[key]
        cleared = initial_count - len(self.env)
        return {'collected': cleared, 'message': 'Environment variables cleared'}

    return {'collected': 0, 'message': 'Memory manager disabled or not forced'}
|
|
545
|
+
|
|
546
|
+
|
|
547
|
+
def _allocate_managed(self, value: Any, name: str = None, root: bool = False) -> int:
|
|
548
|
+
if not self.use_memory_manager or not self.memory_manager: return -1
|
|
549
|
+
try:
|
|
550
|
+
with self._memory_lock:
|
|
551
|
+
if name and name in self._managed_objects:
|
|
552
|
+
self.memory_manager.deallocate(self._managed_objects[name])
|
|
553
|
+
obj_id = self.memory_manager.allocate(value, root=root)
|
|
554
|
+
if name: self._managed_objects[name] = obj_id
|
|
555
|
+
return obj_id
|
|
556
|
+
except Exception:
|
|
557
|
+
return -1
|
|
558
|
+
|
|
559
|
+
def _get_managed(self, name: str) -> Any:
|
|
560
|
+
if not self.use_memory_manager or not self.memory_manager: return None
|
|
561
|
+
with self._memory_lock:
|
|
562
|
+
obj_id = self._managed_objects.get(name)
|
|
563
|
+
if obj_id is not None:
|
|
564
|
+
return self.memory_manager.get(obj_id)
|
|
565
|
+
return None
|
|
566
|
+
|
|
567
|
+
# ==================== Core Execution: High-Level Ops ====================
|
|
568
|
+
|
|
569
|
+
async def _run_high_level_ops(self, ops: List[Tuple], debug: bool = False):
    """Interpret a sequence of high-level (tuple-encoded) operations.

    Each op is a tuple whose first element is an opcode string
    ("DEFINE_SCREEN", "LET", "EXPR", ...). Returns the value produced by
    the last value-yielding op. Per-op exceptions are captured into the
    running result instead of propagating, so one failing op does not
    abort the whole program.  # NOTE(review): callers must check for Exception results
    """
    last = None
    for i, op in enumerate(ops):
        # Skip anything that is not a non-empty tuple/list op.
        if not isinstance(op, (list, tuple)) or len(op) == 0: continue
        code = op[0]
        if debug: print(f"[VM HL] op#{i}: {op}")
        try:
            if code == "DEFINE_SCREEN":
                _, name, props = op
                # Prefer the renderer backend; otherwise record in env.
                if _BACKEND_AVAILABLE: _BACKEND.define_screen(name, props)
                else: self.env.setdefault("screens", {})[name] = props
                last = None
            elif code == "DEFINE_COMPONENT":
                _, name, props = op
                if _BACKEND_AVAILABLE: _BACKEND.define_component(name, props)
                else: self.env.setdefault("components", {})[name] = props
                last = None
            elif code == "DEFINE_THEME":
                _, name, props = op
                self.env.setdefault("themes", {})[name] = props
            elif code == "CALL_BUILTIN":
                _, name, arg_ops = op
                args = [self._eval_hl_op(a) for a in arg_ops]
                last = await self._call_builtin_async(name, args)
            elif code == "LET":
                _, name, val_op = op
                val = self._eval_hl_op(val_op)
                # If val is a coroutine, await it
                if asyncio.iscoroutine(val) or isinstance(val, asyncio.Future):
                    val = await val
                self.env[name] = val
                last = None
            elif code == "EXPR":
                _, expr_op = op
                last = self._eval_hl_op(expr_op)
                # If last is a coroutine, await it
                if asyncio.iscoroutine(last) or isinstance(last, asyncio.Future):
                    last = await last
            elif code == "REGISTER_EVENT":
                _, name, props = op
                # Ensure an (initially empty) handler list exists for the event.
                self._events.setdefault(name, [])
            elif code == "EMIT_EVENT":
                _, name, payload_op = op
                payload = self._eval_hl_op(payload_op)
                handlers = self._events.get(name, [])
                # Handlers are invoked sequentially, each awaited in turn.
                for h in handlers:
                    await self._call_builtin_async(h, [payload])
            elif code == "IMPORT":
                _, module_path, alias = op
                try:
                    mod = importlib.import_module(module_path)
                    self.env[alias or module_path] = mod
                except Exception:
                    # Import failures bind None rather than raising.
                    self.env[alias or module_path] = None
            elif code == "DEFINE_ENUM":
                _, name, members = op
                self.env.setdefault("enums", {})[name] = members
            elif code == "DEFINE_PROTOCOL":
                _, name, spec = op
                self.env.setdefault("protocols", {})[name] = spec
            elif code == "AWAIT":
                _, inner_op = op
                evaluated = self._eval_hl_op(inner_op)
                # Await only awaitables; plain values pass through unchanged.
                last = await evaluated if (asyncio.iscoroutine(evaluated) or isinstance(evaluated, asyncio.Future)) else evaluated
            else:
                # Unknown opcode: yields no value.
                last = None
        except Exception as e:
            # Error becomes the op's value (see docstring note).
            last = e
    return last
|
|
638
|
+
|
|
639
|
+
def _eval_hl_op(self, op):
|
|
640
|
+
if not isinstance(op, tuple): return op
|
|
641
|
+
tag = op[0]
|
|
642
|
+
if tag == "LITERAL": return op[1]
|
|
643
|
+
if tag == "IDENT":
|
|
644
|
+
name = op[1]
|
|
645
|
+
if name in self.env: return self.env[name]
|
|
646
|
+
if name in self.builtins: return self.builtins[name]
|
|
647
|
+
return None
|
|
648
|
+
if tag == "CALL_BUILTIN":
|
|
649
|
+
name = op[1]; args = [self._eval_hl_op(a) for a in op[2]]
|
|
650
|
+
# Return a coroutine instead of calling asyncio.run() - let caller handle await
|
|
651
|
+
target = self.builtins.get(name) or self.env.get(name)
|
|
652
|
+
if asyncio.iscoroutinefunction(target):
|
|
653
|
+
return target(*args)
|
|
654
|
+
elif callable(target):
|
|
655
|
+
result = target(*args)
|
|
656
|
+
if asyncio.iscoroutine(result):
|
|
657
|
+
return result
|
|
658
|
+
return result
|
|
659
|
+
return None
|
|
660
|
+
if tag == "MAP": return {k: self._eval_hl_op(v) for k, v in op[1].items()}
|
|
661
|
+
if tag == "LIST": return [self._eval_hl_op(e) for e in op[1]]
|
|
662
|
+
return None
|
|
663
|
+
|
|
664
|
+
# ==================== Core Execution: Stack Bytecode ====================
|
|
665
|
+
|
|
666
|
+
async def _run_stack_bytecode(self, bytecode, debug=False):
    """Execute compiled stack bytecode.

    A JIT-compiled version of the bytecode is tried first; on a miss (or
    a JIT runtime failure) the instruction list is interpreted. Returns
    the value produced by an explicit RETURN, or the value left on top
    of the stack (None for an empty stack).
    """
    # 1. JIT Check (with thread safety)
    if self.use_jit and self.jit_compiler:
        with self._jit_lock:
            bytecode_hash = self.jit_compiler._hash_bytecode(bytecode)
            jit_function = self.jit_compiler.compilation_cache.get(bytecode_hash)

        if jit_function:
            try:
                start_t = time.perf_counter()
                stack = []
                result = jit_function(self, stack, self.env)
                with self._jit_lock:
                    self.jit_compiler.record_execution_time(bytecode_hash, time.perf_counter() - start_t, ExecutionTier.JIT_NATIVE)
                if debug: print(f"[VM JIT] Executed cached function")
                return result
            except Exception as e:
                # JIT failure is non-fatal: fall through to the interpreter.
                if debug: print(f"[VM JIT] Failed: {e}, falling back")

    # 2. Bytecode Execution Setup
    consts = list(getattr(bytecode, "constants", []))
    instrs = list(getattr(bytecode, "instructions", []))
    ip = 0
    stack: List[Any] = []

    # Bounds-checked constant-pool access; out-of-range index -> None.
    def const(idx): return consts[idx] if 0 <= idx < len(consts) else None

    # Lexical Resolution Helper (Closures/Cells)
    def _resolve(name):
        # 1. Local
        if name in self.env:
            val = self.env[name]
            return val.value if isinstance(val, Cell) else val
        # 2. Closure Cells (attached to VM)
        if name in self._closure_cells:
            return self._closure_cells[name].value
        # 3. Parent Chain
        p = self._parent_env
        while p is not None:
            if isinstance(p, VM):
                if name in p.env:
                    val = p.env[name]
                    return val.value if isinstance(val, Cell) else val
                if name in p._closure_cells:
                    return p._closure_cells[name].value
                p = p._parent_env
            else:
                # Plain-dict parent: one lookup, chain ends here.
                if name in p: return p[name]
                p = None
        return None

    def _store(name, value):
        # Update existing Cell in local env
        if name in self.env and isinstance(self.env[name], Cell):
            self.env[name].value = value; return
        # Update local non-cell
        if name in self.env:
            self.env[name] = value; return
        # Update Closure Cell
        if name in self._closure_cells:
            self._closure_cells[name].value = value; return
        # Update Parent Chain
        p = self._parent_env
        while p is not None:
            if isinstance(p, VM):
                if name in p._closure_cells:
                    p._closure_cells[name].value = value; return
                if name in p.env:
                    p.env[name] = value; return
                p = p._parent_env
            else:
                if name in p:
                    p[name] = value; return
                p = None
        # Default: Create local
        self.env[name] = value

    # 3. Execution Loop
    prev_ip = None
    while ip < len(instrs):
        op, operand = instrs[ip]
        if debug: print(f"[VM SL] ip={ip} op={op} operand={operand} stack={stack}")

        # Profile instruction (if enabled) - start timing
        instr_start_time = None
        if self.enable_profiling and self.profiler and self.profiler.enabled:
            if self.profiler.level in (ProfilingLevel.DETAILED, ProfilingLevel.FULL):
                instr_start_time = time.perf_counter()
            # Record instruction (count only for BASIC level)
            self.profiler.record_instruction(ip, op, operand, prev_ip, len(stack))

        prev_ip = ip
        ip += 1

        # --- Basic Stack Ops ---
        if op == "LOAD_CONST":
            stack.append(const(operand))
        elif op == "LOAD_NAME":
            name = const(operand)
            stack.append(_resolve(name))
        elif op == "STORE_NAME":
            name = const(operand)
            val = stack.pop() if stack else None
            _store(name, val)
            # Mirror the binding into the memory manager when enabled.
            if self.use_memory_manager and val is not None:
                self._allocate_managed(val, name=name)
        elif op == "POP":
            if stack: stack.pop()
        elif op == "DUP":
            if stack: stack.append(stack[-1])
        elif op == "PRINT":
            val = stack.pop() if stack else None
            print(val)

        # --- Function/Closure Ops ---
        elif op == "STORE_FUNC":
            name_idx, func_idx = operand
            name = const(name_idx)
            func_desc = const(func_idx)
            # Create func descriptor, capturing current VM as parent
            func_desc_copy = dict(func_desc) if isinstance(func_desc, dict) else {"bytecode": func_desc}
            func_desc_copy["parent_vm"] = self
            self.env[name] = func_desc_copy

        elif op == "CALL_NAME":
            name_idx, arg_count = operand
            func_name = const(name_idx)
            # Args were pushed left-to-right; reverse the pops to restore order.
            args = [stack.pop() for _ in range(arg_count)][::-1] if arg_count else []
            fn = _resolve(func_name) or self.builtins.get(func_name)
            res = await self._invoke_callable_or_funcdesc(fn, args)
            stack.append(res)

        elif op == "CALL_TOP":
            arg_count = operand
            args = [stack.pop() for _ in range(arg_count)][::-1] if arg_count else []
            fn_obj = stack.pop() if stack else None
            res = await self._invoke_callable_or_funcdesc(fn_obj, args)
            stack.append(res)

        # --- Arithmetic & Logic ---
        # NOTE(review): underflow defaults (0/1/None/False) silently tolerate
        # malformed bytecode instead of raising.
        elif op == "ADD":
            b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
            # Auto-unwrap evaluator objects
            if hasattr(a, 'value'): a = a.value
            if hasattr(b, 'value'): b = b.value
            stack.append(a + b)
        elif op == "SUB":
            b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
            if hasattr(a, 'value'): a = a.value
            if hasattr(b, 'value'): b = b.value
            stack.append(a - b)
        elif op == "MUL":
            b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
            if hasattr(a, 'value'): a = a.value
            if hasattr(b, 'value'): b = b.value
            stack.append(a * b)
        elif op == "DIV":
            b = stack.pop() if stack else 1; a = stack.pop() if stack else 0
            if hasattr(a, 'value'): a = a.value
            if hasattr(b, 'value'): b = b.value
            # Division by zero yields 0 instead of raising.
            stack.append(a / b if b != 0 else 0)
        elif op == "MOD":
            b = stack.pop() if stack else 1; a = stack.pop() if stack else 0
            stack.append(a % b if b != 0 else 0)
        elif op == "POW":
            b = stack.pop() if stack else 1; a = stack.pop() if stack else 0
            stack.append(a ** b)
        elif op == "NEG":
            a = stack.pop() if stack else 0
            stack.append(-a)
        elif op == "EQ":
            b = stack.pop() if stack else None; a = stack.pop() if stack else None
            stack.append(a == b)
        elif op == "NEQ":
            b = stack.pop() if stack else None; a = stack.pop() if stack else None
            stack.append(a != b)
        elif op == "LT":
            b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
            stack.append(a < b)
        elif op == "GT":
            b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
            stack.append(a > b)
        elif op == "LTE":
            b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
            stack.append(a <= b)
        elif op == "GTE":
            b = stack.pop() if stack else 0; a = stack.pop() if stack else 0
            stack.append(a >= b)
        elif op == "NOT":
            a = stack.pop() if stack else False
            stack.append(not a)

        # --- Control Flow ---
        elif op == "JUMP":
            ip = operand
        elif op == "JUMP_IF_FALSE":
            cond = stack.pop() if stack else None
            if not cond: ip = operand
        elif op == "RETURN":
            return stack.pop() if stack else None

        # --- Collections ---
        elif op == "BUILD_LIST":
            count = operand if operand is not None else 0
            elements = [stack.pop() for _ in range(count)][::-1]
            stack.append(elements)
        elif op == "BUILD_MAP":
            count = operand if operand is not None else 0
            result = {}
            # Pairs were pushed key-then-value, so value pops first.
            for _ in range(count):
                val = stack.pop(); key = stack.pop()
                result[key] = val
            stack.append(result)
        elif op == "INDEX":
            idx = stack.pop(); obj = stack.pop()
            try: stack.append(obj[idx] if obj is not None else None)
            except (IndexError, KeyError, TypeError): stack.append(None)
        elif op == "GET_LENGTH":
            obj = stack.pop()
            try:
                if obj is None:
                    stack.append(0)
                elif hasattr(obj, '__len__'):
                    stack.append(len(obj))
                else:
                    stack.append(0)
            except (TypeError, AttributeError):
                stack.append(0)

        # --- Async & Events ---
        elif op == "SPAWN":
            # operand: tuple ("CALL", func_name, arg_count) OR index
            task_handle = None
            if isinstance(operand, tuple) and operand[0] == "CALL":
                fn_name = operand[1]; arg_count = operand[2]
                args = [stack.pop() for _ in range(arg_count)][::-1]
                fn = self.builtins.get(fn_name) or self.env.get(fn_name)
                coro = self._to_coro(fn, args)

                # Use async optimizer if available
                if self.async_optimizer:
                    coro = self.async_optimizer.spawn(coro)
                    task = asyncio.create_task(coro)
                else:
                    task = asyncio.create_task(coro)

                # Hand back an opaque string id, resolved again by AWAIT.
                self._task_counter += 1
                tid = f"task_{self._task_counter}"
                self._tasks[tid] = task
                task_handle = tid
            stack.append(task_handle)

        elif op == "AWAIT":
            # Keep popping until we find a task to await
            result_found = False
            temp_stack = []

            while stack and not result_found:
                top = stack.pop()

                if isinstance(top, str) and top in self._tasks:
                    # Use async optimizer if available
                    if self.async_optimizer:
                        res = await self.async_optimizer.await_optimized(self._tasks[top])
                    else:
                        res = await self._tasks[top]
                    # Push back any non-task values we skipped
                    for val in reversed(temp_stack):
                        stack.append(val)
                    stack.append(res)
                    result_found = True
                elif asyncio.iscoroutine(top) or isinstance(top, asyncio.Future):
                    # Use async optimizer if available
                    if self.async_optimizer:
                        res = await self.async_optimizer.await_optimized(top)
                    else:
                        res = await top
                    # Push back any non-task values we skipped
                    for val in reversed(temp_stack):
                        stack.append(val)
                    stack.append(res)
                    result_found = True
                else:
                    # Not a task, save it and keep looking
                    temp_stack.append(top)

            # If no task was found, put everything back
            if not result_found:
                for val in reversed(temp_stack):
                    stack.append(val)

        elif op == "REGISTER_EVENT":
            event_name = const(operand[0]) if isinstance(operand, (list,tuple)) else const(operand)
            handler = const(operand[1]) if isinstance(operand, (list,tuple)) else None
            self._events.setdefault(event_name, []).append(handler)

        elif op == "EMIT_EVENT":
            event_name = const(operand[0])
            payload = const(operand[1]) if isinstance(operand, (list,tuple)) and len(operand) > 1 else None
            handlers = self._events.get(event_name, [])
            # Fire-and-forget: handler tasks are not awaited here.
            for h in handlers:
                fn = self.builtins.get(h) or self.env.get(h)
                asyncio.create_task(self._call_builtin_async_obj(fn, [payload]))

        elif op == "IMPORT":
            mod_name = const(operand[0])
            alias = const(operand[1]) if isinstance(operand, (list,tuple)) and len(operand) > 1 else None
            try:
                mod = importlib.import_module(mod_name)
                self.env[alias or mod_name] = mod
            except Exception:
                # Failed imports bind None rather than raising.
                self.env[alias or mod_name] = None

        elif op == "DEFINE_ENUM":
            enum_name = const(operand[0])
            enum_map = const(operand[1])
            self.env[enum_name] = enum_map

        elif op == "ASSERT_PROTOCOL":
            obj_name = const(operand[0])
            spec = const(operand[1])
            obj = self.env.get(obj_name)
            ok = True
            missing = []
            # Structural check: every declared method must exist on obj.
            for m in spec.get("methods", []):
                if not hasattr(obj, m):
                    ok = False; missing.append(m)
            stack.append((ok, missing))

        # --- Blockchain Specific Opcodes ---

        elif op == "HASH_BLOCK":
            block_data = stack.pop() if stack else ""
            # Dicts are canonicalized (sorted keys) before hashing.
            if isinstance(block_data, dict):
                import json; block_data = json.dumps(block_data, sort_keys=True)
            if not isinstance(block_data, (bytes, str)): block_data = str(block_data)
            if isinstance(block_data, str): block_data = block_data.encode('utf-8')
            stack.append(hashlib.sha256(block_data).hexdigest())

        elif op == "VERIFY_SIGNATURE":
            if len(stack) >= 3:
                pk = stack.pop(); msg = stack.pop(); sig = stack.pop()
                verify_fn = self.builtins.get("verify_sig") or self.env.get("verify_sig")
                if verify_fn:
                    res = await self._invoke_callable_or_funcdesc(verify_fn, [sig, msg, pk])
                    stack.append(res)
                else:
                    # Fallback for testing
                    expected = hashlib.sha256(str(msg).encode()).hexdigest()
                    stack.append(sig == expected)
            else:
                stack.append(False)

        elif op == "MERKLE_ROOT":
            leaf_count = operand if operand is not None else 0
            if leaf_count <= 0:
                stack.append("")
            else:
                leaves = [stack.pop() for _ in range(leaf_count)][::-1] if len(stack) >= leaf_count else []
                hashes = []
                for leaf in leaves:
                    if isinstance(leaf, dict):
                        import json; leaf = json.dumps(leaf, sort_keys=True)
                    if not isinstance(leaf, (str, bytes)): leaf = str(leaf)
                    if isinstance(leaf, str): leaf = leaf.encode('utf-8')
                    hashes.append(hashlib.sha256(leaf).hexdigest())

                # Pairwise reduce; odd levels duplicate the last hash.
                while len(hashes) > 1:
                    if len(hashes) % 2 != 0: hashes.append(hashes[-1])
                    new_hashes = []
                    for i in range(0, len(hashes), 2):
                        combined = (hashes[i] + hashes[i+1]).encode('utf-8')
                        new_hashes.append(hashlib.sha256(combined).hexdigest())
                    hashes = new_hashes
                stack.append(hashes[0] if hashes else "")

        elif op == "STATE_READ":
            key = const(operand)
            stack.append(self.env.setdefault("_blockchain_state", {}).get(key))

        elif op == "STATE_WRITE":
            key = const(operand)
            val = stack.pop() if stack else None
            # Inside a transaction, writes are buffered until TX_COMMIT.
            if self.env.get("_in_transaction", False):
                self.env.setdefault("_tx_pending_state", {})[key] = val
            else:
                self.env.setdefault("_blockchain_state", {})[key] = val

        elif op == "TX_BEGIN":
            self.env["_in_transaction"] = True
            self.env["_tx_pending_state"] = {}
            # Snapshot taken so TX_REVERT / out-of-gas can roll back.
            self.env["_tx_snapshot"] = dict(self.env.get("_blockchain_state", {}))
            if self.use_memory_manager: self.env["_tx_memory_snapshot"] = dict(self._managed_objects)

        elif op == "TX_COMMIT":
            if self.env.get("_in_transaction", False):
                self.env.setdefault("_blockchain_state", {}).update(self.env.get("_tx_pending_state", {}))
                self.env["_in_transaction"] = False
                self.env["_tx_pending_state"] = {}
                if "_tx_memory_snapshot" in self.env: del self.env["_tx_memory_snapshot"]

        elif op == "TX_REVERT":
            if self.env.get("_in_transaction", False):
                self.env["_blockchain_state"] = dict(self.env.get("_tx_snapshot", {}))
                self.env["_in_transaction"] = False
                self.env["_tx_pending_state"] = {}
                if self.use_memory_manager and "_tx_memory_snapshot" in self.env:
                    self._managed_objects = dict(self.env["_tx_memory_snapshot"])

        elif op == "GAS_CHARGE":
            amount = operand if operand is not None else 0
            # Missing budget means unlimited gas (inf): charging is skipped.
            current = self.env.get("_gas_remaining", float('inf'))
            if current != float('inf'):
                new_gas = current - amount
                if new_gas < 0:
                    # Revert if in TX
                    if self.env.get("_in_transaction", False):
                        self.env["_blockchain_state"] = dict(self.env.get("_tx_snapshot", {}))
                        self.env["_in_transaction"] = False
                    # Out of gas aborts execution with an error record.
                    stack.append({"error": "OutOfGas", "required": amount, "remaining": current})
                    return stack[-1]
                self.env["_gas_remaining"] = new_gas

        elif op == "LEDGER_APPEND":
            entry = stack.pop() if stack else None
            # Dict entries are timestamped if they are not already.
            if isinstance(entry, dict) and "timestamp" not in entry:
                entry["timestamp"] = time.time()
            self.env.setdefault("_ledger", []).append(entry)

        else:
            if debug: print(f"[VM] Unknown Opcode: {op}")

        # Record instruction timing (if profiling enabled)
        if instr_start_time is not None and self.profiler:
            elapsed = time.perf_counter() - instr_start_time
            self.profiler.measure_instruction(ip, elapsed)

    return stack[-1] if stack else None
|
|
1104
|
+
|
|
1105
|
+
# ==================== Helpers ====================
|
|
1106
|
+
|
|
1107
|
+
async def _invoke_callable_or_funcdesc(self, fn, args, is_constant=False):
    """Invoke either a VM function descriptor or a plain Python callable.

    A function descriptor is a dict containing "bytecode" (and optionally
    "params" / "parent_vm" for closures); it runs in a fresh child VM.
    Anything else is routed to the builtin-callable path.
    `is_constant` is accepted but unused.  # NOTE(review): dead parameter?
    """
    # 1. Function Descriptor (VM Bytecode Closure)
    if isinstance(fn, dict) and "bytecode" in fn:
        func_bc = fn["bytecode"]
        params = fn.get("params", [])
        is_async = fn.get("is_async", False)  # NOTE(review): read but never used below
        # Use captured parent_vm (closure), fallback to self
        parent_env = fn.get("parent_vm", self)

        # Bind positionals to parameter names; zip drops surplus args.
        local_env = {k: v for k, v in zip(params, args)}

        inner_vm = VM(
            builtins=self.builtins,
            env=local_env,
            parent_env=parent_env,
            # Inherit configuration
            use_jit=self.use_jit,
            use_memory_manager=self.use_memory_manager
        )
        return await inner_vm._run_stack_bytecode(func_bc, debug=False)

    # 2. Python Callable / Builtin Wrapper
    return await self._call_builtin_async_obj(fn, args)
|
|
1130
|
+
|
|
1131
|
+
async def _call_builtin_async(self, name: str, args: List[Any]):
    """Invoke builtin *name* with *args*.

    A same-named hook on the renderer backend takes precedence; otherwise
    the name is resolved from builtins first, then the environment.
    """
    resolved = self.builtins.get(name) or self.env.get(name)

    # Check Renderer Backend
    if _BACKEND_AVAILABLE and hasattr(_BACKEND, name):
        hook = getattr(_BACKEND, name)
        if asyncio.iscoroutinefunction(hook):
            return await hook(*args)
        return hook(*args)

    return await self._call_builtin_async_obj(resolved, args)
|
|
1141
|
+
|
|
1142
|
+
async def _call_builtin_async_obj(self, fn_obj, args: List[Any]):
|
|
1143
|
+
try:
|
|
1144
|
+
if fn_obj is None: return None
|
|
1145
|
+
|
|
1146
|
+
# Extract .fn if it's a wrapper
|
|
1147
|
+
real_fn = fn_obj.fn if hasattr(fn_obj, "fn") else fn_obj
|
|
1148
|
+
|
|
1149
|
+
if not callable(real_fn): return real_fn
|
|
1150
|
+
|
|
1151
|
+
res = real_fn(*args)
|
|
1152
|
+
if asyncio.iscoroutine(res) or isinstance(res, asyncio.Future):
|
|
1153
|
+
return await res
|
|
1154
|
+
return res
|
|
1155
|
+
except Exception as e:
|
|
1156
|
+
return e
|
|
1157
|
+
|
|
1158
|
+
def _to_coro(self, fn, args):
|
|
1159
|
+
if asyncio.iscoroutinefunction(fn):
|
|
1160
|
+
return fn(*args)
|
|
1161
|
+
async def _wrap():
|
|
1162
|
+
if callable(fn): return fn(*args)
|
|
1163
|
+
return None
|
|
1164
|
+
return _wrap()
|
|
1165
|
+
|
|
1166
|
+
def profile_execution(self, bytecode, iterations: int = 1000) -> Dict[str, Any]:
    """Benchmark *bytecode* across the available execution modes.

    Runs the stack interpreter `iterations` times (and the register VM,
    when present) and reports total/average timings plus the register
    speed-up relative to stack mode.
    """
    import timeit

    report: Dict[str, Any] = {'iterations': iterations, 'modes': {}}

    # Stack interpreter baseline.
    stack_total = timeit.timeit(
        lambda: asyncio.run(self._execute_stack(bytecode)), number=iterations
    )
    report['modes']['stack'] = {'total': stack_total, 'avg': stack_total / iterations}

    # Register VM, when one is attached.
    if self._register_vm:
        reg_total = timeit.timeit(
            lambda: self._execute_register(bytecode), number=iterations
        )
        report['modes']['register'] = {'total': reg_total, 'speedup': stack_total / reg_total}

    return report
|
|
1183
|
+
|
|
1184
|
+
# ==================== Profiler Interface ====================
|
|
1185
|
+
|
|
1186
|
+
def start_profiling(self):
    """Begin a profiling session (no-op when profiling is disabled)."""
    if not self.profiler:
        return
    self.profiler.start()
|
|
1190
|
+
|
|
1191
|
+
def stop_profiling(self):
    """End the current profiling session (no-op when profiling is disabled)."""
    if not self.profiler:
        return
    self.profiler.stop()
|
|
1195
|
+
|
|
1196
|
+
def get_profiling_report(self, format: str = 'text', top_n: int = 20) -> str:
    """Render the profiler's report; returns a notice when profiling is off."""
    if not self.profiler:
        return "Profiling not enabled"
    return self.profiler.generate_report(format=format, top_n=top_n)
|
|
1201
|
+
|
|
1202
|
+
def get_profiling_summary(self) -> Dict[str, Any]:
    """Return summary statistics from the profiler, or an error marker when disabled."""
    if not self.profiler:
        return {'error': 'Profiling not enabled'}
    return self.profiler.get_summary()
|
|
1207
|
+
|
|
1208
|
+
def reset_profiler(self):
    """Clear accumulated profiler statistics; no-op without a profiler."""
    if not self.profiler:
        return
    self.profiler.reset()
|
|
1212
|
+
|
|
1213
|
+
# ==================== Memory Pool Interface ====================
|
|
1214
|
+
|
|
1215
|
+
def allocate_integer(self, value: int) -> int:
    """Return *value*, routed through the integer pool when one is active."""
    pool = self.integer_pool
    return pool.get(value) if pool else value
|
|
1220
|
+
|
|
1221
|
+
def release_integer(self, value: int):
    """No-op: pooled integers are shared/interned and never explicitly freed."""
    # IntegerPool hands out shared objects; there is nothing to reclaim.
    pass
|
|
1225
|
+
|
|
1226
|
+
def allocate_string(self, value: str) -> str:
    """Return *value*, routed through the string pool when one is active."""
    pool = self.string_pool
    return pool.get(value) if pool else value
|
|
1231
|
+
|
|
1232
|
+
def release_string(self, value: str):
    """No-op: the string pool relies on interning, so nothing is released."""
    # StringPool shares interned strings; explicit release is unnecessary.
    pass
|
|
1236
|
+
|
|
1237
|
+
def allocate_list(self, initial_capacity: int = 0) -> list:
    """Obtain a list sized to *initial_capacity*, from the pool when enabled."""
    if self.list_pool:
        return self.list_pool.acquire(initial_capacity)
    # Fallback: plain list pre-filled with None ([None] * 0 is just []).
    return [None] * initial_capacity
|
|
1242
|
+
|
|
1243
|
+
def release_list(self, value: list):
    """Hand *value* back to the list pool for reuse; no-op without a pool."""
    if not self.list_pool:
        return
    self.list_pool.release(value)
|
|
1247
|
+
|
|
1248
|
+
def get_pool_stats(self) -> Dict[str, Any]:
    """Collect statistics from every active memory pool.

    Returns an error marker when pooling is disabled; otherwise a dict
    keyed by pool name for each pool that exists.
    """
    if not self.enable_memory_pool:
        return {'error': 'Memory pooling not enabled'}

    pool_stats: Dict[str, Any] = {}
    if self.integer_pool:
        pool_stats['integer_pool'] = self.integer_pool.stats.to_dict()
    if self.string_pool:
        pool_stats['string_pool'] = self.string_pool.stats.to_dict()
    if self.list_pool:
        pool_stats['list_pool'] = self.list_pool.get_stats()
    return pool_stats
|
|
1262
|
+
|
|
1263
|
+
def reset_pools(self):
    """Clear every memory pool that is currently active."""
    for pool in (self.integer_pool, self.string_pool, self.list_pool):
        if pool:
            pool.clear()
|
|
1271
|
+
|
|
1272
|
+
# ==================== Peephole Optimizer Interface ====================
|
|
1273
|
+
|
|
1274
|
+
def optimize_bytecode(self, bytecode):
    """Run the peephole optimizer over *bytecode*.

    Args:
        bytecode: Bytecode object or list of instructions.

    Returns:
        The optimized bytecode, or the input unchanged when the
        optimizer is disabled.
    """
    optimizer = self.peephole_optimizer
    return optimizer.optimize(bytecode) if optimizer else bytecode
|
|
1288
|
+
|
|
1289
|
+
def get_optimizer_stats(self) -> Dict[str, Any]:
    """Report peephole optimizer statistics, or an error marker when disabled."""
    optimizer = self.peephole_optimizer
    if not optimizer:
        return {'error': 'Peephole optimizer not enabled'}
    return optimizer.stats.to_dict()
|
|
1295
|
+
|
|
1296
|
+
def reset_optimizer_stats(self):
    """Zero the peephole optimizer's counters; no-op when disabled."""
    if not self.peephole_optimizer:
        return
    self.peephole_optimizer.reset_stats()
|
|
1300
|
+
|
|
1301
|
+
# ==================== Async Optimizer Interface ====================
|
|
1302
|
+
|
|
1303
|
+
def get_async_stats(self) -> Dict[str, Any]:
    """Report async optimizer statistics, or an error marker when disabled."""
    if self.async_optimizer:
        return self.async_optimizer.get_stats()
    return {'error': 'Async optimizer not enabled'}
|
|
1309
|
+
|
|
1310
|
+
def reset_async_stats(self):
    """Zero the async optimizer's counters; no-op when disabled."""
    if not self.async_optimizer:
        return
    self.async_optimizer.reset_stats()
|
|
1314
|
+
|
|
1315
|
+
# ==================== SSA & Register Allocator Interface ====================
|
|
1316
|
+
|
|
1317
|
+
def convert_to_ssa(self, instructions: List[Tuple]) -> Optional['SSAProgram']:
    """Translate bytecode *instructions* into SSA form.

    Args:
        instructions: List of bytecode instructions.

    Returns:
        The resulting SSAProgram, or None when SSA conversion is not
        enabled.
    """
    converter = self.ssa_converter
    if not converter:
        return None
    return converter.convert_to_ssa(instructions)
|
|
1331
|
+
|
|
1332
|
+
def allocate_registers(
    self,
    instructions: List[Tuple]
) -> Optional['AllocationResult']:
    """Assign registers to *instructions* via the configured allocator.

    Args:
        instructions: List of bytecode instructions.

    Returns:
        The AllocationResult, or None when no register allocator is
        enabled.
    """
    allocator = self.register_allocator
    if not allocator:
        return None

    # Liveness analysis first, then the actual register assignment.
    ranges = compute_live_ranges(instructions)
    return allocator.allocate(instructions, ranges)
|
|
1353
|
+
|
|
1354
|
+
def get_ssa_stats(self) -> Dict[str, Any]:
    """Report SSA converter statistics, or an error marker when disabled."""
    if self.ssa_converter:
        return self.ssa_converter.get_stats()
    return {'error': 'SSA converter not enabled'}
|
|
1360
|
+
|
|
1361
|
+
def get_allocator_stats(self) -> Dict[str, Any]:
    """Report register allocator statistics, or an error marker when disabled."""
    if self.register_allocator:
        return self.register_allocator.get_stats()
    return {'error': 'Register allocator not enabled'}
|
|
1367
|
+
|
|
1368
|
+
def reset_ssa_stats(self):
    """Zero the SSA converter's counters; no-op when disabled."""
    if not self.ssa_converter:
        return
    self.ssa_converter.reset_stats()
|
|
1372
|
+
|
|
1373
|
+
def reset_allocator_stats(self):
    """Zero the register allocator's counters; no-op when disabled."""
    if not self.register_allocator:
        return
    self.register_allocator.reset_stats()
|
|
1377
|
+
|
|
1378
|
+
# ==================== Factory Functions ====================
|
|
1379
|
+
|
|
1380
|
+
def create_vm(mode: str = "auto", use_jit: bool = True, **kwargs) -> VM:
    """Build a :class:`VM` configured for the given execution *mode*.

    Extra keyword arguments are forwarded to the VM constructor.
    """
    vm_mode = VMMode(mode.lower())
    return VM(mode=vm_mode, use_jit=use_jit, **kwargs)
|
|
1382
|
+
|
|
1383
|
+
def create_high_performance_vm() -> VM:
    """Construct a :class:`VM` with every optimization feature switched on."""
    config = dict(
        mode="auto",
        use_jit=True,
        use_memory_manager=True,
        enable_memory_pool=True,
        enable_peephole_optimizer=True,
        optimization_level="AGGRESSIVE",
        worker_count=4,
    )
    return create_vm(**config)
|