zexus 1.6.8 → 1.7.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (177)
  1. package/README.md +12 -5
  2. package/package.json +1 -1
  3. package/src/__init__.py +7 -0
  4. package/src/zexus/__init__.py +1 -1
  5. package/src/zexus/__pycache__/__init__.cpython-312.pyc +0 -0
  6. package/src/zexus/__pycache__/capability_system.cpython-312.pyc +0 -0
  7. package/src/zexus/__pycache__/debug_sanitizer.cpython-312.pyc +0 -0
  8. package/src/zexus/__pycache__/environment.cpython-312.pyc +0 -0
  9. package/src/zexus/__pycache__/error_reporter.cpython-312.pyc +0 -0
  10. package/src/zexus/__pycache__/input_validation.cpython-312.pyc +0 -0
  11. package/src/zexus/__pycache__/lexer.cpython-312.pyc +0 -0
  12. package/src/zexus/__pycache__/module_cache.cpython-312.pyc +0 -0
  13. package/src/zexus/__pycache__/module_manager.cpython-312.pyc +0 -0
  14. package/src/zexus/__pycache__/object.cpython-312.pyc +0 -0
  15. package/src/zexus/__pycache__/security.cpython-312.pyc +0 -0
  16. package/src/zexus/__pycache__/security_enforcement.cpython-312.pyc +0 -0
  17. package/src/zexus/__pycache__/syntax_validator.cpython-312.pyc +0 -0
  18. package/src/zexus/__pycache__/zexus_ast.cpython-312.pyc +0 -0
  19. package/src/zexus/__pycache__/zexus_token.cpython-312.pyc +0 -0
  20. package/src/zexus/access_control_system/__pycache__/__init__.cpython-312.pyc +0 -0
  21. package/src/zexus/access_control_system/__pycache__/access_control.cpython-312.pyc +0 -0
  22. package/src/zexus/advanced_types.py +17 -2
  23. package/src/zexus/blockchain/__init__.py +411 -0
  24. package/src/zexus/blockchain/accelerator.py +1160 -0
  25. package/src/zexus/blockchain/chain.py +660 -0
  26. package/src/zexus/blockchain/consensus.py +821 -0
  27. package/src/zexus/blockchain/contract_vm.py +1019 -0
  28. package/src/zexus/blockchain/crypto.py +79 -14
  29. package/src/zexus/blockchain/events.py +526 -0
  30. package/src/zexus/blockchain/loadtest.py +721 -0
  31. package/src/zexus/blockchain/monitoring.py +350 -0
  32. package/src/zexus/blockchain/mpt.py +716 -0
  33. package/src/zexus/blockchain/multichain.py +951 -0
  34. package/src/zexus/blockchain/multiprocess_executor.py +338 -0
  35. package/src/zexus/blockchain/network.py +886 -0
  36. package/src/zexus/blockchain/node.py +666 -0
  37. package/src/zexus/blockchain/rpc.py +1203 -0
  38. package/src/zexus/blockchain/rust_bridge.py +421 -0
  39. package/src/zexus/blockchain/storage.py +423 -0
  40. package/src/zexus/blockchain/tokens.py +750 -0
  41. package/src/zexus/blockchain/upgradeable.py +1004 -0
  42. package/src/zexus/blockchain/verification.py +1602 -0
  43. package/src/zexus/blockchain/wallet.py +621 -0
  44. package/src/zexus/capability_system.py +184 -9
  45. package/src/zexus/cli/__pycache__/main.cpython-312.pyc +0 -0
  46. package/src/zexus/cli/main.py +383 -34
  47. package/src/zexus/cli/zpm.py +1 -1
  48. package/src/zexus/compiler/__pycache__/bytecode.cpython-312.pyc +0 -0
  49. package/src/zexus/compiler/__pycache__/lexer.cpython-312.pyc +0 -0
  50. package/src/zexus/compiler/__pycache__/parser.cpython-312.pyc +0 -0
  51. package/src/zexus/compiler/__pycache__/semantic.cpython-312.pyc +0 -0
  52. package/src/zexus/compiler/__pycache__/zexus_ast.cpython-312.pyc +0 -0
  53. package/src/zexus/compiler/bytecode.py +124 -7
  54. package/src/zexus/compiler/compat_runtime.py +6 -2
  55. package/src/zexus/compiler/lexer.py +16 -5
  56. package/src/zexus/compiler/parser.py +108 -7
  57. package/src/zexus/compiler/semantic.py +18 -19
  58. package/src/zexus/compiler/zexus_ast.py +26 -1
  59. package/src/zexus/concurrency_system.py +79 -0
  60. package/src/zexus/config.py +54 -0
  61. package/src/zexus/crypto_bridge.py +244 -8
  62. package/src/zexus/dap/__init__.py +10 -0
  63. package/src/zexus/dap/__main__.py +4 -0
  64. package/src/zexus/dap/dap_server.py +391 -0
  65. package/src/zexus/dap/debug_engine.py +298 -0
  66. package/src/zexus/environment.py +112 -9
  67. package/src/zexus/evaluator/__pycache__/bytecode_compiler.cpython-312.pyc +0 -0
  68. package/src/zexus/evaluator/__pycache__/core.cpython-312.pyc +0 -0
  69. package/src/zexus/evaluator/__pycache__/expressions.cpython-312.pyc +0 -0
  70. package/src/zexus/evaluator/__pycache__/functions.cpython-312.pyc +0 -0
  71. package/src/zexus/evaluator/__pycache__/resource_limiter.cpython-312.pyc +0 -0
  72. package/src/zexus/evaluator/__pycache__/statements.cpython-312.pyc +0 -0
  73. package/src/zexus/evaluator/__pycache__/unified_execution.cpython-312.pyc +0 -0
  74. package/src/zexus/evaluator/__pycache__/utils.cpython-312.pyc +0 -0
  75. package/src/zexus/evaluator/bytecode_compiler.py +457 -37
  76. package/src/zexus/evaluator/core.py +644 -50
  77. package/src/zexus/evaluator/expressions.py +358 -62
  78. package/src/zexus/evaluator/functions.py +458 -20
  79. package/src/zexus/evaluator/resource_limiter.py +4 -4
  80. package/src/zexus/evaluator/statements.py +774 -122
  81. package/src/zexus/evaluator/unified_execution.py +573 -72
  82. package/src/zexus/evaluator/utils.py +14 -2
  83. package/src/zexus/evaluator_original.py +1 -1
  84. package/src/zexus/event_loop.py +186 -0
  85. package/src/zexus/lexer.py +742 -458
  86. package/src/zexus/lsp/__init__.py +1 -1
  87. package/src/zexus/lsp/definition_provider.py +163 -9
  88. package/src/zexus/lsp/server.py +22 -8
  89. package/src/zexus/lsp/symbol_provider.py +182 -9
  90. package/src/zexus/module_cache.py +239 -9
  91. package/src/zexus/module_manager.py +129 -1
  92. package/src/zexus/object.py +76 -6
  93. package/src/zexus/parser/__pycache__/parser.cpython-312.pyc +0 -0
  94. package/src/zexus/parser/__pycache__/strategy_context.cpython-312.pyc +0 -0
  95. package/src/zexus/parser/__pycache__/strategy_structural.cpython-312.pyc +0 -0
  96. package/src/zexus/parser/parser.py +1349 -408
  97. package/src/zexus/parser/strategy_context.py +755 -58
  98. package/src/zexus/parser/strategy_structural.py +121 -21
  99. package/src/zexus/persistence.py +15 -1
  100. package/src/zexus/renderer/__init__.py +61 -0
  101. package/src/zexus/renderer/__pycache__/__init__.cpython-312.pyc +0 -0
  102. package/src/zexus/renderer/__pycache__/backend.cpython-312.pyc +0 -0
  103. package/src/zexus/renderer/__pycache__/canvas.cpython-312.pyc +0 -0
  104. package/src/zexus/renderer/__pycache__/color_system.cpython-312.pyc +0 -0
  105. package/src/zexus/renderer/__pycache__/layout.cpython-312.pyc +0 -0
  106. package/src/zexus/renderer/__pycache__/main_renderer.cpython-312.pyc +0 -0
  107. package/src/zexus/renderer/__pycache__/painter.cpython-312.pyc +0 -0
  108. package/src/zexus/renderer/backend.py +261 -0
  109. package/src/zexus/renderer/canvas.py +78 -0
  110. package/src/zexus/renderer/color_system.py +201 -0
  111. package/src/zexus/renderer/graphics.py +31 -0
  112. package/src/zexus/renderer/layout.py +222 -0
  113. package/src/zexus/renderer/main_renderer.py +66 -0
  114. package/src/zexus/renderer/painter.py +30 -0
  115. package/src/zexus/renderer/tk_backend.py +208 -0
  116. package/src/zexus/renderer/web_backend.py +260 -0
  117. package/src/zexus/runtime/__init__.py +10 -2
  118. package/src/zexus/runtime/__pycache__/__init__.cpython-312.pyc +0 -0
  119. package/src/zexus/runtime/__pycache__/async_runtime.cpython-312.pyc +0 -0
  120. package/src/zexus/runtime/__pycache__/load_manager.cpython-312.pyc +0 -0
  121. package/src/zexus/runtime/file_flags.py +137 -0
  122. package/src/zexus/runtime/load_manager.py +368 -0
  123. package/src/zexus/safety/__pycache__/__init__.cpython-312.pyc +0 -0
  124. package/src/zexus/safety/__pycache__/memory_safety.cpython-312.pyc +0 -0
  125. package/src/zexus/security.py +424 -34
  126. package/src/zexus/stdlib/fs.py +23 -18
  127. package/src/zexus/stdlib/http.py +289 -186
  128. package/src/zexus/stdlib/sockets.py +207 -163
  129. package/src/zexus/stdlib/websockets.py +282 -0
  130. package/src/zexus/stdlib_integration.py +369 -2
  131. package/src/zexus/strategy_recovery.py +6 -3
  132. package/src/zexus/type_checker.py +423 -0
  133. package/src/zexus/virtual_filesystem.py +189 -2
  134. package/src/zexus/vm/__init__.py +113 -3
  135. package/src/zexus/vm/__pycache__/async_optimizer.cpython-312.pyc +0 -0
  136. package/src/zexus/vm/__pycache__/bytecode.cpython-312.pyc +0 -0
  137. package/src/zexus/vm/__pycache__/bytecode_converter.cpython-312.pyc +0 -0
  138. package/src/zexus/vm/__pycache__/cache.cpython-312.pyc +0 -0
  139. package/src/zexus/vm/__pycache__/compiler.cpython-312.pyc +0 -0
  140. package/src/zexus/vm/__pycache__/gas_metering.cpython-312.pyc +0 -0
  141. package/src/zexus/vm/__pycache__/jit.cpython-312.pyc +0 -0
  142. package/src/zexus/vm/__pycache__/parallel_vm.cpython-312.pyc +0 -0
  143. package/src/zexus/vm/__pycache__/vm.cpython-312.pyc +0 -0
  144. package/src/zexus/vm/async_optimizer.py +80 -6
  145. package/src/zexus/vm/binary_bytecode.py +659 -0
  146. package/src/zexus/vm/bytecode.py +59 -11
  147. package/src/zexus/vm/bytecode_converter.py +26 -12
  148. package/src/zexus/vm/cabi.c +1985 -0
  149. package/src/zexus/vm/cabi.cpython-312-x86_64-linux-gnu.so +0 -0
  150. package/src/zexus/vm/cabi.h +127 -0
  151. package/src/zexus/vm/cache.py +561 -17
  152. package/src/zexus/vm/compiler.py +818 -51
  153. package/src/zexus/vm/fastops.c +15743 -0
  154. package/src/zexus/vm/fastops.cpython-312-x86_64-linux-gnu.so +0 -0
  155. package/src/zexus/vm/fastops.pyx +288 -0
  156. package/src/zexus/vm/gas_metering.py +50 -9
  157. package/src/zexus/vm/jit.py +364 -20
  158. package/src/zexus/vm/native_jit_backend.py +1816 -0
  159. package/src/zexus/vm/native_runtime.cpp +1388 -0
  160. package/src/zexus/vm/native_runtime.cpython-312-x86_64-linux-gnu.so +0 -0
  161. package/src/zexus/vm/optimizer.py +161 -11
  162. package/src/zexus/vm/parallel_vm.py +140 -45
  163. package/src/zexus/vm/peephole_optimizer.py +82 -4
  164. package/src/zexus/vm/profiler.py +38 -18
  165. package/src/zexus/vm/register_allocator.py +16 -5
  166. package/src/zexus/vm/register_vm.py +8 -5
  167. package/src/zexus/vm/vm.py +3581 -531
  168. package/src/zexus/vm/wasm_compiler.py +658 -0
  169. package/src/zexus/zexus_ast.py +137 -11
  170. package/src/zexus/zexus_token.py +16 -5
  171. package/src/zexus/zpm/installer.py +55 -15
  172. package/src/zexus/zpm/package_manager.py +1 -1
  173. package/src/zexus/zpm/registry.py +257 -28
  174. package/src/zexus.egg-info/PKG-INFO +16 -6
  175. package/src/zexus.egg-info/SOURCES.txt +129 -17
  176. package/src/zexus.egg-info/entry_points.txt +1 -0
  177. package/src/zexus.egg-info/requires.txt +4 -0
@@ -70,6 +70,75 @@ class BytecodeOptimizer:
70
70
  self.max_passes = max_passes
71
71
  self.debug = debug
72
72
  self.stats = OptimizationStats()
73
+ self.min_basic_size = 2
74
+ self.min_aggressive_size = 2
75
+ self.min_experimental_size = 20
76
+ self._control_flow_ops = {
77
+ "JUMP",
78
+ "JUMP_IF_FALSE",
79
+ "JUMP_IF_TRUE",
80
+ "JUMP_FORWARD",
81
+ "JUMP_BACKWARD",
82
+ "LABEL",
83
+ "JUMP_TARGET",
84
+ }
85
+
86
+ def _has_control_flow(self, instructions: List[Tuple[str, Any]]) -> bool:
87
+ """Check if instruction stream contains control-flow operations."""
88
+ for op, _ in instructions:
89
+ if op in self._control_flow_ops:
90
+ return True
91
+ return False
92
+
93
+ def _validate_control_flow(self, instructions: List[Tuple[str, Any]]) -> bool:
94
+ """Validate that jump targets are sane and within bounds."""
95
+ max_index = len(instructions) - 1
96
+ for idx, (op, operand) in enumerate(instructions):
97
+ if op in ("JUMP", "JUMP_IF_FALSE", "JUMP_IF_TRUE", "JUMP_FORWARD", "JUMP_BACKWARD"):
98
+ if not isinstance(operand, int):
99
+ return False
100
+ if operand < 0 or operand > max_index:
101
+ return False
102
+ if op == "JUMP" and operand == idx:
103
+ return False
104
+ if op == "JUMP_FORWARD" and operand <= idx:
105
+ return False
106
+ if op == "JUMP_BACKWARD" and operand >= idx:
107
+ return False
108
+ return True
109
+
110
+ def _run_pass(
111
+ self,
112
+ name: str,
113
+ instructions: List[Tuple[str, Any]],
114
+ apply_fn,
115
+ *,
116
+ can_change_size: bool = True
117
+ ) -> List[Tuple[str, Any]]:
118
+ """Run a pass with logging and control-flow validation."""
119
+ if can_change_size and self._has_control_flow(instructions):
120
+ if self.debug:
121
+ print(f"[Optimizer] {name}: skipped (control flow present)")
122
+ return instructions
123
+
124
+ before_len = len(instructions)
125
+ try:
126
+ optimized = apply_fn()
127
+ except Exception as exc:
128
+ if self.debug:
129
+ print(f"[Optimizer] {name}: failed ({exc})")
130
+ return instructions
131
+
132
+ after_len = len(optimized)
133
+ if self.debug:
134
+ print(f"[Optimizer] {name}: {before_len} -> {after_len}")
135
+
136
+ if not self._validate_control_flow(optimized):
137
+ if self.debug:
138
+ print(f"[Optimizer] {name}: invalid control flow detected, reverting")
139
+ return instructions
140
+
141
+ return optimized
73
142
 
74
143
  def optimize(self, instructions: List[Tuple[str, Any]], constants: List[Any] = None) -> List[Tuple[str, Any]]:
75
144
  """
@@ -97,21 +166,75 @@ class BytecodeOptimizer:
97
166
 
98
167
  # Level 1: Basic optimizations
99
168
  if self.level >= 1:
100
- optimized = self._constant_folding(optimized, constants)
101
- optimized = self._dead_code_elimination(optimized)
102
- optimized = self._peephole_optimization(optimized)
169
+ if len(optimized) >= self.min_basic_size:
170
+ optimized = self._run_pass(
171
+ "constant_folding",
172
+ optimized,
173
+ lambda: self._constant_folding(optimized, constants),
174
+ can_change_size=True,
175
+ )
176
+ optimized = self._run_pass(
177
+ "dead_code_elimination",
178
+ optimized,
179
+ lambda: self._dead_code_elimination(optimized),
180
+ can_change_size=False,
181
+ )
182
+ optimized = self._run_pass(
183
+ "peephole_optimization",
184
+ optimized,
185
+ lambda: self._peephole_optimization(optimized),
186
+ can_change_size=True,
187
+ )
188
+ elif self.debug:
189
+ print("[Optimizer] basic passes skipped (small instruction count)")
103
190
 
104
191
  # Level 2: Aggressive optimizations
105
192
  if self.level >= 2:
106
- optimized = self._copy_propagation(optimized)
107
- optimized = self._instruction_combining(optimized, constants)
108
- optimized = self._jump_threading(optimized)
109
- optimized = self._strength_reduction(optimized)
193
+ if len(optimized) >= self.min_aggressive_size:
194
+ optimized = self._run_pass(
195
+ "copy_propagation",
196
+ optimized,
197
+ lambda: self._copy_propagation(optimized),
198
+ can_change_size=False,
199
+ )
200
+ optimized = self._run_pass(
201
+ "instruction_combining",
202
+ optimized,
203
+ lambda: self._instruction_combining(optimized, constants),
204
+ can_change_size=True,
205
+ )
206
+ optimized = self._run_pass(
207
+ "jump_threading",
208
+ optimized,
209
+ lambda: self._jump_threading(optimized),
210
+ can_change_size=False,
211
+ )
212
+ optimized = self._run_pass(
213
+ "strength_reduction",
214
+ optimized,
215
+ lambda: self._strength_reduction(optimized),
216
+ can_change_size=False,
217
+ )
218
+ elif self.debug:
219
+ print("[Optimizer] aggressive passes skipped (small instruction count)")
110
220
 
111
221
  # Level 3: Experimental optimizations
112
222
  if self.level >= 3:
113
- optimized = self._common_subexpression_elimination(optimized)
114
- optimized = self._loop_invariant_code_motion(optimized)
223
+ if len(optimized) >= self.min_experimental_size:
224
+ optimized = self._run_pass(
225
+ "common_subexpression_elimination",
226
+ optimized,
227
+ lambda: self._common_subexpression_elimination(optimized),
228
+ can_change_size=True,
229
+ )
230
+ optimized = self._run_pass(
231
+ "loop_invariant_code_motion",
232
+ optimized,
233
+ lambda: self._loop_invariant_code_motion(optimized),
234
+ can_change_size=True,
235
+ )
236
+ elif self.debug:
237
+ print("[Optimizer] experimental passes skipped (small instruction count)")
115
238
 
116
239
  self.stats.passes_applied += 1
117
240
 
@@ -282,24 +405,51 @@ class BytecodeOptimizer:
282
405
  def _dead_code_elimination(self, instructions: List[Tuple[str, Any]]) -> List[Tuple[str, Any]]:
283
406
  """
284
407
  Remove unreachable code after RETURN, unconditional JUMP, etc.
408
+ Jump targets are remapped after dead code removal to keep control flow valid.
285
409
  """
286
410
  result = []
287
411
  in_dead_code = False
412
+ jump_targets: Set[int] = set()
413
+ removed_indices: Set[int] = set()
414
+
415
+ for idx, (op, operand) in enumerate(instructions):
416
+ if op in ("JUMP", "JUMP_IF_FALSE", "JUMP_IF_TRUE", "JUMP_FORWARD", "JUMP_BACKWARD"):
417
+ if isinstance(operand, int):
418
+ jump_targets.add(operand)
288
419
 
289
- for op, operand in instructions:
420
+ for idx, (op, operand) in enumerate(instructions):
290
421
  if in_dead_code:
291
422
  # Skip until we hit a jump target or label
292
- if op in ("LABEL", "JUMP_TARGET"):
423
+ if op in ("LABEL", "JUMP_TARGET") or idx in jump_targets:
293
424
  in_dead_code = False
294
425
  result.append((op, operand))
295
426
  else:
296
427
  self.stats.dead_code_removed += 1
428
+ removed_indices.add(idx)
297
429
  else:
298
430
  result.append((op, operand))
299
431
  # Mark dead code after unconditional control flow
300
432
  if op in ("RETURN", "JUMP"):
301
433
  in_dead_code = True
302
434
 
435
+ # Remap jump targets after dead code removal
436
+ if removed_indices:
437
+ index_map: Dict[int, int] = {}
438
+ new_idx = 0
439
+ for old_idx in range(len(instructions)):
440
+ if old_idx not in removed_indices:
441
+ index_map[old_idx] = new_idx
442
+ new_idx += 1
443
+
444
+ jump_ops = ("JUMP", "JUMP_IF_FALSE", "JUMP_IF_TRUE", "JUMP_FORWARD", "JUMP_BACKWARD")
445
+ remapped = []
446
+ for op, operand in result:
447
+ if op in jump_ops and isinstance(operand, int) and operand in index_map:
448
+ remapped.append((op, index_map[operand]))
449
+ else:
450
+ remapped.append((op, operand))
451
+ result = remapped
452
+
303
453
  return result
304
454
 
305
455
  def _peephole_optimization(self, instructions: List[Tuple[str, Any]]) -> List[Tuple[str, Any]]:
@@ -28,6 +28,8 @@ import time
28
28
  import logging
29
29
  from collections import defaultdict
30
30
  import traceback
31
+ import signal
32
+ import concurrent.futures
31
33
 
32
34
  from .bytecode import Bytecode, Opcode
33
35
  from .vm import VM
@@ -111,13 +113,17 @@ def _execute_chunk_helper(args):
111
113
  Returns:
112
114
  ExecutionResult with execution status and metrics
113
115
  """
114
- chunk, shared_state_dict, retry_count = args
116
+ chunk, shared_state_dict, retry_count, builtins, parent_env = args
115
117
 
116
118
  try:
117
119
  start_time = time.time()
118
120
 
119
121
  # Create a minimal VM for this worker
120
122
  vm = VM()
123
+ if builtins is not None:
124
+ vm.builtins = builtins
125
+ if parent_env is not None:
126
+ vm._parent_env = parent_env
121
127
 
122
128
  # Load shared state
123
129
  for var, value in shared_state_dict.items():
@@ -125,6 +131,7 @@ def _execute_chunk_helper(args):
125
131
 
126
132
  # Create bytecode from chunk
127
133
  bytecode = Bytecode()
134
+ bytecode.constants = list(chunk.constants)
128
135
  for opcode, operand in chunk.instructions:
129
136
  bytecode.instructions.append((opcode, operand))
130
137
 
@@ -184,6 +191,7 @@ class BytecodeChunk:
184
191
  variables_read: Set[str] = field(default_factory=set)
185
192
  variables_written: Set[str] = field(default_factory=set)
186
193
  can_parallelize: bool = True
194
+ constants: List[Any] = field(default_factory=list)
187
195
 
188
196
  def __repr__(self):
189
197
  ins_count = len(self.instructions)
@@ -302,6 +310,7 @@ class BytecodeChunker:
302
310
  chunk.variables_read.update(reads)
303
311
  chunk.variables_written.update(writes)
304
312
 
313
+ chunk.constants = list(bytecode.constants)
305
314
  chunks.append(chunk)
306
315
 
307
316
  # Detect dependencies
@@ -398,8 +407,14 @@ class ResultMerger:
398
407
  for result in sorted_results:
399
408
  merged_variables.update(result.variables_modified)
400
409
 
401
- # Last result is the final result
402
- final_result = sorted_results[-1].result if sorted_results else None
410
+ # Prefer the last non-None result; fall back to merged variables
411
+ final_result = None
412
+ for result in reversed(sorted_results):
413
+ if result.result is not None:
414
+ final_result = result.result
415
+ break
416
+ if final_result is None and merged_variables:
417
+ final_result = merged_variables
403
418
 
404
419
  return True, final_result, merged_variables
405
420
 
@@ -436,7 +451,12 @@ class WorkerPool:
436
451
  def start(self) -> None:
437
452
  """Start the worker pool"""
438
453
  if self.pool is None:
439
- self.pool = Pool(processes=self.num_workers)
454
+ try:
455
+ self.pool = Pool(processes=self.num_workers)
456
+ except (OSError, RuntimeError) as e:
457
+ logger.warning(f"Failed to start multiprocessing Pool: {e}")
458
+ self.pool = None
459
+ raise
440
460
 
441
461
  def shutdown(self) -> None:
442
462
  """Shutdown the worker pool"""
@@ -445,7 +465,13 @@ class WorkerPool:
445
465
  self.pool.join()
446
466
  self.pool = None
447
467
 
448
- def execute_chunk(self, chunk: BytecodeChunk, shared_state: SharedState) -> ExecutionResult:
468
+ def execute_chunk(
469
+ self,
470
+ chunk: BytecodeChunk,
471
+ shared_state: SharedState,
472
+ builtins: Optional[Dict[str, Any]] = None,
473
+ parent_env: Optional[Dict[str, Any]] = None,
474
+ ) -> ExecutionResult:
449
475
  """
450
476
  Execute a single chunk (called by worker process).
451
477
 
@@ -456,6 +482,10 @@ class WorkerPool:
456
482
  try:
457
483
  # Create a local VM for this chunk
458
484
  vm = VM()
485
+ if builtins is not None:
486
+ vm.builtins = builtins
487
+ if parent_env is not None:
488
+ vm._parent_env = parent_env
459
489
 
460
490
  # Load shared variables
461
491
  for var in chunk.variables_read:
@@ -500,10 +530,14 @@ class WorkerPool:
500
530
  execution_time=execution_time
501
531
  )
502
532
 
503
- def submit_chunks(self,
504
- chunks: List[BytecodeChunk],
505
- shared_state: SharedState,
506
- config: ParallelConfig) -> List[ExecutionResult]:
533
+ def submit_chunks(
534
+ self,
535
+ chunks: List[BytecodeChunk],
536
+ shared_state: SharedState,
537
+ config: ParallelConfig,
538
+ builtins: Optional[Dict[str, Any]] = None,
539
+ parent_env: Optional[Dict[str, Any]] = None,
540
+ ) -> List[ExecutionResult]:
507
541
  """
508
542
  Submit chunks for parallel execution with retry logic.
509
543
 
@@ -537,38 +571,55 @@ class WorkerPool:
537
571
 
538
572
  # Execute parallel chunks with retry logic
539
573
  if parallel_chunks and len(parallel_chunks) > 1:
540
- # Use cloudpickle for better serialization
574
+ # Use snapshot of shared state for this level
541
575
  shared_dict = dict(shared_state.variables)
542
-
543
- chunk_results = []
544
- for chunk in parallel_chunks:
545
- retry_count = 0
546
- success = False
547
-
548
- while retry_count < config.retry_attempts and not success:
576
+
577
+ pending: Dict[int, Tuple[BytecodeChunk, int]] = {
578
+ c.chunk_id: (c, 0) for c in parallel_chunks
579
+ }
580
+ chunk_results: List[ExecutionResult] = []
581
+
582
+ while pending:
583
+ futures: Dict[int, Any] = {}
584
+ for chunk_id, (chunk, retry_count) in pending.items():
585
+ futures[chunk_id] = self.pool.apply_async(
586
+ _execute_chunk_helper,
587
+ ((chunk, shared_dict, retry_count, builtins, parent_env),)
588
+ )
589
+
590
+ next_pending: Dict[int, Tuple[BytecodeChunk, int]] = {}
591
+ for chunk_id, future in futures.items():
592
+ chunk, retry_count = pending[chunk_id]
549
593
  try:
550
- # Submit with timeout
551
- future = self.pool.apply_async(
552
- _execute_chunk_helper,
553
- ((chunk, shared_dict, retry_count),)
554
- )
555
594
  result = future.get(timeout=config.timeout_seconds)
556
-
557
595
  if result.success:
558
- success = True
559
596
  chunk_results.append(result)
560
597
  metrics.chunks_succeeded += 1
561
598
  else:
562
599
  retry_count += 1
563
- metrics.chunks_retried += 1
564
- logger.warning(f"Chunk {chunk.chunk_id} failed, retry {retry_count}/{config.retry_attempts}")
565
-
600
+ if retry_count < config.retry_attempts:
601
+ metrics.chunks_retried += 1
602
+ logger.warning(
603
+ f"Chunk {chunk.chunk_id} failed, retry {retry_count}/{config.retry_attempts}"
604
+ )
605
+ next_pending[chunk_id] = (chunk, retry_count)
606
+ else:
607
+ result.retry_count = retry_count
608
+ chunk_results.append(result)
609
+ metrics.chunks_failed += 1
610
+ metrics.errors.append(
611
+ f"Chunk {chunk.chunk_id}: {result.error or 'unknown error'}"
612
+ )
613
+
566
614
  except mp.TimeoutError:
567
615
  retry_count += 1
568
- metrics.chunks_retried += 1
569
- logger.error(f"Chunk {chunk.chunk_id} timed out after {config.timeout_seconds}s")
570
-
571
- if retry_count >= config.retry_attempts:
616
+ if retry_count < config.retry_attempts:
617
+ metrics.chunks_retried += 1
618
+ logger.error(
619
+ f"Chunk {chunk.chunk_id} timed out after {config.timeout_seconds}s"
620
+ )
621
+ next_pending[chunk_id] = (chunk, retry_count)
622
+ else:
572
623
  error_result = ExecutionResult(
573
624
  chunk_id=chunk.chunk_id,
574
625
  success=False,
@@ -578,13 +629,14 @@ class WorkerPool:
578
629
  chunk_results.append(error_result)
579
630
  metrics.chunks_failed += 1
580
631
  metrics.errors.append(f"Chunk {chunk.chunk_id} timeout")
581
-
632
+
582
633
  except Exception as e:
583
634
  retry_count += 1
584
- metrics.chunks_retried += 1
585
- logger.error(f"Chunk {chunk.chunk_id} error: {e}")
586
-
587
- if retry_count >= config.retry_attempts:
635
+ if retry_count < config.retry_attempts:
636
+ metrics.chunks_retried += 1
637
+ logger.error(f"Chunk {chunk.chunk_id} error: {e}")
638
+ next_pending[chunk_id] = (chunk, retry_count)
639
+ else:
588
640
  error_result = ExecutionResult(
589
641
  chunk_id=chunk.chunk_id,
590
642
  success=False,
@@ -595,9 +647,11 @@ class WorkerPool:
595
647
  chunk_results.append(error_result)
596
648
  metrics.chunks_failed += 1
597
649
  metrics.errors.append(f"Chunk {chunk.chunk_id}: {str(e)}")
598
-
650
+
651
+ pending = next_pending
652
+
599
653
  results.extend(chunk_results)
600
-
654
+
601
655
  # Update shared state with results
602
656
  for result in chunk_results:
603
657
  if result.success:
@@ -605,7 +659,7 @@ class WorkerPool:
605
659
 
606
660
  # Execute sequential chunks one by one
607
661
  for chunk in sequential_chunks + (parallel_chunks if len(parallel_chunks) == 1 else []):
608
- result = self.execute_chunk(chunk, shared_state)
662
+ result = self.execute_chunk(chunk, shared_state, builtins=builtins, parent_env=parent_env)
609
663
  results.append(result)
610
664
 
611
665
  if result.success:
@@ -701,11 +755,21 @@ class ParallelVM:
701
755
  # Metrics
702
756
  self.last_metrics: Optional[ExecutionMetrics] = None
703
757
  self.cumulative_metrics = ExecutionMetrics()
758
+ self.stats = {
759
+ "parallel_executions": 0,
760
+ "sequential_executions": 0,
761
+ "fallback_executions": 0,
762
+ }
704
763
 
705
764
  logger.info(f"ParallelVM initialized: {self.config.worker_count} workers, "
706
765
  f"chunk_size={self.config.chunk_size}, mode={mode.value}")
707
766
 
708
- def execute(self, bytecode: Bytecode, sequential_fallback: bool = True) -> Any:
767
+ def execute(
768
+ self,
769
+ bytecode: Bytecode,
770
+ sequential_fallback: bool = True,
771
+ initial_state: Optional[Dict[str, Any]] = None,
772
+ ) -> Any:
709
773
  """
710
774
  Execute bytecode in parallel with metrics and error handling.
711
775
 
@@ -751,15 +815,41 @@ class ParallelVM:
751
815
  metrics.chunk_count = len(chunks)
752
816
  logger.info(f"Created {len(chunks)} chunks in {time.time() - chunk_start:.4f}s")
753
817
 
754
- # Initialize shared state
755
- manager = Manager()
756
- self.shared_state = SharedState(manager)
818
+ # Initialize shared state — use thread-safe dict instead of
819
+ # multiprocessing.Manager which can hang in container environments
820
+ try:
821
+ manager = Manager()
822
+ self.shared_state = SharedState(manager)
823
+ except (OSError, RuntimeError) as e:
824
+ logger.warning(f"multiprocessing.Manager() failed ({e}), falling back to sequential")
825
+ if sequential_fallback and self.config.enable_fallback:
826
+ self.stats["fallback_executions"] += 1
827
+ result = self._execute_sequential(bytecode)
828
+ metrics.total_time = time.time() - start_time
829
+ self.last_metrics = metrics
830
+ return result
831
+ raise
757
832
  self.merger = ResultMerger()
833
+
834
+ builtins = None
835
+ parent_env = None
836
+ if initial_state:
837
+ env_state = initial_state.get("env")
838
+ if isinstance(env_state, dict):
839
+ self.shared_state.batch_write(env_state)
840
+ builtins = initial_state.get("builtins")
841
+ parent_env = initial_state.get("parent_env")
758
842
 
759
843
  # Execute chunks in parallel
760
844
  parallel_start = time.time()
761
845
  with self.worker_pool as pool:
762
- results = pool.submit_chunks(chunks, self.shared_state, self.config)
846
+ results = pool.submit_chunks(
847
+ chunks,
848
+ self.shared_state,
849
+ self.config,
850
+ builtins=builtins,
851
+ parent_env=parent_env,
852
+ )
763
853
  metrics.parallel_time = time.time() - parallel_start
764
854
 
765
855
  logger.info(f"Parallel execution completed in {metrics.parallel_time:.4f}s")
@@ -788,6 +878,7 @@ class ParallelVM:
788
878
 
789
879
  if not success and sequential_fallback:
790
880
  logger.warning(f"Parallel execution failed: {final_result}. Falling back to sequential.")
881
+ self.stats["fallback_executions"] += 1
791
882
  result = self._execute_sequential(bytecode)
792
883
  metrics.total_time = time.time() - start_time
793
884
  self.last_metrics = metrics
@@ -811,6 +902,7 @@ class ParallelVM:
811
902
  self.cumulative_metrics.chunks_retried += metrics.chunks_retried
812
903
 
813
904
  self.last_metrics = metrics
905
+ self.stats["parallel_executions"] += 1
814
906
 
815
907
  if self.config.enable_metrics:
816
908
  logger.info(f"Execution metrics: {metrics.to_dict()}")
@@ -826,6 +918,7 @@ class ParallelVM:
826
918
 
827
919
  if sequential_fallback and self.config.enable_fallback:
828
920
  logger.warning("Falling back to sequential execution due to error")
921
+ self.stats["fallback_executions"] += 1
829
922
  return self._execute_sequential(bytecode)
830
923
  else:
831
924
  raise
@@ -834,7 +927,9 @@ class ParallelVM:
834
927
  """Execute bytecode sequentially (fallback mode)"""
835
928
  logger.info("Executing in sequential mode")
836
929
  vm = VM()
837
- return vm.execute(bytecode)
930
+ result = vm.execute(bytecode)
931
+ self.stats["sequential_executions"] += 1
932
+ return result
838
933
 
839
934
  def get_statistics(self) -> Dict[str, Any]:
840
935
  """Get execution statistics"""
@@ -82,6 +82,8 @@ class PeepholeOptimizer:
82
82
  def __init__(self, level: OptimizationLevel = OptimizationLevel.BASIC):
83
83
  self.level = level
84
84
  self.stats = OptimizationStats()
85
+ self._consts: Optional[List[Any]] = None
86
+ self._const_indexed = False
85
87
 
86
88
  # Optimization pattern matchers
87
89
  self.patterns = {
@@ -144,6 +146,63 @@ class PeepholeOptimizer:
144
146
  )
145
147
 
146
148
  return optimized
149
+
150
+ def optimize_bytecode(
151
+ self,
152
+ instructions: List[Tuple[Any, Any]],
153
+ constants: Optional[List[Any]] = None
154
+ ) -> Tuple[List[Tuple[Any, Any]], List[Any]]:
155
+ """
156
+ Optimize raw bytecode instructions with constant pool awareness.
157
+
158
+ Args:
159
+ instructions: List of (opcode, operand) tuples
160
+ constants: Bytecode constants list (optional)
161
+
162
+ Returns:
163
+ (optimized_instructions, updated_constants)
164
+ """
165
+ control_flow_ops = {
166
+ "JUMP",
167
+ "JUMP_IF_FALSE",
168
+ "JUMP_IF_TRUE",
169
+ "JUMP_FORWARD",
170
+ "JUMP_BACKWARD",
171
+ "JUMP_TARGET",
172
+ "LABEL",
173
+ }
174
+ for instr in instructions:
175
+ if instr is None:
176
+ continue
177
+ if isinstance(instr, tuple) and len(instr) >= 1:
178
+ op = instr[0]
179
+ op_name = op.name if hasattr(op, "name") else str(op)
180
+ if op_name in control_flow_ops:
181
+ return instructions, list(constants) if constants is not None else []
182
+
183
+ self._consts = list(constants) if constants is not None else []
184
+ self._const_indexed = constants is not None
185
+
186
+ normalized: List[Instruction] = []
187
+ for instr in instructions:
188
+ if instr is None:
189
+ continue
190
+ if isinstance(instr, tuple) and len(instr) >= 2:
191
+ op = instr[0]
192
+ operand = instr[1] if len(instr) == 2 else tuple(instr[1:])
193
+ op_name = op.name if hasattr(op, "name") else op
194
+ normalized.append(Instruction(str(op_name), operand, 0))
195
+
196
+ optimized = self.optimize(normalized)
197
+
198
+ out: List[Tuple[Any, Any]] = []
199
+ for instr in optimized:
200
+ if instr.opcode == "LOAD_CONST" and self._const_indexed:
201
+ out.append((instr.opcode, instr.arg))
202
+ else:
203
+ out.append((instr.opcode, instr.arg))
204
+
205
+ return out, list(self._consts)
147
206
 
148
207
  def _apply_pattern(
149
208
  self,
@@ -199,9 +258,9 @@ class PeepholeOptimizer:
199
258
  'BINARY_ADD', 'BINARY_SUB', 'BINARY_MUL',
200
259
  'BINARY_DIV', 'BINARY_MOD')):
201
260
 
202
- # Both operands must be constants
203
- a = inst1.arg
204
- b = inst2.arg
261
+ # Resolve constant values (supports constant pool indices)
262
+ a = self._resolve_const(inst1.arg)
263
+ b = self._resolve_const(inst2.arg)
205
264
 
206
265
  # Only fold numeric constants
207
266
  if not (isinstance(a, (int, float)) and isinstance(b, (int, float))):
@@ -228,7 +287,11 @@ class PeepholeOptimizer:
228
287
  return False, [], 0
229
288
 
230
289
  # Replace with single LOAD_CONST
231
- replacement = [Instruction('LOAD_CONST', result, inst1.lineno)]
290
+ if self._const_indexed:
291
+ const_index = self._add_const(result)
292
+ replacement = [Instruction('LOAD_CONST', const_index, inst1.lineno)]
293
+ else:
294
+ replacement = [Instruction('LOAD_CONST', result, inst1.lineno)]
232
295
  self.stats.constant_folds += 1
233
296
  return True, replacement, 3
234
297
 
@@ -237,6 +300,21 @@ class PeepholeOptimizer:
237
300
  return False, [], 0
238
301
 
239
302
  return False, [], 0
303
+
304
+ def _resolve_const(self, arg: Any) -> Any:
305
+ if self._const_indexed and isinstance(arg, int) and self._consts is not None:
306
+ if 0 <= arg < len(self._consts):
307
+ return self._consts[arg]
308
+ return arg
309
+
310
+ def _add_const(self, value: Any) -> int:
311
+ if self._consts is None:
312
+ self._consts = []
313
+ for i, const in enumerate(self._consts):
314
+ if const == value and type(const) == type(value):
315
+ return i
316
+ self._consts.append(value)
317
+ return len(self._consts) - 1
240
318
 
241
319
  # ========== Dead Code Elimination ==========
242
320