zexus 1.7.1 → 1.7.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (159)
  1. package/README.md +3 -3
  2. package/package.json +1 -1
  3. package/src/__init__.py +7 -0
  4. package/src/zexus/__init__.py +1 -1
  5. package/src/zexus/__pycache__/__init__.cpython-312.pyc +0 -0
  6. package/src/zexus/__pycache__/capability_system.cpython-312.pyc +0 -0
  7. package/src/zexus/__pycache__/debug_sanitizer.cpython-312.pyc +0 -0
  8. package/src/zexus/__pycache__/environment.cpython-312.pyc +0 -0
  9. package/src/zexus/__pycache__/error_reporter.cpython-312.pyc +0 -0
  10. package/src/zexus/__pycache__/input_validation.cpython-312.pyc +0 -0
  11. package/src/zexus/__pycache__/lexer.cpython-312.pyc +0 -0
  12. package/src/zexus/__pycache__/module_cache.cpython-312.pyc +0 -0
  13. package/src/zexus/__pycache__/module_manager.cpython-312.pyc +0 -0
  14. package/src/zexus/__pycache__/object.cpython-312.pyc +0 -0
  15. package/src/zexus/__pycache__/security.cpython-312.pyc +0 -0
  16. package/src/zexus/__pycache__/security_enforcement.cpython-312.pyc +0 -0
  17. package/src/zexus/__pycache__/syntax_validator.cpython-312.pyc +0 -0
  18. package/src/zexus/__pycache__/zexus_ast.cpython-312.pyc +0 -0
  19. package/src/zexus/__pycache__/zexus_token.cpython-312.pyc +0 -0
  20. package/src/zexus/access_control_system/__pycache__/__init__.cpython-312.pyc +0 -0
  21. package/src/zexus/access_control_system/__pycache__/access_control.cpython-312.pyc +0 -0
  22. package/src/zexus/advanced_types.py +17 -2
  23. package/src/zexus/blockchain/__init__.py +411 -0
  24. package/src/zexus/blockchain/accelerator.py +1160 -0
  25. package/src/zexus/blockchain/chain.py +660 -0
  26. package/src/zexus/blockchain/consensus.py +821 -0
  27. package/src/zexus/blockchain/contract_vm.py +1019 -0
  28. package/src/zexus/blockchain/crypto.py +79 -14
  29. package/src/zexus/blockchain/events.py +526 -0
  30. package/src/zexus/blockchain/loadtest.py +721 -0
  31. package/src/zexus/blockchain/monitoring.py +350 -0
  32. package/src/zexus/blockchain/mpt.py +716 -0
  33. package/src/zexus/blockchain/multichain.py +951 -0
  34. package/src/zexus/blockchain/multiprocess_executor.py +338 -0
  35. package/src/zexus/blockchain/network.py +886 -0
  36. package/src/zexus/blockchain/node.py +666 -0
  37. package/src/zexus/blockchain/rpc.py +1203 -0
  38. package/src/zexus/blockchain/rust_bridge.py +421 -0
  39. package/src/zexus/blockchain/storage.py +423 -0
  40. package/src/zexus/blockchain/tokens.py +750 -0
  41. package/src/zexus/blockchain/upgradeable.py +1004 -0
  42. package/src/zexus/blockchain/verification.py +1602 -0
  43. package/src/zexus/blockchain/wallet.py +621 -0
  44. package/src/zexus/cli/__pycache__/main.cpython-312.pyc +0 -0
  45. package/src/zexus/cli/main.py +300 -20
  46. package/src/zexus/cli/zpm.py +1 -1
  47. package/src/zexus/compiler/__pycache__/bytecode.cpython-312.pyc +0 -0
  48. package/src/zexus/compiler/__pycache__/lexer.cpython-312.pyc +0 -0
  49. package/src/zexus/compiler/__pycache__/parser.cpython-312.pyc +0 -0
  50. package/src/zexus/compiler/__pycache__/semantic.cpython-312.pyc +0 -0
  51. package/src/zexus/compiler/__pycache__/zexus_ast.cpython-312.pyc +0 -0
  52. package/src/zexus/compiler/lexer.py +10 -5
  53. package/src/zexus/concurrency_system.py +79 -0
  54. package/src/zexus/config.py +54 -0
  55. package/src/zexus/crypto_bridge.py +244 -8
  56. package/src/zexus/dap/__init__.py +10 -0
  57. package/src/zexus/dap/__main__.py +4 -0
  58. package/src/zexus/dap/dap_server.py +391 -0
  59. package/src/zexus/dap/debug_engine.py +298 -0
  60. package/src/zexus/environment.py +10 -1
  61. package/src/zexus/evaluator/__pycache__/bytecode_compiler.cpython-312.pyc +0 -0
  62. package/src/zexus/evaluator/__pycache__/core.cpython-312.pyc +0 -0
  63. package/src/zexus/evaluator/__pycache__/expressions.cpython-312.pyc +0 -0
  64. package/src/zexus/evaluator/__pycache__/functions.cpython-312.pyc +0 -0
  65. package/src/zexus/evaluator/__pycache__/resource_limiter.cpython-312.pyc +0 -0
  66. package/src/zexus/evaluator/__pycache__/statements.cpython-312.pyc +0 -0
  67. package/src/zexus/evaluator/__pycache__/unified_execution.cpython-312.pyc +0 -0
  68. package/src/zexus/evaluator/__pycache__/utils.cpython-312.pyc +0 -0
  69. package/src/zexus/evaluator/bytecode_compiler.py +441 -37
  70. package/src/zexus/evaluator/core.py +560 -49
  71. package/src/zexus/evaluator/expressions.py +122 -49
  72. package/src/zexus/evaluator/functions.py +417 -16
  73. package/src/zexus/evaluator/statements.py +521 -118
  74. package/src/zexus/evaluator/unified_execution.py +573 -72
  75. package/src/zexus/evaluator/utils.py +14 -2
  76. package/src/zexus/event_loop.py +186 -0
  77. package/src/zexus/lexer.py +742 -486
  78. package/src/zexus/lsp/__init__.py +1 -1
  79. package/src/zexus/lsp/definition_provider.py +163 -9
  80. package/src/zexus/lsp/server.py +22 -8
  81. package/src/zexus/lsp/symbol_provider.py +182 -9
  82. package/src/zexus/module_cache.py +237 -9
  83. package/src/zexus/object.py +64 -6
  84. package/src/zexus/parser/__pycache__/parser.cpython-312.pyc +0 -0
  85. package/src/zexus/parser/__pycache__/strategy_context.cpython-312.pyc +0 -0
  86. package/src/zexus/parser/__pycache__/strategy_structural.cpython-312.pyc +0 -0
  87. package/src/zexus/parser/parser.py +786 -285
  88. package/src/zexus/parser/strategy_context.py +407 -66
  89. package/src/zexus/parser/strategy_structural.py +117 -19
  90. package/src/zexus/persistence.py +15 -1
  91. package/src/zexus/renderer/__init__.py +15 -0
  92. package/src/zexus/renderer/__pycache__/__init__.cpython-312.pyc +0 -0
  93. package/src/zexus/renderer/__pycache__/backend.cpython-312.pyc +0 -0
  94. package/src/zexus/renderer/__pycache__/canvas.cpython-312.pyc +0 -0
  95. package/src/zexus/renderer/__pycache__/color_system.cpython-312.pyc +0 -0
  96. package/src/zexus/renderer/__pycache__/layout.cpython-312.pyc +0 -0
  97. package/src/zexus/renderer/__pycache__/main_renderer.cpython-312.pyc +0 -0
  98. package/src/zexus/renderer/__pycache__/painter.cpython-312.pyc +0 -0
  99. package/src/zexus/renderer/tk_backend.py +208 -0
  100. package/src/zexus/renderer/web_backend.py +260 -0
  101. package/src/zexus/runtime/__pycache__/__init__.cpython-312.pyc +0 -0
  102. package/src/zexus/runtime/__pycache__/async_runtime.cpython-312.pyc +0 -0
  103. package/src/zexus/runtime/__pycache__/load_manager.cpython-312.pyc +0 -0
  104. package/src/zexus/runtime/file_flags.py +137 -0
  105. package/src/zexus/safety/__pycache__/__init__.cpython-312.pyc +0 -0
  106. package/src/zexus/safety/__pycache__/memory_safety.cpython-312.pyc +0 -0
  107. package/src/zexus/security.py +424 -34
  108. package/src/zexus/stdlib/fs.py +23 -18
  109. package/src/zexus/stdlib/http.py +289 -186
  110. package/src/zexus/stdlib/sockets.py +207 -163
  111. package/src/zexus/stdlib/websockets.py +282 -0
  112. package/src/zexus/stdlib_integration.py +369 -2
  113. package/src/zexus/strategy_recovery.py +6 -3
  114. package/src/zexus/type_checker.py +423 -0
  115. package/src/zexus/virtual_filesystem.py +189 -2
  116. package/src/zexus/vm/__init__.py +113 -3
  117. package/src/zexus/vm/__pycache__/async_optimizer.cpython-312.pyc +0 -0
  118. package/src/zexus/vm/__pycache__/bytecode.cpython-312.pyc +0 -0
  119. package/src/zexus/vm/__pycache__/bytecode_converter.cpython-312.pyc +0 -0
  120. package/src/zexus/vm/__pycache__/cache.cpython-312.pyc +0 -0
  121. package/src/zexus/vm/__pycache__/compiler.cpython-312.pyc +0 -0
  122. package/src/zexus/vm/__pycache__/gas_metering.cpython-312.pyc +0 -0
  123. package/src/zexus/vm/__pycache__/jit.cpython-312.pyc +0 -0
  124. package/src/zexus/vm/__pycache__/parallel_vm.cpython-312.pyc +0 -0
  125. package/src/zexus/vm/__pycache__/vm.cpython-312.pyc +0 -0
  126. package/src/zexus/vm/async_optimizer.py +14 -1
  127. package/src/zexus/vm/binary_bytecode.py +659 -0
  128. package/src/zexus/vm/bytecode.py +28 -1
  129. package/src/zexus/vm/bytecode_converter.py +26 -12
  130. package/src/zexus/vm/cabi.c +1985 -0
  131. package/src/zexus/vm/cabi.cpython-312-x86_64-linux-gnu.so +0 -0
  132. package/src/zexus/vm/cabi.h +127 -0
  133. package/src/zexus/vm/cache.py +557 -17
  134. package/src/zexus/vm/compiler.py +703 -5
  135. package/src/zexus/vm/fastops.c +15743 -0
  136. package/src/zexus/vm/fastops.cpython-312-x86_64-linux-gnu.so +0 -0
  137. package/src/zexus/vm/fastops.pyx +288 -0
  138. package/src/zexus/vm/gas_metering.py +50 -9
  139. package/src/zexus/vm/jit.py +83 -2
  140. package/src/zexus/vm/native_jit_backend.py +1816 -0
  141. package/src/zexus/vm/native_runtime.cpp +1388 -0
  142. package/src/zexus/vm/native_runtime.cpython-312-x86_64-linux-gnu.so +0 -0
  143. package/src/zexus/vm/optimizer.py +161 -11
  144. package/src/zexus/vm/parallel_vm.py +118 -42
  145. package/src/zexus/vm/peephole_optimizer.py +82 -4
  146. package/src/zexus/vm/profiler.py +38 -18
  147. package/src/zexus/vm/register_allocator.py +16 -5
  148. package/src/zexus/vm/register_vm.py +8 -5
  149. package/src/zexus/vm/vm.py +3411 -573
  150. package/src/zexus/vm/wasm_compiler.py +658 -0
  151. package/src/zexus/zexus_ast.py +63 -11
  152. package/src/zexus/zexus_token.py +13 -5
  153. package/src/zexus/zpm/installer.py +55 -15
  154. package/src/zexus/zpm/package_manager.py +1 -1
  155. package/src/zexus/zpm/registry.py +257 -28
  156. package/src/zexus.egg-info/PKG-INFO +7 -4
  157. package/src/zexus.egg-info/SOURCES.txt +116 -9
  158. package/src/zexus.egg-info/entry_points.txt +1 -0
  159. package/src/zexus.egg-info/requires.txt +4 -0
@@ -20,12 +20,19 @@ LOWEST, TERNARY, ASSIGN_PREC, NULLISH_PREC, LOGICAL, EQUALS, LESSGREATER, SUM, P
  precedences = {
  QUESTION: TERNARY, # condition ? true : false (very low precedence)
  ASSIGN: ASSIGN_PREC,
+ PLUS_ASSIGN: ASSIGN_PREC,
+ MINUS_ASSIGN: ASSIGN_PREC,
+ STAR_ASSIGN: ASSIGN_PREC,
+ SLASH_ASSIGN: ASSIGN_PREC,
+ MOD_ASSIGN: ASSIGN_PREC,
+ POWER_ASSIGN: ASSIGN_PREC,
  NULLISH: NULLISH_PREC, # value ?? default
  OR: LOGICAL, AND: LOGICAL,
  EQ: EQUALS, NOT_EQ: EQUALS,
  LT: LESSGREATER, GT: LESSGREATER, LTE: LESSGREATER, GTE: LESSGREATER,
  PLUS: SUM, MINUS: SUM,
  SLASH: PRODUCT, STAR: PRODUCT, MOD: PRODUCT,
+ POWER: PREFIX, # ** has higher precedence than * and /
  LPAREN: CALL,
  LBRACKET: CALL,
  LBRACE: CALL, # Entity{...} constructor syntax
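The six compound-assignment tokens above all map to ASSIGN_PREC so the entire right-hand side binds before the assignment node is built, and POWER is registered above STAR and SLASH. A minimal Pratt-style lookup sketch with simplified stand-in token names (illustrative only, not the actual zexus token constants):

    # Illustrative sketch: trimmed precedence table plus the lookup helper a Pratt parser uses.
    LOWEST, ASSIGN_PREC, SUM, PRODUCT, PREFIX = 1, 2, 4, 5, 7
    precedences = {"+=": ASSIGN_PREC, "+": SUM, "*": PRODUCT, "**": PREFIX}

    def peek_precedence(token_type):
        # Unknown tokens fall back to LOWEST, which ends the infix loop.
        return precedences.get(token_type, LOWEST)

    # With "+=" on the lowest infix tier, `total += a * b ** 2` keeps the whole
    # right-hand side together before the assignment is constructed.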
@@ -60,12 +67,89 @@ class UltimateParser:
  else:
  self.use_advanced_parsing = False
 
+ # Statement dispatch table (O(1) lookup replacing if/elif chain)
+ self._statement_dispatch = {
+ LET: self.parse_let_statement,
+ CONST: self.parse_const_statement,
+ DATA: self.parse_data_statement,
+ RETURN: self.parse_return_statement,
+ CONTINUE: self.parse_continue_statement,
+ BREAK: self.parse_break_statement,
+ THROW: self.parse_throw_statement,
+ PRINT: self.parse_print_statement,
+ FOR: self.parse_for_each_statement,
+ SCREEN: self.parse_screen_statement,
+ COLOR: self.parse_color_statement,
+ CANVAS: self.parse_canvas_statement,
+ GRAPHICS: self.parse_graphics_statement,
+ ANIMATION: self.parse_animation_statement,
+ CLOCK: self.parse_clock_statement,
+ ACTION: self.parse_action_statement,
+ FUNCTION: self.parse_function_statement,
+ IF: self.parse_if_statement,
+ WHILE: self.parse_while_statement,
+ USE: self.parse_use_statement,
+ EXACTLY: self.parse_exactly_statement,
+ EXPORT: self.parse_export_statement,
+ DEBUG: self.parse_debug_statement,
+ TRY: self.parse_try_catch_statement,
+ EXTERNAL: self.parse_external_declaration,
+ ENTITY: self.parse_entity_statement,
+ VERIFY: self.parse_verify_statement,
+ CONTRACT: self.parse_contract_statement,
+ PROTECT: self.parse_protect_statement,
+ SEAL: self.parse_seal_statement,
+ AUDIT: self.parse_audit_statement,
+ RESTRICT: self.parse_restrict_statement,
+ SANDBOX: self.parse_sandbox_statement,
+ TRAIL: self.parse_trail_statement,
+ TX: self.parse_tx_statement,
+ NATIVE: self.parse_native_statement,
+ GC: self.parse_gc_statement,
+ INLINE: self.parse_inline_statement,
+ BUFFER: self.parse_buffer_statement,
+ SIMD: self.parse_simd_statement,
+ DEFER: self.parse_defer_statement,
+ PATTERN: self.parse_pattern_statement,
+ ENUM: self.parse_enum_statement,
+ STREAM: self.parse_stream_statement,
+ WATCH: self.parse_watch_statement,
+ EMIT: self.parse_emit_statement,
+ MODIFIER: self.parse_modifier_declaration,
+ # Security statements
+ CAPABILITY: self.parse_capability_statement,
+ GRANT: self.parse_grant_statement,
+ REVOKE: self.parse_revoke_statement,
+ VALIDATE: self.parse_validate_statement,
+ SANITIZE: self.parse_sanitize_statement,
+ INJECT: self.parse_inject_statement,
+ IMMUTABLE: self.parse_immutable_statement,
+ # Complexity statements
+ INTERFACE: self.parse_interface_statement,
+ TYPE_ALIAS: self.parse_type_alias_statement,
+ MODULE: self.parse_module_statement,
+ PACKAGE: self.parse_package_statement,
+ USING: self.parse_using_statement,
+ CHANNEL: self.parse_channel_statement,
+ SEND: self.parse_send_statement,
+ RECEIVE: self.parse_receive_statement,
+ ATOMIC: self.parse_atomic_statement,
+ # Blockchain statements
+ LEDGER: self.parse_ledger_statement,
+ STATE: self.parse_state_statement,
+ REQUIRE: self.parse_require_statement,
+ REVERT: self.parse_revert_statement,
+ LIMIT: self.parse_limit_statement,
+ }
+
  # Traditional parser setup (fallback)
  self.prefix_parse_fns = {
  IDENT: self.parse_identifier,
+ EVENT: self.parse_identifier,
  INT: self.parse_integer_literal,
  FLOAT: self.parse_float_literal,
  STRING: self.parse_string_literal,
+ INTERP_STRING: self.parse_interpolated_string,
  BANG: self.parse_prefix_expression,
  MINUS: self.parse_prefix_expression,
  TRUE: self.parse_boolean,
@@ -88,6 +172,7 @@ class UltimateParser:
  SANITIZE: self.parse_sanitize_expression, # FIX #4: Support sanitize as expression
  FIND: self.parse_find_expression,
  LOAD: self.parse_load_expression,
+ MATCH: self.parse_match_expression,
  }
  self.infix_parse_fns = {
  PLUS: self.parse_infix_expression,
@@ -95,6 +180,7 @@ class UltimateParser:
  SLASH: self.parse_infix_expression,
  STAR: self.parse_infix_expression,
  MOD: self.parse_infix_expression,
+ POWER: self.parse_infix_expression,
  EQ: self.parse_infix_expression,
  NOT_EQ: self.parse_infix_expression,
  LT: self.parse_infix_expression,
@@ -106,6 +192,12 @@ class UltimateParser:
  QUESTION: self.parse_ternary_expression, # condition ? true : false
  NULLISH: self.parse_nullish_expression, # value ?? default
  ASSIGN: self.parse_assignment_expression,
+ PLUS_ASSIGN: self.parse_compound_assignment_expression,
+ MINUS_ASSIGN: self.parse_compound_assignment_expression,
+ STAR_ASSIGN: self.parse_compound_assignment_expression,
+ SLASH_ASSIGN: self.parse_compound_assignment_expression,
+ MOD_ASSIGN: self.parse_compound_assignment_expression,
+ POWER_ASSIGN: self.parse_compound_assignment_expression,
  LAMBDA: self.parse_lambda_infix, # support arrow-style lambdas: params => body
  LPAREN: self.parse_call_expression,
  LBRACE: self.parse_constructor_call_expression, # Entity{field: value} syntax
@@ -125,6 +217,10 @@ class UltimateParser:
  lex.line,
  lex.column,
  lex.last_token_type,
+ getattr(lex, 'at_statement_boundary', True),
+ getattr(lex, 'paren_depth', 0),
+ getattr(lex, 'bracket_depth', 0),
+ getattr(lex, 'brace_depth', 0),
  )
 
  def _restore_lexer_state(self, snapshot):
@@ -138,7 +234,19 @@ class UltimateParser:
  self.lexer.line,
  self.lexer.column,
  self.lexer.last_token_type,
+ boundary,
+ paren_depth,
+ bracket_depth,
+ brace_depth,
  ) = snapshot
+ if hasattr(self.lexer, 'at_statement_boundary'):
+ self.lexer.at_statement_boundary = boundary
+ if hasattr(self.lexer, 'paren_depth'):
+ self.lexer.paren_depth = paren_depth
+ if hasattr(self.lexer, 'bracket_depth'):
+ self.lexer.bracket_depth = bracket_depth
+ if hasattr(self.lexer, 'brace_depth'):
+ self.lexer.brace_depth = brace_depth
 
  # ------------------------------------------------------------------
  # Legacy compatibility helpers
@@ -205,6 +313,13 @@ class UltimateParser:
  if not self.use_advanced_parsing:
  return self._parse_traditional()
 
+ # Large-file stability: advanced parsing performs whole-file analysis
+ # that can become expensive on very large sources. For long files, fall
+ # back to the traditional streaming parser which is more predictable.
+ if self._should_disable_advanced_for_size():
+ self.use_advanced_parsing = False
+ return self._parse_traditional()
+
  try:
  # OPTIMIZATION: Check if we already have tokens cached
  if not hasattr(self, '_cached_tokens'):
@@ -212,11 +327,43 @@ class UltimateParser:
 
  all_tokens = self._cached_tokens
 
+ # If the token stream is huge, prefer the traditional parser to avoid
+ # heavy multi-pass analysis and large intermediate structures.
+ try:
+ max_tokens = getattr(config, 'advanced_parsing_max_tokens', 50000)
+ if isinstance(max_tokens, int) and max_tokens > 0 and len(all_tokens) > max_tokens:
+ self.use_advanced_parsing = False
+ return self._parse_traditional()
+ except Exception:
+ pass
+
  # Arrow lambdas currently parse reliably via the traditional engine.
- # When the token stream contains the '=>' literal, switch to the
- # classic parser immediately to keep AST output deterministic.
- if any(t.type == LAMBDA and getattr(t, 'literal', None) == '=>'
- for t in all_tokens):
+ # When the token stream contains the '=>' literal OUTSIDE of a match
+ # block, switch to the classic parser to keep AST output deterministic.
+ # Match blocks use '=>' for case arms (e.g. 42 => "answer") so we
+ # must NOT bail out when arrows only appear inside match bodies.
+ has_non_match_arrow = False
+ in_match_brace = False
+ match_brace_depth = 0
+ for idx, t in enumerate(all_tokens):
+ if t.type == MATCH:
+ # Look ahead for opening brace
+ for k in range(idx + 1, min(idx + 10, len(all_tokens))):
+ if all_tokens[k].type == LBRACE:
+ in_match_brace = True
+ match_brace_depth = 1
+ break
+ elif in_match_brace:
+ if t.type == LBRACE:
+ match_brace_depth += 1
+ elif t.type == RBRACE:
+ match_brace_depth -= 1
+ if match_brace_depth == 0:
+ in_match_brace = False
+ elif t.type == LAMBDA and getattr(t, 'literal', None) == '=>':
+ has_non_match_arrow = True
+ break
+ if has_non_match_arrow:
  self.use_advanced_parsing = False
  return self._parse_traditional()
 
@@ -242,11 +389,18 @@ class UltimateParser:
 
  if self._should_verify_with_traditional(program, all_tokens):
  fallback_program, fallback_errors = self._parse_traditional_copy()
- if fallback_program and len(fallback_program.statements) > len(program.statements):
- self._log("🔁 Traditional parser produced a richer AST; switching to fallback result", "normal")
- self.errors = list(fallback_errors or [])
- self.use_advanced_parsing = False
- return fallback_program
+ # Only prefer the traditional parser if it produces significantly more
+ # statements (>50% more). A small difference often means the advanced
+ # parser correctly merged compound constructs (e.g. let x = match {...})
+ # that the traditional parser fragments into separate pieces.
+ if fallback_program:
+ adv_count = len(program.statements)
+ trad_count = len(fallback_program.statements)
+ if adv_count > 0 and trad_count > adv_count * 1.5:
+ self._log("🔁 Traditional parser produced a richer AST; switching to fallback result", "normal")
+ self.errors = list(fallback_errors or [])
+ self.use_advanced_parsing = False
+ return fallback_program
 
  self._log(f"✅ Parsing Complete: {len(program.statements)} statements, {len(self.errors)} errors", "minimal")
  return program
@@ -256,6 +410,26 @@ class UltimateParser:
  self.use_advanced_parsing = False
  return self._parse_traditional()
 
+ def _should_disable_advanced_for_size(self) -> bool:
+ """Heuristic guardrail to keep very large inputs stable.
+
+ Uses source line-count (cheap) to decide whether to skip advanced parsing.
+ """
+ try:
+ source = getattr(self.lexer, 'input', None)
+ if not isinstance(source, str) or not source:
+ return False
+
+ max_lines = getattr(config, 'advanced_parsing_max_lines', 2000)
+ if isinstance(max_lines, int) and max_lines > 0:
+ # count('\n') is linear but cheap compared to tokenization + analysis
+ line_count = source.count('\n') + 1
+ if line_count > max_lines:
+ return True
+ except Exception:
+ return False
+ return False
+
  def _advanced_result_needs_fallback(self, program):
  """Detect obvious advanced-parser failures and trigger traditional fallback."""
  try:
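The two guardrails above read their thresholds from the shared config object via getattr: advanced_parsing_max_lines (default 2000) and advanced_parsing_max_tokens (default 50000); a source that exceeds either limit is parsed with the traditional streaming parser instead. A hedged sketch of tuning those limits, assuming the parser's config is the zexus.config module and accepts plain attribute assignment (an assumption, not something this diff shows):

    # Hypothetical tuning; only the attribute names are taken from the getattr() calls above.
    from zexus import config

    config.advanced_parsing_max_lines = 5000     # allow longer sources in the advanced parser
    config.advanced_parsing_max_tokens = 100000  # allow larger token streams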
@@ -387,6 +561,10 @@ class UltimateParser:
  original_line = self.lexer.line
  original_column = self.lexer.column
  original_last_token_type = self.lexer.last_token_type
+ original_boundary = getattr(self.lexer, 'at_statement_boundary', True)
+ original_paren_depth = getattr(self.lexer, 'paren_depth', 0)
+ original_bracket_depth = getattr(self.lexer, 'bracket_depth', 0)
+ original_brace_depth = getattr(self.lexer, 'brace_depth', 0)
  original_cur = self.cur_token
  original_peek = self.peek_token
 
@@ -395,6 +573,14 @@ class UltimateParser:
  self.lexer.read_position = 0
  self.lexer.ch = ''
  self.lexer.last_token_type = None # ✅ CRITICAL: Reset context-aware state
+ if hasattr(self.lexer, 'at_statement_boundary'):
+ self.lexer.at_statement_boundary = True
+ if hasattr(self.lexer, 'paren_depth'):
+ self.lexer.paren_depth = 0
+ if hasattr(self.lexer, 'bracket_depth'):
+ self.lexer.bracket_depth = 0
+ if hasattr(self.lexer, 'brace_depth'):
+ self.lexer.brace_depth = 0
  self.lexer.read_char()
 
  # OPTIMIZATION: Pre-allocate list with reasonable capacity
@@ -420,6 +606,14 @@ class UltimateParser:
  self.lexer.line = original_line
  self.lexer.column = original_column
  self.lexer.last_token_type = original_last_token_type
+ if hasattr(self.lexer, 'at_statement_boundary'):
+ self.lexer.at_statement_boundary = original_boundary
+ if hasattr(self.lexer, 'paren_depth'):
+ self.lexer.paren_depth = original_paren_depth
+ if hasattr(self.lexer, 'bracket_depth'):
+ self.lexer.bracket_depth = original_bracket_depth
+ if hasattr(self.lexer, 'brace_depth'):
+ self.lexer.brace_depth = original_brace_depth
  self.cur_token = original_cur
  self.peek_token = original_peek
 
@@ -444,11 +638,21 @@ class UltimateParser:
  try:
  statement = self.context_parser.parse_block(block_info, all_tokens)
  if statement:
- program.statements.append(statement)
- parsed_count += 1
- if config.enable_debug_logs: # Only show detailed parsing in verbose mode
- stmt_type = type(statement).__name__
- self._log(f" ✅ Parsed: {stmt_type} at line {block_info['start_token'].line}", "verbose")
+ # Unwrap synthetic BlockStatements emitted by context strategies so inner statements flow to the program
+ from ..zexus_ast import BlockStatement as _BlockStatement
+
+ if isinstance(statement, _BlockStatement) and getattr(statement, "statements", None):
+ program.statements.extend(statement.statements)
+ parsed_count += len(statement.statements)
+ if config.enable_debug_logs:
+ stmt_types = ", ".join(type(stmt).__name__ for stmt in statement.statements)
+ self._log(f" ✅ Parsed composite block [{stmt_types}] at line {block_info['start_token'].line}", "verbose")
+ else:
+ program.statements.append(statement)
+ parsed_count += 1
+ if config.enable_debug_logs: # Only show detailed parsing in verbose mode
+ stmt_type = type(statement).__name__
+ self._log(f" ✅ Parsed: {stmt_type} at line {block_info['start_token'].line}", "verbose")
 
  except Exception as e:
  error_msg = f"Line {block_info['start_token'].line}: {str(e)}"
@@ -511,175 +715,22 @@ class UltimateParser:
  # Skip stray semicolons that may appear between statements
  if self.cur_token_is(SEMICOLON):
  return None
+ if self.cur_token_is(RBRACE):
+ return None
  try:
  node = None
- if self.cur_token_is(LET):
- node = self.parse_let_statement()
- elif self.cur_token_is(CONST):
- node = self.parse_const_statement()
- elif self.cur_token_is(DATA):
- node = self.parse_data_statement()
- elif self.cur_token_is(RETURN):
- node = self.parse_return_statement()
- elif self.cur_token_is(CONTINUE):
- node = self.parse_continue_statement()
- elif self.cur_token_is(BREAK):
- node = self.parse_break_statement()
- elif self.cur_token_is(THROW):
- node = self.parse_throw_statement()
- elif self.cur_token_is(PRINT):
- node = self.parse_print_statement()
- elif self.cur_token_is(FOR):
- node = self.parse_for_each_statement()
- elif self.cur_token_is(SCREEN):
- node = self.parse_screen_statement()
- elif self.cur_token_is(COLOR):
- node = self.parse_color_statement()
- elif self.cur_token_is(CANVAS):
- node = self.parse_canvas_statement()
- elif self.cur_token_is(GRAPHICS):
- node = self.parse_graphics_statement()
- elif self.cur_token_is(ANIMATION):
- node = self.parse_animation_statement()
- elif self.cur_token_is(CLOCK):
- node = self.parse_clock_statement()
- elif self.cur_token_is(ACTION):
- node = self.parse_action_statement()
- elif self.cur_token_is(FUNCTION):
- node = self.parse_function_statement()
- elif self.cur_token_is(IF):
- node = self.parse_if_statement()
- elif self.cur_token_is(WHILE):
- node = self.parse_while_statement()
- elif self.cur_token_is(USE):
- node = self.parse_use_statement()
- elif self.cur_token_is(EXACTLY):
- node = self.parse_exactly_statement()
- elif self.cur_token_is(EXPORT):
- node = self.parse_export_statement()
- elif self.cur_token_is(DEBUG):
- node = self.parse_debug_statement()
- elif self.cur_token_is(TRY):
- node = self.parse_try_catch_statement()
- elif self.cur_token_is(EXTERNAL):
- node = self.parse_external_declaration()
- elif self.cur_token_is(ENTITY):
- node = self.parse_entity_statement()
- elif self.cur_token_is(VERIFY):
- node = self.parse_verify_statement()
- elif self.cur_token_is(CONTRACT):
- node = self.parse_contract_statement()
- elif self.cur_token_is(PROTECT):
- node = self.parse_protect_statement()
- elif self.cur_token_is(SEAL):
- node = self.parse_seal_statement()
- elif self.cur_token_is(AUDIT):
- node = self.parse_audit_statement()
- elif self.cur_token_is(RESTRICT):
- node = self.parse_restrict_statement()
- elif self.cur_token_is(SANDBOX):
- node = self.parse_sandbox_statement()
- elif self.cur_token_is(TRAIL):
- node = self.parse_trail_statement()
- elif self.cur_token_is(TX):
- node = self.parse_tx_statement()
- elif self.cur_token_is(NATIVE):
- node = self.parse_native_statement()
- elif self.cur_token_is(GC):
- node = self.parse_gc_statement()
- elif self.cur_token_is(INLINE):
- node = self.parse_inline_statement()
- elif self.cur_token_is(BUFFER):
- node = self.parse_buffer_statement()
- elif self.cur_token_is(SIMD):
- node = self.parse_simd_statement()
- elif self.cur_token_is(DEFER):
- node = self.parse_defer_statement()
- elif self.cur_token_is(PATTERN):
- node = self.parse_pattern_statement()
- elif self.cur_token_is(ENUM):
- node = self.parse_enum_statement()
- elif self.cur_token_is(STREAM):
- node = self.parse_stream_statement()
- elif self.cur_token_is(WATCH):
- print(f"[PARSE_STMT] Matched WATCH", file=sys.stderr, flush=True)
- node = self.parse_watch_statement()
- elif self.cur_token_is(EMIT):
- print(f"[PARSE_STMT] Matched EMIT", file=sys.stderr, flush=True)
- node = self.parse_emit_statement()
- elif self.cur_token_is(MODIFIER):
- print(f"[PARSE_STMT] Matched MODIFIER", file=sys.stderr, flush=True)
- node = self.parse_modifier_declaration()
- # === SECURITY STATEMENT HANDLERS ===
- elif self.cur_token_is(CAPABILITY):
- print(f"[PARSE_STMT] Matched CAPABILITY", file=sys.stderr, flush=True)
- node = self.parse_capability_statement()
- elif self.cur_token_is(GRANT):
- print(f"[PARSE_STMT] Matched GRANT", file=sys.stderr, flush=True)
- node = self.parse_grant_statement()
- elif self.cur_token_is(REVOKE):
- print(f"[PARSE_STMT] Matched REVOKE", file=sys.stderr, flush=True)
- node = self.parse_revoke_statement()
- elif self.cur_token_is(VALIDATE):
- print(f"[PARSE_STMT] Matched VALIDATE", file=sys.stderr, flush=True)
- node = self.parse_validate_statement()
- elif self.cur_token_is(SANITIZE):
- print(f"[PARSE_STMT] Matched SANITIZE", file=sys.stderr, flush=True)
- node = self.parse_sanitize_statement()
- elif self.cur_token_is(INJECT):
- print(f"[PARSE_STMT] Matched INJECT", file=sys.stderr, flush=True)
- node = self.parse_inject_statement()
- elif self.cur_token_is(IMMUTABLE):
- print(f"[PARSE_STMT] Matched IMMUTABLE", file=sys.stderr, flush=True)
- node = self.parse_immutable_statement()
- # === COMPLEXITY STATEMENT HANDLERS ===
- elif self.cur_token_is(INTERFACE):
- print(f"[PARSE_STMT] Matched INTERFACE", file=sys.stderr, flush=True)
- node = self.parse_interface_statement()
- elif self.cur_token_is(TYPE_ALIAS):
- print(f"[PARSE_STMT] Matched TYPE_ALIAS", file=sys.stderr, flush=True)
- node = self.parse_type_alias_statement()
- elif self.cur_token_is(MODULE):
- print(f"[PARSE_STMT] Matched MODULE", file=sys.stderr, flush=True)
- node = self.parse_module_statement()
- elif self.cur_token_is(PACKAGE):
- print(f"[PARSE_STMT] Matched PACKAGE", file=sys.stderr, flush=True)
- node = self.parse_package_statement()
- elif self.cur_token_is(USING):
- print(f"[PARSE_STMT] Matched USING", file=sys.stderr, flush=True)
- node = self.parse_using_statement()
- elif self.cur_token_is(CHANNEL):
- print(f"[PARSE_STMT] Matched CHANNEL", file=sys.stderr, flush=True)
- node = self.parse_channel_statement()
- elif self.cur_token_is(SEND):
- print(f"[PARSE_STMT] Matched SEND", file=sys.stderr, flush=True)
- node = self.parse_send_statement()
- elif self.cur_token_is(RECEIVE):
- print(f"[PARSE_STMT] Matched RECEIVE", file=sys.stderr, flush=True)
- node = self.parse_receive_statement()
- elif self.cur_token_is(ATOMIC):
- print(f"[PARSE_STMT] Matched ATOMIC", file=sys.stderr, flush=True)
- node = self.parse_atomic_statement()
- # === BLOCKCHAIN STATEMENT HANDLERS ===
- elif self.cur_token_is(LEDGER):
- print(f"[PARSE_STMT] Matched LEDGER", file=sys.stderr, flush=True)
- node = self.parse_ledger_statement()
- elif self.cur_token_is(STATE):
- print(f"[PARSE_STMT] Matched STATE", file=sys.stderr, flush=True)
- node = self.parse_state_statement()
- elif self.cur_token_is(REQUIRE):
- node = self.parse_require_statement()
- elif self.cur_token_is(REVERT):
- print(f"[PARSE_STMT] Matched REVERT", file=sys.stderr, flush=True)
- node = self.parse_revert_statement()
- elif self.cur_token_is(LIMIT):
- print(f"[PARSE_STMT] Matched LIMIT", file=sys.stderr, flush=True)
- node = self.parse_limit_statement()
+ tok_type = self.cur_token.type
+ handler = self._statement_dispatch.get(tok_type)
+ if handler is not None:
+ node = handler()
  else:
- print(f"[PARSE_STMT] No match, falling back to expression statement", file=sys.stderr, flush=True)
  node = self.parse_expression_statement()
 
  if node is not None:
+ # Attach source location for debugger / error reporting
+ if self.cur_token and not getattr(node, 'line', 0):
+ node.line = getattr(self.cur_token, 'line', 0) or 0
+ node.column = getattr(self.cur_token, 'column', 0) or 0
  return attach_modifiers(node, modifiers)
  return None
  except Exception as e:
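The hunk above replaces the long if/elif keyword chain in parse_statement with a single lookup into self._statement_dispatch, so registering a new statement type becomes one table entry and the per-branch stderr prints disappear. A minimal sketch of the dictionary-dispatch pattern with hypothetical handler names (not the real UltimateParser API):

    # Illustrative dictionary dispatch; handlers take no arguments, mirroring the diff.
    class MiniParser:
        def __init__(self):
            self.dispatch = {"LET": self.parse_let, "RETURN": self.parse_return}

        def parse_let(self):
            return "LetStatement"

        def parse_return(self):
            return "ReturnStatement"

        def parse_statement(self, token_type):
            handler = self.dispatch.get(token_type)
            # Unknown keywords fall back to expression parsing, as in the diff.
            return handler() if handler else "ExpressionStatement"

Lookup cost stays constant no matter how many statement keywords are registered.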
@@ -736,8 +787,13 @@ class UltimateParser:
  """Parse { } block with tolerance for missing closing brace"""
  block = BlockStatement()
  self.next_token()
- import sys
- print(f"[BLOCK_START] Entering brace block, first token: {self.cur_token.type}={repr(self.cur_token.literal)}", file=sys.stderr, flush=True)
+ debug_enabled = config.enable_debug_logs
+ if debug_enabled:
+ print(
+ f"[BLOCK_START] Entering brace block, first token: {self.cur_token.type}={repr(self.cur_token.literal)}",
+ file=sys.stderr,
+ flush=True,
+ )
 
  brace_count = 1
  stmt_count = 0
@@ -748,19 +804,38 @@ class UltimateParser:
  brace_count -= 1
  if brace_count == 0:
  break
+ # Skip standalone closing braces from nested blocks without parsing a statement
+ self.next_token()
+ continue
 
- print(f"[BLOCK_STMT] About to parse statement {stmt_count}, token: {self.cur_token.type}={repr(self.cur_token.literal)}", file=sys.stderr, flush=True)
+ if debug_enabled:
+ print(
+ f"[BLOCK_STMT] About to parse statement {stmt_count}, token: {self.cur_token.type}={repr(self.cur_token.literal)}",
+ file=sys.stderr,
+ flush=True,
+ )
  stmt = self.parse_statement()
- print(f"[BLOCK_STMT] Parsed statement {stmt_count}: {type(stmt).__name__ if stmt else 'None'}", file=sys.stderr, flush=True)
+ if debug_enabled:
+ print(
+ f"[BLOCK_STMT] Parsed statement {stmt_count}: {type(stmt).__name__ if stmt else 'None'}",
+ file=sys.stderr,
+ flush=True,
+ )
  if stmt is not None:
  block.statements.append(stmt)
  self.next_token()
  stmt_count += 1
 
- print(f"[BLOCK_END] Finished block with {len(block.statements)} statements", file=sys.stderr, flush=True)
+ if debug_enabled:
+ print(
+ f"[BLOCK_END] Finished block with {len(block.statements)} statements",
+ file=sys.stderr,
+ flush=True,
+ )
  # TOLERANT: Don't error if we hit EOF without closing brace
  if self.cur_token_is(EOF) and brace_count > 0:
- self.errors.append(f"Line {self.cur_token.line}: Unclosed block (reached EOF)")
+ # Tolerant mode: allow missing closing braces at EOF without failing hard
+ brace_count = 0
 
  return block
 
@@ -781,8 +856,9 @@ class UltimateParser:
 
  def parse_if_statement(self):
  """Tolerant if statement parser with elif support"""
- import sys
- print(f"[PARSE_IF] Starting if statement parsing", file=sys.stderr, flush=True)
+ debug_enabled = config.enable_debug_logs
+ if debug_enabled:
+ print("[PARSE_IF] Starting if statement parsing", file=sys.stderr, flush=True)
  # Skip IF token
  self.next_token()
 
@@ -804,48 +880,99 @@ class UltimateParser:
  )
  raise error
 
- print(f"[PARSE_IF] Parsed condition, now at token: {self.cur_token.type}={repr(self.cur_token.literal)}", file=sys.stderr, flush=True)
+ if debug_enabled:
+ print(
+ f"[PARSE_IF] Parsed condition, now at token: {self.cur_token.type}={repr(self.cur_token.literal)}",
+ file=sys.stderr,
+ flush=True,
+ )
  # Parse consequence (flexible block style)
  consequence = self.parse_block("if")
- print(f"[PARSE_IF] Parsed consequence block, now at token: {self.cur_token.type}={repr(self.cur_token.literal)}", file=sys.stderr, flush=True)
+ if debug_enabled:
+ print(
+ f"[PARSE_IF] Parsed consequence block, now at token: {self.cur_token.type}={repr(self.cur_token.literal)}",
+ file=sys.stderr,
+ flush=True,
+ )
  if not consequence:
  return None
 
- # Parse elif clauses
- elif_parts = []
- while self.cur_token_is(ELIF):
- self.next_token() # Move past elif
-
- # Parse elif condition (with or without parentheses)
+ def _parse_conditional_clause(keyword):
+ """Parse an if/elif/else-if condition allowing optional parentheses."""
  if self.cur_token_is(LPAREN):
  self.next_token() # Skip (
- elif_condition = self.parse_expression(LOWEST)
+ clause_condition = self.parse_expression(LOWEST)
  if not self.expect_peek(RPAREN):
- # Expected closing paren after elif condition
  return None
  else:
- # No parentheses - parse expression directly
- elif_condition = self.parse_expression(LOWEST)
-
- if not elif_condition:
+ clause_condition = self.parse_expression(LOWEST)
+
+ if not clause_condition:
  error = self._create_parse_error(
- "Expected condition after 'elif'",
- suggestion="Add a condition expression: elif (condition) { ... }"
+ f"Expected condition after '{keyword}'",
+ suggestion=f"Add a condition expression: {keyword} (condition) {{ ... }}"
  )
  raise error
-
- # Parse elif consequence block
- elif_consequence = self.parse_block("elif")
- if not elif_consequence:
- return None
-
- elif_parts.append((elif_condition, elif_consequence))
 
- # Parse else clause
+ return clause_condition
+
+ # Parse elif / else-if chains (using lookahead so we keep the closing brace as current token)
+ elif_parts = []
  alternative = None
- if self.cur_token_is(ELSE):
- self.next_token()
- alternative = self.parse_block("else")
+
+ while True:
+ if debug_enabled:
+ print(
+ f"[PARSE_IF] After consequence, current={self.cur_token.type}, peek={self.peek_token.type if self.peek_token else None}",
+ file=sys.stderr,
+ flush=True,
+ )
+ if self.peek_token_is(ELIF):
+ self.next_token() # Move to 'elif'
+ self.next_token() # Advance to first token of condition
+ if debug_enabled:
+ print("[PARSE_IF] Detected 'elif' clause", file=sys.stderr, flush=True)
+ clause_condition = _parse_conditional_clause("elif")
+ if clause_condition is None:
+ return None
+
+ clause_block = self.parse_block("elif")
+ if not clause_block:
+ return None
+
+ elif_parts.append((clause_condition, clause_block))
+ continue
+
+ if self.peek_token_is(ELSE):
+ self.next_token() # Move to 'else'
+
+ # Support `else if` by converting it into another elif clause
+ if self.peek_token_is(IF):
+ self.next_token() # Move to 'if'
+ self.next_token() # Advance to first token of condition
+ if debug_enabled:
+ print("[PARSE_IF] Detected 'else if' clause", file=sys.stderr, flush=True)
+ clause_condition = _parse_conditional_clause("else if")
+ if clause_condition is None:
+ return None
+
+ clause_block = self.parse_block("elif")
+ if not clause_block:
+ return None
+
+ elif_parts.append((clause_condition, clause_block))
+ if debug_enabled:
+ print("[PARSE_IF] Completed 'else if' clause", file=sys.stderr, flush=True)
+ continue
+
+ if debug_enabled:
+ print("[PARSE_IF] Detected plain 'else' clause", file=sys.stderr, flush=True)
+ alternative = self.parse_block("else")
+ if not alternative:
+ return None
+ break
+
+ break
 
  return IfStatement(condition=condition, consequence=consequence, elif_parts=elif_parts, alternative=alternative)
 
@@ -951,11 +1078,101 @@ class UltimateParser:
 
  return FunctionStatement(name=name, parameters=parameters, body=body, return_type=return_type)
 
+ def _parse_destructure_pattern(self):
+ """Parse a destructuring pattern: {a, b: renamed} or [x, y, ..rest]"""
+ from ..zexus_ast import DestructurePattern
+ if self.cur_token_is(LBRACE):
+ # Map destructuring: {a, b, c: renamed}
+ bindings = []
+ self.next_token() # skip {
+ while not self.cur_token_is(RBRACE) and not self.cur_token_is(EOF):
+ if self.cur_token_is(COMMA):
+ self.next_token()
+ continue
+ if not self.cur_token_is(IDENT):
+ self.errors.append(f"Expected identifier in map destructure, got {self.cur_token.type}")
+ return None
+ source_key = self.cur_token.literal
+ target_name = source_key # default: same name
+ if self.peek_token_is(COLON):
+ self.next_token() # skip :
+ self.next_token() # move to target name
+ if not self.cur_token_is(IDENT):
+ self.errors.append("Expected identifier after ':' in map destructure")
+ return None
+ target_name = self.cur_token.literal
+ bindings.append((source_key, target_name))
+ self.next_token()
+ # cur_token should be RBRACE
+ return DestructurePattern(kind='map', bindings=bindings)
+ elif self.cur_token_is(LBRACKET):
+ # List destructuring: [x, y, ..rest]
+ bindings = []
+ rest = None
+ idx = 0
+ self.next_token() # skip [
+ while not self.cur_token_is(RBRACKET) and not self.cur_token_is(EOF):
+ if self.cur_token_is(COMMA):
+ self.next_token()
+ continue
+ # Check for rest element: ..rest (lexed as DOT DOT IDENT)
+ if self.cur_token.literal == '.':
+ # Consume second dot
+ self.next_token()
+ if self.cur_token.literal == '.':
+ self.next_token() # move to rest identifier
+ if self.cur_token_is(IDENT):
+ rest = self.cur_token.literal
+ self.next_token()
+ continue
+ # If not a valid ..rest, skip
+ continue
+ if self.cur_token_is(IDENT) and self.cur_token.literal.startswith('..'):
+ rest = self.cur_token.literal[2:]
+ self.next_token()
+ continue
+ if not self.cur_token_is(IDENT):
+ self.errors.append(f"Expected identifier in list destructure, got {self.cur_token.type}")
+ return None
+ bindings.append((idx, self.cur_token.literal))
+ idx += 1
+ self.next_token()
+ # cur_token should be RBRACKET
+ return DestructurePattern(kind='list', bindings=bindings, rest=rest)
+ return None
+
  def parse_let_statement(self):
- """Tolerant let statement parser"""
+ """Tolerant let statement parser with destructuring and type annotation support
+
+ let x = value
+ let x: int = value (type annotation)
+ let {a, b} = map_expr
+ let [x, y] = list_expr
+ """
  stmt = LetStatement(name=None, value=None)
 
- if not self.expect_peek(IDENT):
+ # Check for destructuring pattern
+ if self.peek_token_is(LBRACE) or self.peek_token_is(LBRACKET):
+ self.next_token() # move to { or [
+ pattern = self._parse_destructure_pattern()
+ if pattern is None:
+ return None
+ stmt.name = pattern
+ # Expect = after pattern
+ if self.peek_token_is(ASSIGN):
+ self.next_token()
+ else:
+ self.errors.append("Expected '=' after destructuring pattern")
+ return None
+ self.next_token()
+ stmt.value = self.parse_expression(LOWEST)
+ if self.peek_token_is(SEMICOLON):
+ self.next_token()
+ return stmt
+
+ if self.peek_token_is(IDENT) or self.peek_token_is(EVENT):
+ self.next_token()
+ else:
  error = self._create_parse_error(
  "Expected variable name after 'let'",
  suggestion="Use 'let' to declare a variable: let myVariable = value"
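Concretely, for `let {a, b: renamed} = m` the map branch of _parse_destructure_pattern returns DestructurePattern(kind='map', bindings=[('a', 'a'), ('b', 'renamed')]), and for `let [x, y, ..rest] = xs` the list branch returns kind='list', bindings=[(0, 'x'), (1, 'y')], rest='rest'; these shapes are read directly from the construction calls in the hunk above.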
@@ -964,8 +1181,33 @@ class UltimateParser:
 
  stmt.name = Identifier(value=self.cur_token.literal)
 
- # TOLERANT: Allow both = and : for assignment
- if self.peek_token_is(ASSIGN) or (self.peek_token_is(COLON) and self.peek_token.literal == ":"):
+ # Disambiguate `:` could be type annotation (let x: int = ...) or
+ # old-style assignment (let x: value). If `:` is followed by an
+ # IDENT and then `=`, treat it as a type annotation.
+ if self.peek_token_is(COLON) and self.peek_token.literal == ":":
+ # Peek two ahead to see if this is `name: TYPE = value`
+ saved_pos = getattr(self, '_saved_pos', None)
+ # Manual two-token lookahead
+ self.next_token() # move to :
+ if self.peek_token_is(IDENT):
+ # Could be type annotation — check if IDENT is followed by =
+ type_tok = self.peek_token
+ self.next_token() # move to potential type token
+ if self.peek_token_is(ASSIGN):
+ # It IS a type annotation: let x: int = value
+ stmt.type_annotation = self.cur_token.literal
+ self.next_token() # move to =
+ else:
+ # It's old-style assignment: let x: value
+ # cur_token is the first token of the value expression
+ stmt.value = self.parse_expression(LOWEST)
+ if self.peek_token_is(SEMICOLON):
+ self.next_token()
+ return stmt
+ else:
+ # Not IDENT after `:` — old-style assignment
+ pass # fall through to parse value
+ elif self.peek_token_is(ASSIGN):
  self.next_token()
  else:
  self.errors.append("Expected '=' or ':' after variable name")
@@ -981,20 +1223,59 @@ class UltimateParser:
  return stmt
 
  def parse_const_statement(self):
- """Tolerant const statement parser - immutable variable declaration
+ """Tolerant const statement parser with destructuring and type annotation support
 
- Syntax: const NAME = value;
+ const NAME = value;
+ const PI: float = 3.14;
+ const {a, b} = map_expr;
+ const [x, y] = list_expr;
  """
  stmt = ConstStatement(name=None, value=None)
 
- if not self.expect_peek(IDENT):
+ # Check for destructuring pattern
+ if self.peek_token_is(LBRACE) or self.peek_token_is(LBRACKET):
+ self.next_token() # move to { or [
+ pattern = self._parse_destructure_pattern()
+ if pattern is None:
+ return None
+ stmt.name = pattern
+ if self.peek_token_is(ASSIGN):
+ self.next_token()
+ else:
+ self.errors.append("Expected '=' after destructuring pattern")
+ return None
+ self.next_token()
+ stmt.value = self.parse_expression(LOWEST)
+ if self.peek_token_is(SEMICOLON):
+ self.next_token()
+ return stmt
+
+ if self.peek_token_is(IDENT) or self.peek_token_is(EVENT):
+ self.next_token()
+ else:
  self.errors.append("Expected variable name after 'const'")
  return None
 
  stmt.name = Identifier(value=self.cur_token.literal)
 
- # TOLERANT: Allow both = and : for assignment
- if self.peek_token_is(ASSIGN) or (self.peek_token_is(COLON) and self.peek_token.literal == ":"):
+ # Disambiguate `:` type annotation vs old-style assignment
+ if self.peek_token_is(COLON) and self.peek_token.literal == ":":
+ self.next_token() # move to :
+ if self.peek_token_is(IDENT):
+ type_tok = self.peek_token
+ self.next_token() # move to potential type token
+ if self.peek_token_is(ASSIGN):
+ stmt.type_annotation = self.cur_token.literal
+ self.next_token() # move to =
+ else:
+ # Old-style assignment: const x: value
+ stmt.value = self.parse_expression(LOWEST)
+ if self.peek_token_is(SEMICOLON):
+ self.next_token()
+ return stmt
+ else:
+ pass # fall through to parse value
+ elif self.peek_token_is(ASSIGN):
  self.next_token()
  else:
  self.errors.append("Expected '=' or ':' after variable name in const declaration")
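The disambiguation in both parse_let_statement and parse_const_statement works by two-token lookahead after the ':': if the next token is an IDENT followed by '=', the identifier is stored as type_annotation (so `let x: int = 1` and `const PI: float = 3.14` record 'int' and 'float'); if the IDENT is not followed by '=', that identifier itself is parsed as the value (the old `let x: value` form); and if the token after ':' is not an IDENT at all, control falls through to the ordinary value-parsing path.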
@@ -1368,13 +1649,14 @@ class UltimateParser:
  """
  import sys
  # Debug logging (fail silently if file operations fail)
- try:
- log_path = os.path.join(tempfile.gettempdir(), 'parser_log.txt')
- with open(log_path, 'a') as f:
- f.write(f"=== parse_print_statement CALLED ===\n")
- f.flush()
- except (IOError, OSError, PermissionError):
- pass # Silently ignore debug logging errors
+ if getattr(config, 'enable_parser_debug', False):
+ try:
+ log_path = os.path.join(tempfile.gettempdir(), 'parser_log.txt')
+ with open(log_path, 'a') as f:
+ f.write(f"=== parse_print_statement CALLED ===\n")
+ f.flush()
+ except (IOError, OSError, PermissionError):
+ pass # Silently ignore debug logging errors
 
  stmt = PrintStatement(values=[])
  self.next_token()
@@ -1442,10 +1724,17 @@ class UltimateParser:
  if not catch_block:
  return None
 
+ # Check for optional 'finally' block
+ finally_block = None
+ if self.peek_token_is(FINALLY):
+ self.next_token() # consume 'finally'
+ finally_block = self.parse_block("finally")
+
  return TryCatchStatement(
  try_block=try_block,
  error_variable=error_var,
- catch_block=catch_block
+ catch_block=catch_block,
+ finally_block=finally_block
  )
 
  def parse_debug_statement(self):
@@ -1665,6 +1954,29 @@ class UltimateParser:
  expression.value = self.parse_expression(LOWEST)
  return expression
 
+ def parse_compound_assignment_expression(self, left):
+ """Parse compound assignment: x += 5 → x = x + 5"""
+ if not isinstance(left, (Identifier, PropertyAccessExpression)):
+ self.errors.append(f"Line {self.cur_token.line}:{self.cur_token.column} - Cannot use compound assignment on {type(left).__name__}, only identifiers and properties allowed")
+ return None
+
+ # Map compound operator token to the underlying arithmetic operator
+ op_map = {
+ PLUS_ASSIGN: "+",
+ MINUS_ASSIGN: "-",
+ STAR_ASSIGN: "*",
+ SLASH_ASSIGN: "/",
+ MOD_ASSIGN: "%",
+ POWER_ASSIGN: "**",
+ }
+ operator = op_map.get(self.cur_token.type, "+")
+ self.next_token()
+ right = self.parse_expression(LOWEST)
+
+ # Desugar: x += expr → x = x + expr
+ infix = InfixExpression(left=left, operator=operator, right=right)
+ return AssignmentExpression(name=left, value=infix)
+
  def parse_method_call_expression(self, left):
  if not self.cur_token_is(DOT):
  return None
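parse_compound_assignment_expression desugars the operator at parse time, so later stages see an ordinary assignment whose value is an infix expression and no dedicated += opcode is needed. A small sketch of the same desugaring over plain tuples (hypothetical node shapes, not the real zexus_ast classes):

    # Desugar ("+=", target, value) into ("assign", target, ("+", target, value)).
    OP_MAP = {"+=": "+", "-=": "-", "*=": "*", "/=": "/", "%=": "%", "**=": "**"}

    def desugar_compound(op, target, value):
        return ("assign", target, (OP_MAP[op], target, value))

    # desugar_compound("+=", "x", 5) -> ("assign", "x", ("+", "x", 5)),
    # i.e. x += 5 behaves exactly like x = x + 5.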
@@ -1691,29 +2003,41 @@ class UltimateParser:
  def parse_export_statement(self):
  token = self.cur_token
 
- # Check for syntactic sugar: export action name() {} or export function name() {}
- if self.peek_token_is(ACTION) or self.peek_token_is(FUNCTION):
- self.next_token() # Move to ACTION/FUNCTION token
-
- # Parse the action/function normally
- if self.cur_token_is(ACTION):
- func_stmt = self.parse_action_statement()
- else: # FUNCTION
- func_stmt = self.parse_function_statement()
-
- if func_stmt is None:
+ keyword_parsers = {
+ ACTION: self.parse_action_statement,
+ FUNCTION: self.parse_function_statement,
+ CONTRACT: self.parse_contract_statement,
+ CONST: self.parse_const_statement,
+ LET: self.parse_let_statement,
+ DATA: self.parse_data_statement,
+ }
+
+ if self.peek_token and self.peek_token.type in keyword_parsers:
+ keyword_type = self.peek_token.type
+ self.next_token() # Move to the declaration keyword
+ declaration = keyword_parsers[keyword_type]()
+
+ if declaration is None:
  return None
-
- # Extract the function name for export
- func_name = func_stmt.name.value if hasattr(func_stmt.name, 'value') else str(func_stmt.name)
-
- # Create a compound statement: the function definition + export
- # We'll use a BlockStatement to hold both
- from ..zexus_ast import BlockStatement, ExportStatement
- export_stmt = ExportStatement(names=[Identifier(func_name)])
-
- # Return a block containing both statements
- return BlockStatement(statements=[func_stmt, export_stmt])
+
+ decl_name = getattr(declaration, "name", None)
+ export_names = []
+
+ if isinstance(decl_name, Identifier):
+ export_names.append(decl_name)
+ elif decl_name is not None:
+ export_names.append(Identifier(str(decl_name)))
+
+ if not export_names:
+ self.errors.append(
+ f"Line {token.line}:{token.column} - Unable to determine export name"
+ )
+ return declaration
+
+ export_stmt = ExportStatement(names=export_names)
+ block = BlockStatement()
+ block.statements.extend([declaration, export_stmt])
+ return block
 
  names = []
 
@@ -2614,12 +2938,16 @@ class UltimateParser:
  self.errors.append("Expected parameter name")
  return None
 
- params.append(Identifier(self.cur_token.literal))
+ param_name = self.cur_token.literal
+ param_type = None
 
- # Skip optional type annotation: : type
+ # Capture optional type annotation: : type
  if self.peek_token_is(COLON):
  self.next_token() # Move to :
- self.next_token() # Move to type (skip it)
+ self.next_token() # Move to type
+ param_type = self.cur_token.literal
+
+ params.append(Identifier(param_name, type_annotation=param_type))
 
  while self.peek_token_is(COMMA):
  self.next_token()
@@ -2627,12 +2955,16 @@ class UltimateParser:
  if not self.cur_token_is(IDENT):
  self.errors.append("Expected parameter name after comma")
  return None
- params.append(Identifier(self.cur_token.literal))
+ param_name = self.cur_token.literal
+ param_type = None
 
- # Skip optional type annotation: : type
+ # Capture optional type annotation: : type
  if self.peek_token_is(COLON):
  self.next_token() # Move to :
- self.next_token() # Move to type (skip it)
+ self.next_token() # Move to type
+ param_type = self.cur_token.literal
+
+ params.append(Identifier(param_name, type_annotation=param_type))
 
  if not self.expect_peek(RPAREN):
  self.errors.append("Expected ')' after parameters")
@@ -2802,9 +3134,8 @@ class UltimateParser:
2802
3134
  file_path = self.cur_token.literal
2803
3135
 
2804
3136
  alias = None
2805
- if self.peek_token_is(IDENT) and self.peek_token.literal == "as":
2806
- self.next_token()
2807
- self.next_token()
3137
+ if self.peek_token_is(AS) or (self.peek_token_is(IDENT) and self.peek_token.literal == "as"):
3138
+ self.next_token() # consume 'as'
2808
3139
  if not self.expect_peek(IDENT):
2809
3140
  self.errors.append("Expected alias name after 'as'")
2810
3141
  return None
@@ -2955,6 +3286,17 @@ class UltimateParser:
 
  def parse_return_statement(self):
  stmt = ReturnStatement(return_value=None)
+ # Handle bare `return` without value
+ if self.peek_token_is(SEMICOLON):
+ # Advance to semicolon so outer loop can consume it on next iteration
+ self.next_token()
+ return stmt
+
+ if self.peek_token_is(RBRACE) or self.peek_token_is(EOF):
+ # No explicit return value; leave current token on 'return'
+ return stmt
+
+ # Otherwise parse the return value expression
  self.next_token()
  stmt.return_value = self.parse_expression(LOWEST)
  if self.peek_token_is(SEMICOLON):
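With the added guards, a bare 'return' followed by ';', '}', or end of input yields a ReturnStatement whose return_value stays None, and only other tokens trigger expression parsing. A tiny standalone sketch of that peek-based decision, with plain strings standing in for the parser's token types:

def classify_return(peek_type: str) -> str:
    """Decide how a bare/valued 'return' should be handled from the next token."""
    if peek_type == "SEMICOLON":
        return "bare return; consume the semicolon"
    if peek_type in ("RBRACE", "EOF"):
        return "bare return; leave the current token in place"
    return "parse the following expression as the return value"

for peek in ("SEMICOLON", "RBRACE", "EOF", "IDENT"):
    print(peek, "->", classify_return(peek))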
@@ -3004,6 +3346,8 @@ class UltimateParser:
  if left_exp is None:
  return None
 
+ debug_enabled = config.enable_debug_logs
+
  # Stop parsing when we hit closing delimiters or terminators
  # This prevents the parser from trying to parse beyond expression boundaries
  while (not self.peek_token_is(SEMICOLON) and
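This hunk and several below share one pattern: unconditional parser trace prints are gated behind config.enable_debug_logs, read once into a local debug_enabled flag before the loop runs. A minimal sketch of the pattern with a stand-in config object (the package's real config module is not reproduced here):

from types import SimpleNamespace

# Stand-in for the package's config; only the flag used by the parser is modelled.
config = SimpleNamespace(enable_debug_logs=False)

def parse_loop(tokens):
    debug_enabled = config.enable_debug_logs  # read the flag once, outside the loop
    for i, tok in enumerate(tokens):
        if debug_enabled:
            print(f"[EXPR LOOP] position={i} token={tok!r}")
        # ... actual parsing work would happen here ...

parse_loop(["a", "+", "b"])   # silent
config.enable_debug_logs = True
parse_loop(["a", "+", "b"])   # prints one trace line per token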
@@ -3014,11 +3358,17 @@ class UltimateParser:
  not self.peek_token_is(LBRACE) and
  precedence <= self.peek_precedence()):
 
- print(f"[EXPR LOOP] cur={self.cur_token.literal}@L{self.cur_token.line}, peek={self.peek_token.literal}@L{self.peek_token.line}, precedence={precedence}, peek_prec={self.peek_precedence()}")
+ if debug_enabled:
+ print(
+ f"[EXPR LOOP] cur={self.cur_token.literal}@L{self.cur_token.line}, peek={self.peek_token.literal}@L{self.peek_token.line}, precedence={precedence}, peek_prec={self.peek_precedence()}"
+ )
  # CRITICAL FIX: Stop if next token is on a new line and could start a new statement
  # This prevents expressions from spanning multiple logical lines
  if self.cur_token.line < self.peek_token.line:
- print(f"[NEWLINE CHECK] cur_line={self.cur_token.line}, peek_line={self.peek_token.line}, peek_type={self.peek_token.type}, peek_lit={self.peek_token.literal}")
+ if debug_enabled:
+ print(
+ f"[NEWLINE CHECK] cur_line={self.cur_token.line}, peek_line={self.peek_token.line}, peek_type={self.peek_token.type}, peek_lit={self.peek_token.literal}"
+ )
  # Next token is on a new line - check if it could start a new statement
  next_could_be_statement = (
  self.peek_token.type == IDENT or
@@ -3027,9 +3377,12 @@ class UltimateParser:
  self.peek_token.type == RETURN or
  self.peek_token.type == IF or
  self.peek_token.type == WHILE or
- self.peek_token.type == FOR
+ self.peek_token.type == FOR or
+ self.peek_token.type == FUNCTION or
+ self.peek_token.type == ACTION
  )
- print(f"[NEWLINE CHECK] next_could_be_statement={next_could_be_statement}")
+ if debug_enabled:
+ print(f"[NEWLINE CHECK] next_could_be_statement={next_could_be_statement}")
  if next_could_be_statement:
  # Additional check: is the next token followed by [ or = ?
  # This would indicate it's an assignment/index expression starting
@@ -3125,8 +3478,9 @@ class UltimateParser:
  def parse_identifier(self):
  # Allow DEBUG keyword to be used as identifier in expression contexts
  # This enables debug(value) function calls while keeping debug value; statements
- if self.cur_token.type == DEBUG:
- return Identifier(value="debug")
+ if self.cur_token.type in {DEBUG, EVENT}:
+ literal = getattr(self.cur_token, 'literal', None)
+ return Identifier(value=literal if literal is not None else self.cur_token.type.lower())
  return Identifier(value=self.cur_token.literal)
 
  def parse_integer_literal(self):
@@ -3146,17 +3500,44 @@ class UltimateParser:
  def parse_string_literal(self):
  return StringLiteral(value=self.cur_token.literal)
 
+ def parse_interpolated_string(self):
+ """Parse a string with ${expr} interpolation.
+
+ The token literal is a list of ("str", text) or ("expr", source) tuples
+ produced by the lexer. For each "expr" part, we create a sub-lexer and
+ sub-parser to parse the expression source into an AST node.
+ """
+ raw_parts = self.cur_token.literal
+ parsed_parts = []
+ for part_type, part_value in raw_parts:
+ if part_type == "str":
+ parsed_parts.append(("str", part_value))
+ elif part_type == "expr":
+ # Parse the expression using a sub-parser
+ sub_lexer = Lexer(part_value)
+ sub_parser = Parser(sub_lexer)
+ expr_node = sub_parser.parse_expression(LOWEST)
+ if expr_node is None:
+ # Fallback: treat as empty string
+ parsed_parts.append(("str", ""))
+ else:
+ parsed_parts.append(("expr", expr_node))
+ return StringInterpolationExpression(parts=parsed_parts)
+
  def parse_boolean(self):
  lit = getattr(self.cur_token, 'literal', '')
  val = True if isinstance(lit, str) and lit.lower() == 'true' else False
  # Transient trace to diagnose boolean parsing
- try:
- if lit.lower() == 'false':
- import traceback as _tb
- stack = ''.join(_tb.format_stack(limit=4)[-2:])
- print(f"[PARSE_BOOL_TRACE] false token at position {self.lexer.position}: literal={lit}, val={val}\n{stack}")
- except Exception:
- pass
+ if config.enable_debug_logs:
+ try:
+ if lit.lower() == 'false':
+ import traceback as _tb
+ stack = ''.join(_tb.format_stack(limit=4)[-2:])
+ print(
+ f"[PARSE_BOOL_TRACE] false token at position {self.lexer.position}: literal={lit}, val={val}\n{stack}"
+ )
+ except Exception:
+ pass
  return Boolean(value=val)
 
  def parse_null(self):
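parse_interpolated_string receives a token literal that the lexer has already split into ('str', text) and ('expr', source) parts and re-parses each expression source with a sub-parser. The standalone sketch below shows one way such parts can be produced and rendered; the splitting regex, the split_parts/render helpers, and the eval-based evaluation are illustrative only and are not the package's Lexer or Parser.

import re

def split_parts(raw: str):
    """Split 'Hello ${name}!' into [('str', 'Hello '), ('expr', 'name'), ('str', '!')]."""
    parts, last = [], 0
    for m in re.finditer(r"\$\{([^}]*)\}", raw):
        if m.start() > last:
            parts.append(("str", raw[last:m.start()]))
        parts.append(("expr", m.group(1)))
        last = m.end()
    if last < len(raw):
        parts.append(("str", raw[last:]))
    return parts

def render(parts, scope):
    """Evaluate 'expr' parts against a scope dict and join everything back together."""
    out = []
    for kind, value in parts:
        # eval is only for this sketch; the real parser builds AST nodes instead.
        out.append(str(eval(value, {}, scope)) if kind == "expr" else value)
    return "".join(out)

parts = split_parts("Hello ${name}, you have ${count + 1} messages")
print(parts)
print(render(parts, {"name": "Ada", "count": 2}))  # Hello Ada, you have 3 messages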
@@ -3285,11 +3666,39 @@ class UltimateParser:
  # current token is LBRACKET (parser calls this after advancing to that token)
  # Move to the first token inside the brackets
  self.next_token()
- index_expr = self.parse_expression(LOWEST)
+ start_expr = None
+ end_expr = None
+
+ if self.cur_token_is(COLON):
+ # Slice with omitted start: obj[:end]
+ if self.peek_token_is(RBRACKET):
+ # obj[:]
+ self.next_token()
+ return SliceExpression(object=left, start=None, end=None)
+ self.next_token()
+ end_expr = self.parse_expression(LOWEST)
+ if not self.expect_peek(RBRACKET):
+ return None
+ return SliceExpression(object=left, start=None, end=end_expr)
+
+ start_expr = self.parse_expression(LOWEST)
+
+ if self.peek_token_is(COLON):
+ # Slice with explicit start: obj[start:end]
+ self.next_token() # move to ':'
+ if self.peek_token_is(RBRACKET):
+ self.next_token() # move to ']'
+ return SliceExpression(object=left, start=start_expr, end=None)
+ self.next_token()
+ end_expr = self.parse_expression(LOWEST)
+ if not self.expect_peek(RBRACKET):
+ return None
+ return SliceExpression(object=left, start=start_expr, end=end_expr)
+
  # Expect closing bracket
  if not self.expect_peek(RBRACKET):
  return None
- return PropertyAccessExpression(object=left, property=index_expr, computed=True)
+ return PropertyAccessExpression(object=left, property=start_expr, computed=True)
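The bracket parser now distinguishes slices from plain indexing: obj[:], obj[:end], obj[start:], and obj[start:end] become SliceExpression nodes, while a single index still falls through to the computed PropertyAccessExpression. A standalone sketch of the same branching over pre-split bracket text (classify_bracket and the string handling are simplifications, not the parser's token machinery):

def classify_bracket(contents: str):
    """Map the text between '[' and ']' to (kind, start, end)."""
    if ":" not in contents:
        return ("index", contents.strip() or None, None)   # obj[index]
    start, _, end = contents.partition(":")
    return ("slice", start.strip() or None, end.strip() or None)

for src in ["i", ":", ":3", "1:", "1:3"]:
    print(f"obj[{src}] ->", classify_bracket(src))
# Output:
# obj[i] -> ('index', 'i', None)
# obj[:] -> ('slice', None, None)
# obj[:3] -> ('slice', None, '3')
# obj[1:] -> ('slice', '1', None)
# obj[1:3] -> ('slice', '1', '3')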
 
  def _lookahead_token_after_matching_paren(self):
  """Character-level lookahead: detect if the matching ')' is followed by '=>' (arrow).
@@ -3324,6 +3733,73 @@ class UltimateParser:
 
  return None
 
+ def parse_match_expression(self):
+ """Parse match expression: match value { case p: r, ... } or match value { p => r, ... }"""
+ expression = MatchExpression(value=None, cases=[])
+
+ self.next_token() # Consume MATCH
+
+ expression.value = self.parse_expression(LOWEST)
+
+ if not self.expect_peek(LBRACE):
+ return None
+
+ while not self.peek_token_is(RBRACE) and not self.peek_token_is(EOF):
+ if self.peek_token_is(CASE):
+ # case pattern: result syntax
+ self.next_token() # Consume CASE
+ if not self.peek_token_is(COLON):
+ self.next_token()
+ pattern = self.parse_expression(LOWEST)
+ if not self.expect_peek(COLON):
+ return None
+ result = None
+ if self.peek_token_is(LBRACE):
+ if not self.expect_peek(LBRACE):
+ return None
+ result = self.parse_block_statement()
+ else:
+ self.next_token()
+ result = self.parse_expression(LOWEST)
+ if self.peek_token_is(COMMA) or self.peek_token_is(SEMICOLON):
+ self.next_token()
+ case = MatchCase(pattern=pattern, result=result)
+ expression.cases.append(case)
+ elif self.peek_token_is(DEFAULT):
+ # default: result syntax
+ self.next_token() # Consume DEFAULT
+ if not self.expect_peek(COLON):
+ return None
+ self.next_token()
+ result = self.parse_expression(LOWEST)
+ if self.peek_token_is(COMMA) or self.peek_token_is(SEMICOLON):
+ self.next_token()
+ pattern = Identifier(value="_")
+ case = MatchCase(pattern=pattern, result=result)
+ expression.cases.append(case)
+ else:
+ # Arrow syntax: pattern => result
+ self.next_token() # Move to pattern token
+ pattern = self.parse_expression(LOWEST)
+
+ # Expect => (LAMBDA token with literal '=>')
+ if self.peek_token_is(LAMBDA):
+ self.next_token() # Consume =>
+ self.next_token() # Move to result
+ result = self.parse_expression(LOWEST)
+ if self.peek_token_is(COMMA) or self.peek_token_is(SEMICOLON):
+ self.next_token()
+ case = MatchCase(pattern=pattern, result=result)
+ expression.cases.append(case)
+ else:
+ # Skip unexpected tokens
+ pass
+
+ if not self.expect_peek(RBRACE):
+ return None
+
+ return expression
+
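parse_match_expression accepts three case shapes inside the braces ('case pattern: result', 'default: result', and 'pattern => result') and collects them as MatchCase nodes, with an '_' identifier standing in for the default pattern. A simplified standalone sketch of evaluating such a case list after parsing (plain tuples instead of MatchExpression/MatchCase; this is not the package's interpreter):

def eval_match(value, cases):
    """cases: list of (pattern, result); a pattern of '_' matches anything."""
    for pattern, result in cases:
        if pattern == "_" or pattern == value:
            return result
    return None

# Roughly: match status { case 200: "ok", case 404: "missing", default: "error" }
cases = [(200, "ok"), (404, "missing"), ("_", "error")]
print(eval_match(200, cases))   # ok
print(eval_match(500, cases))   # error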
  def parse_if_expression(self):
  """Parse if expression - handles both statement form and expression form
 
@@ -3563,17 +4039,27 @@ class UltimateParser:
  storage_vars = []
  actions = []
 
- while not self.cur_token_is(RBRACE) and not self.cur_token_is(EOF):
+ while not self.cur_token_is(EOF):
  self.next_token()
+
+ # Parse modifiers preceding the declaration
+ modifiers = self._parse_modifiers()
 
  if self.cur_token_is(RBRACE):
+ # If more declarations follow, skip this brace (close of inner block)
+ if (self.peek_token_is(ACTION) or self.peek_token_is(STATE) or self.peek_token_is(DATA) or
+ (self.peek_token_is(IDENT) and getattr(self.peek_token, 'literal', None) == 'persistent')):
+ continue
  break
 
  # Check for state variable declaration
  if self.cur_token_is(STATE):
  state_stmt = self.parse_state_statement()
  if state_stmt:
+ # Attach parsed modifiers
+ state_stmt.modifiers = modifiers
  storage_vars.append(state_stmt)
+ print(f"DEBUG: Parsed state {state_stmt.name.value} modifiers={modifiers}")
 
  # Check for data member declaration
  elif self.cur_token_is(DATA):
@@ -3588,12 +4074,12 @@ class UltimateParser:
  self.next_token() # Move to value
  data_value = self.parse_expression(LOWEST)
 
- # Create a let statement for the data member
- from ..zexus_ast import LetStatement
- data_stmt = LetStatement()
- data_stmt.name = Identifier(data_name)
- data_stmt.value = data_value
+ # Treat contract data as state with default value
+ from ..zexus_ast import StateStatement
+ # Pass modifiers to constructor
+ data_stmt = StateStatement(Identifier(data_name), data_value, modifiers=modifiers)
  storage_vars.append(data_stmt)
+ print(f"DEBUG: Parsed data {data_name} modifiers={modifiers}")
 
  # Consume optional semicolon (same as parse_state_statement)
  if self.peek_token_is(SEMICOLON):
@@ -3606,21 +4092,31 @@ class UltimateParser:
  self.next_token()
  if self.cur_token_is(IDENT):
  storage_name = self.cur_token.literal
+ # Note: Persistent storage doesn't support standard modifiers yet
  storage_vars.append({"name": storage_name})
 
  # Check for action definition
  elif self.cur_token_is(ACTION):
  action = self.parse_action_statement()
  if action:
+ # Attach parsed modifiers
+ action.modifiers = modifiers
  actions.append(action)
 
- self.expect_peek(RBRACE)
+ if not self.cur_token_is(RBRACE):
+ # Tolerant: if the contract body ends at EOF, don't emit a hard error
+ if not self.peek_token_is(EOF):
+ self.expect_peek(RBRACE)
 
  # Create body block with storage vars and actions
  body = BlockStatement()
  body.statements = storage_vars + actions
+
+ contract_node = ContractStatement(contract_name, body, modifiers=[], implements=implements)
+ contract_node.storage_vars = storage_vars
+ contract_node.actions = actions
 
- return ContractStatement(contract_name, body, modifiers=[], implements=implements)
+ return contract_node
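Contract bodies now run _parse_modifiers() before each member and attach the result to state, data (now emitted as StateStatement), and action declarations, and the finished ContractStatement additionally exposes storage_vars and actions. A minimal standalone sketch of collecting members with their modifier lists, using hypothetical dataclasses rather than the package's AST:

from dataclasses import dataclass, field
from typing import List

@dataclass
class Member:
    kind: str                       # "state", "data", or "action"
    name: str
    modifiers: List[str] = field(default_factory=list)

@dataclass
class Contract:
    name: str
    storage_vars: List[Member] = field(default_factory=list)
    actions: List[Member] = field(default_factory=list)

def add_member(contract: Contract, kind: str, name: str, modifiers: List[str]) -> None:
    member = Member(kind, name, modifiers=list(modifiers))
    # Actions are collected separately; data is stored alongside state.
    (contract.actions if kind == "action" else contract.storage_vars).append(member)

token = Contract("Token")
add_member(token, "state", "total_supply", ["public"])
add_member(token, "data", "owner", [])
add_member(token, "action", "transfer", ["payable"])
print(token.storage_vars)
print(token.actions)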
 
  def parse_protect_statement(self):
  """Parse protect statement
@@ -4247,7 +4743,8 @@ class UltimateParser:
 
  Asserts condition, reverts transaction if false.
  """
- print(f"[DEBUG PARSER] parse_require_statement called", flush=True)
+ if config.enable_debug_logs:
+ print("[DEBUG PARSER] parse_require_statement called", flush=True)
  token = self.cur_token
 
  if not self.expect_peek(LPAREN):
@@ -4271,7 +4768,11 @@ class UltimateParser:
  if self.peek_token_is(SEMICOLON):
  self.next_token()
 
- print(f"[DEBUG PARSER] Creating RequireStatement with condition={condition}, message={message}", flush=True)
+ if config.enable_debug_logs:
+ print(
+ f"[DEBUG PARSER] Creating RequireStatement with condition={condition}, message={message}",
+ flush=True,
+ )
  return RequireStatement(condition=condition, message=message)
 
  def parse_revert_statement(self):