zexus 1.7.1 → 1.7.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (159) hide show
  1. package/README.md +3 -3
  2. package/package.json +1 -1
  3. package/src/__init__.py +7 -0
  4. package/src/zexus/__init__.py +1 -1
  5. package/src/zexus/__pycache__/__init__.cpython-312.pyc +0 -0
  6. package/src/zexus/__pycache__/capability_system.cpython-312.pyc +0 -0
  7. package/src/zexus/__pycache__/debug_sanitizer.cpython-312.pyc +0 -0
  8. package/src/zexus/__pycache__/environment.cpython-312.pyc +0 -0
  9. package/src/zexus/__pycache__/error_reporter.cpython-312.pyc +0 -0
  10. package/src/zexus/__pycache__/input_validation.cpython-312.pyc +0 -0
  11. package/src/zexus/__pycache__/lexer.cpython-312.pyc +0 -0
  12. package/src/zexus/__pycache__/module_cache.cpython-312.pyc +0 -0
  13. package/src/zexus/__pycache__/module_manager.cpython-312.pyc +0 -0
  14. package/src/zexus/__pycache__/object.cpython-312.pyc +0 -0
  15. package/src/zexus/__pycache__/security.cpython-312.pyc +0 -0
  16. package/src/zexus/__pycache__/security_enforcement.cpython-312.pyc +0 -0
  17. package/src/zexus/__pycache__/syntax_validator.cpython-312.pyc +0 -0
  18. package/src/zexus/__pycache__/zexus_ast.cpython-312.pyc +0 -0
  19. package/src/zexus/__pycache__/zexus_token.cpython-312.pyc +0 -0
  20. package/src/zexus/access_control_system/__pycache__/__init__.cpython-312.pyc +0 -0
  21. package/src/zexus/access_control_system/__pycache__/access_control.cpython-312.pyc +0 -0
  22. package/src/zexus/advanced_types.py +17 -2
  23. package/src/zexus/blockchain/__init__.py +411 -0
  24. package/src/zexus/blockchain/accelerator.py +1160 -0
  25. package/src/zexus/blockchain/chain.py +660 -0
  26. package/src/zexus/blockchain/consensus.py +821 -0
  27. package/src/zexus/blockchain/contract_vm.py +1019 -0
  28. package/src/zexus/blockchain/crypto.py +79 -14
  29. package/src/zexus/blockchain/events.py +526 -0
  30. package/src/zexus/blockchain/loadtest.py +721 -0
  31. package/src/zexus/blockchain/monitoring.py +350 -0
  32. package/src/zexus/blockchain/mpt.py +716 -0
  33. package/src/zexus/blockchain/multichain.py +951 -0
  34. package/src/zexus/blockchain/multiprocess_executor.py +338 -0
  35. package/src/zexus/blockchain/network.py +886 -0
  36. package/src/zexus/blockchain/node.py +666 -0
  37. package/src/zexus/blockchain/rpc.py +1203 -0
  38. package/src/zexus/blockchain/rust_bridge.py +421 -0
  39. package/src/zexus/blockchain/storage.py +423 -0
  40. package/src/zexus/blockchain/tokens.py +750 -0
  41. package/src/zexus/blockchain/upgradeable.py +1004 -0
  42. package/src/zexus/blockchain/verification.py +1602 -0
  43. package/src/zexus/blockchain/wallet.py +621 -0
  44. package/src/zexus/cli/__pycache__/main.cpython-312.pyc +0 -0
  45. package/src/zexus/cli/main.py +300 -20
  46. package/src/zexus/cli/zpm.py +1 -1
  47. package/src/zexus/compiler/__pycache__/bytecode.cpython-312.pyc +0 -0
  48. package/src/zexus/compiler/__pycache__/lexer.cpython-312.pyc +0 -0
  49. package/src/zexus/compiler/__pycache__/parser.cpython-312.pyc +0 -0
  50. package/src/zexus/compiler/__pycache__/semantic.cpython-312.pyc +0 -0
  51. package/src/zexus/compiler/__pycache__/zexus_ast.cpython-312.pyc +0 -0
  52. package/src/zexus/compiler/lexer.py +10 -5
  53. package/src/zexus/concurrency_system.py +79 -0
  54. package/src/zexus/config.py +54 -0
  55. package/src/zexus/crypto_bridge.py +244 -8
  56. package/src/zexus/dap/__init__.py +10 -0
  57. package/src/zexus/dap/__main__.py +4 -0
  58. package/src/zexus/dap/dap_server.py +391 -0
  59. package/src/zexus/dap/debug_engine.py +298 -0
  60. package/src/zexus/environment.py +10 -1
  61. package/src/zexus/evaluator/__pycache__/bytecode_compiler.cpython-312.pyc +0 -0
  62. package/src/zexus/evaluator/__pycache__/core.cpython-312.pyc +0 -0
  63. package/src/zexus/evaluator/__pycache__/expressions.cpython-312.pyc +0 -0
  64. package/src/zexus/evaluator/__pycache__/functions.cpython-312.pyc +0 -0
  65. package/src/zexus/evaluator/__pycache__/resource_limiter.cpython-312.pyc +0 -0
  66. package/src/zexus/evaluator/__pycache__/statements.cpython-312.pyc +0 -0
  67. package/src/zexus/evaluator/__pycache__/unified_execution.cpython-312.pyc +0 -0
  68. package/src/zexus/evaluator/__pycache__/utils.cpython-312.pyc +0 -0
  69. package/src/zexus/evaluator/bytecode_compiler.py +441 -37
  70. package/src/zexus/evaluator/core.py +560 -49
  71. package/src/zexus/evaluator/expressions.py +122 -49
  72. package/src/zexus/evaluator/functions.py +417 -16
  73. package/src/zexus/evaluator/statements.py +521 -118
  74. package/src/zexus/evaluator/unified_execution.py +573 -72
  75. package/src/zexus/evaluator/utils.py +14 -2
  76. package/src/zexus/event_loop.py +186 -0
  77. package/src/zexus/lexer.py +742 -486
  78. package/src/zexus/lsp/__init__.py +1 -1
  79. package/src/zexus/lsp/definition_provider.py +163 -9
  80. package/src/zexus/lsp/server.py +22 -8
  81. package/src/zexus/lsp/symbol_provider.py +182 -9
  82. package/src/zexus/module_cache.py +237 -9
  83. package/src/zexus/object.py +64 -6
  84. package/src/zexus/parser/__pycache__/parser.cpython-312.pyc +0 -0
  85. package/src/zexus/parser/__pycache__/strategy_context.cpython-312.pyc +0 -0
  86. package/src/zexus/parser/__pycache__/strategy_structural.cpython-312.pyc +0 -0
  87. package/src/zexus/parser/parser.py +786 -285
  88. package/src/zexus/parser/strategy_context.py +407 -66
  89. package/src/zexus/parser/strategy_structural.py +117 -19
  90. package/src/zexus/persistence.py +15 -1
  91. package/src/zexus/renderer/__init__.py +15 -0
  92. package/src/zexus/renderer/__pycache__/__init__.cpython-312.pyc +0 -0
  93. package/src/zexus/renderer/__pycache__/backend.cpython-312.pyc +0 -0
  94. package/src/zexus/renderer/__pycache__/canvas.cpython-312.pyc +0 -0
  95. package/src/zexus/renderer/__pycache__/color_system.cpython-312.pyc +0 -0
  96. package/src/zexus/renderer/__pycache__/layout.cpython-312.pyc +0 -0
  97. package/src/zexus/renderer/__pycache__/main_renderer.cpython-312.pyc +0 -0
  98. package/src/zexus/renderer/__pycache__/painter.cpython-312.pyc +0 -0
  99. package/src/zexus/renderer/tk_backend.py +208 -0
  100. package/src/zexus/renderer/web_backend.py +260 -0
  101. package/src/zexus/runtime/__pycache__/__init__.cpython-312.pyc +0 -0
  102. package/src/zexus/runtime/__pycache__/async_runtime.cpython-312.pyc +0 -0
  103. package/src/zexus/runtime/__pycache__/load_manager.cpython-312.pyc +0 -0
  104. package/src/zexus/runtime/file_flags.py +137 -0
  105. package/src/zexus/safety/__pycache__/__init__.cpython-312.pyc +0 -0
  106. package/src/zexus/safety/__pycache__/memory_safety.cpython-312.pyc +0 -0
  107. package/src/zexus/security.py +424 -34
  108. package/src/zexus/stdlib/fs.py +23 -18
  109. package/src/zexus/stdlib/http.py +289 -186
  110. package/src/zexus/stdlib/sockets.py +207 -163
  111. package/src/zexus/stdlib/websockets.py +282 -0
  112. package/src/zexus/stdlib_integration.py +369 -2
  113. package/src/zexus/strategy_recovery.py +6 -3
  114. package/src/zexus/type_checker.py +423 -0
  115. package/src/zexus/virtual_filesystem.py +189 -2
  116. package/src/zexus/vm/__init__.py +113 -3
  117. package/src/zexus/vm/__pycache__/async_optimizer.cpython-312.pyc +0 -0
  118. package/src/zexus/vm/__pycache__/bytecode.cpython-312.pyc +0 -0
  119. package/src/zexus/vm/__pycache__/bytecode_converter.cpython-312.pyc +0 -0
  120. package/src/zexus/vm/__pycache__/cache.cpython-312.pyc +0 -0
  121. package/src/zexus/vm/__pycache__/compiler.cpython-312.pyc +0 -0
  122. package/src/zexus/vm/__pycache__/gas_metering.cpython-312.pyc +0 -0
  123. package/src/zexus/vm/__pycache__/jit.cpython-312.pyc +0 -0
  124. package/src/zexus/vm/__pycache__/parallel_vm.cpython-312.pyc +0 -0
  125. package/src/zexus/vm/__pycache__/vm.cpython-312.pyc +0 -0
  126. package/src/zexus/vm/async_optimizer.py +14 -1
  127. package/src/zexus/vm/binary_bytecode.py +659 -0
  128. package/src/zexus/vm/bytecode.py +28 -1
  129. package/src/zexus/vm/bytecode_converter.py +26 -12
  130. package/src/zexus/vm/cabi.c +1985 -0
  131. package/src/zexus/vm/cabi.cpython-312-x86_64-linux-gnu.so +0 -0
  132. package/src/zexus/vm/cabi.h +127 -0
  133. package/src/zexus/vm/cache.py +557 -17
  134. package/src/zexus/vm/compiler.py +703 -5
  135. package/src/zexus/vm/fastops.c +15743 -0
  136. package/src/zexus/vm/fastops.cpython-312-x86_64-linux-gnu.so +0 -0
  137. package/src/zexus/vm/fastops.pyx +288 -0
  138. package/src/zexus/vm/gas_metering.py +50 -9
  139. package/src/zexus/vm/jit.py +83 -2
  140. package/src/zexus/vm/native_jit_backend.py +1816 -0
  141. package/src/zexus/vm/native_runtime.cpp +1388 -0
  142. package/src/zexus/vm/native_runtime.cpython-312-x86_64-linux-gnu.so +0 -0
  143. package/src/zexus/vm/optimizer.py +161 -11
  144. package/src/zexus/vm/parallel_vm.py +118 -42
  145. package/src/zexus/vm/peephole_optimizer.py +82 -4
  146. package/src/zexus/vm/profiler.py +38 -18
  147. package/src/zexus/vm/register_allocator.py +16 -5
  148. package/src/zexus/vm/register_vm.py +8 -5
  149. package/src/zexus/vm/vm.py +3411 -573
  150. package/src/zexus/vm/wasm_compiler.py +658 -0
  151. package/src/zexus/zexus_ast.py +63 -11
  152. package/src/zexus/zexus_token.py +13 -5
  153. package/src/zexus/zpm/installer.py +55 -15
  154. package/src/zexus/zpm/package_manager.py +1 -1
  155. package/src/zexus/zpm/registry.py +257 -28
  156. package/src/zexus.egg-info/PKG-INFO +7 -4
  157. package/src/zexus.egg-info/SOURCES.txt +116 -9
  158. package/src/zexus.egg-info/entry_points.txt +1 -0
  159. package/src/zexus.egg-info/requires.txt +4 -0
@@ -6,6 +6,30 @@ from ..zexus_token import *
6
6
  from ..zexus_ast import *
7
7
  from ..config import config as zexus_config
8
8
  from types import SimpleNamespace # Helper for AST node creation
9
+ from collections import OrderedDict
10
+
11
+ STATEMENT_STARTERS = {
12
+ LET, CONST, DATA, PRINT, FOR, IF, WHILE, RETURN, CONTINUE, BREAK, THROW, ACTION, FUNCTION,
13
+ TRY, FINALLY, EXTERNAL, SCREEN, COLOR, CANVAS, GRAPHICS, ANIMATION, CLOCK,
14
+ EXPORT, USE, DEBUG, ENTITY, CONTRACT, VERIFY, PROTECT, PERSISTENT,
15
+ STORAGE, AUDIT, RESTRICT, SANDBOX, TRAIL, NATIVE, GC, INLINE, BUFFER,
16
+ SIMD, DEFER, PATTERN, ENUM, STREAM, WATCH, LOG, CAPABILITY, GRANT,
17
+ REVOKE, VALIDATE, SANITIZE, IMMUTABLE, INTERFACE, TYPE_ALIAS, MODULE,
18
+ PACKAGE, USING, MIDDLEWARE, AUTH, THROTTLE, CACHE, REQUIRE
19
+ }
20
+
21
+ _MEANINGFUL_TOKEN_TYPES = {
22
+ IDENT, STRING, INTERP_STRING, INT, FLOAT, LBRACE, RBRACE, LPAREN, RPAREN, LBRACKET,
23
+ RBRACKET, COMMA, DOT, SEMICOLON, ASSIGN, LAMBDA,
24
+ POWER, PLUS_ASSIGN, MINUS_ASSIGN, STAR_ASSIGN, SLASH_ASSIGN, MOD_ASSIGN, POWER_ASSIGN
25
+ }
26
+
27
+ _BLOCK_STATEMENTS_CACHE: "OrderedDict[tuple, tuple]" = OrderedDict()
28
+ _BLOCK_STATEMENTS_CACHE_MAX = 256
29
+
30
+
31
+ def _block_tokens_signature(tokens):
32
+ return tuple((t.type, t.literal) for t in tokens)
9
33
 
10
34
  # Import Parser for nested parsing (needed for LOG statement)
11
35
  # Note: This is imported at runtime to avoid circular dependency
@@ -137,6 +161,7 @@ class ContextStackParser:
137
161
  GRAPHICS: self._parse_graphics_statement,
138
162
  ANIMATION: self._parse_animation_statement,
139
163
  CLOCK: self._parse_clock_statement,
164
+ GC: self._parse_gc_statement_block,
140
165
  }
141
166
 
142
167
  def push_context(self, context_type, context_name=None):
@@ -157,6 +182,35 @@ class ContextStackParser:
157
182
  """Get the current parsing context"""
158
183
  return self.current_context[-1] if self.current_context else 'global'
159
184
 
185
+ def _parse_destructure_via_traditional(self, tokens):
186
+ """Delegate destructuring let/const parsing to the traditional parser.
187
+
188
+ This re-lexes the token stream through a mini UltimateParser instance so
189
+ that ``parse_let_statement`` / ``parse_const_statement`` (which already
190
+ understand ``{`` / ``[`` destructure patterns) handle the work.
191
+ """
192
+ from ..lexer import Lexer
193
+ from .parser import UltimateParser
194
+ # Reconstruct source code faithfully — STRING tokens must be re-quoted
195
+ # so the re-lexer doesn't treat them as identifiers.
196
+ parts = []
197
+ for t in tokens:
198
+ if not t.literal:
199
+ continue
200
+ if t.type == 'STRING':
201
+ # Escape inner double-quotes and wrap in quotes
202
+ escaped = t.literal.replace('\\', '\\\\').replace('"', '\\"')
203
+ parts.append(f'"{escaped}"')
204
+ else:
205
+ parts.append(t.literal)
206
+ code = ' '.join(parts)
207
+ mini_lexer = Lexer(code)
208
+ mini_parser = UltimateParser(mini_lexer, 'universal', False)
209
+ mini_program = mini_parser.parse_program()
210
+ if mini_program and mini_program.statements:
211
+ return mini_program.statements[0]
212
+ return None
213
+
160
214
  def parse_block(self, block_info, all_tokens):
161
215
  """Parse a block with context awareness"""
162
216
  block_type = block_info.get('subtype', block_info['type'])
@@ -167,14 +221,17 @@ class ContextStackParser:
167
221
  try:
168
222
  # Early exit: if a block has no meaningful tokens, skip parsing it
169
223
  tokens = block_info.get('tokens', []) or []
170
- def _meaningful(tok):
224
+ meaningful = False
225
+ for tok in tokens:
226
+ if tok.type in _MEANINGFUL_TOKEN_TYPES:
227
+ meaningful = True
228
+ break
171
229
  lit = getattr(tok, 'literal', None)
172
- # treat identifiers, strings, numbers and structural tokens as meaningful
173
- if tok.type in {IDENT, STRING, INT, FLOAT, LBRACE, RBRACE, LPAREN, RPAREN, LBRACKET, RBRACKET, COMMA, DOT, SEMICOLON, ASSIGN, LAMBDA}:
174
- return True
175
- return not (lit is None or lit == '')
230
+ if lit is not None and lit != '':
231
+ meaningful = True
232
+ break
176
233
 
177
- if not any(_meaningful(t) for t in tokens):
234
+ if not meaningful:
178
235
  ctx_debug(f"Skipping empty/insignificant block tokens for {block_type}", level='debug')
179
236
  return None
180
237
  # Use appropriate parsing strategy for this context
@@ -259,6 +316,10 @@ class ContextStackParser:
259
316
  parser_debug(" ❌ Invalid let statement: too few tokens")
260
317
  return None
261
318
 
319
+ # Destructuring pattern: let {a, b} = expr or let [x, y] = expr
320
+ if tokens[1].type in (LBRACE, LBRACKET):
321
+ return self._parse_destructure_via_traditional(tokens)
322
+
262
323
  if tokens[1].type != IDENT:
263
324
  parser_debug(" ❌ Invalid let statement: expected identifier after 'let'")
264
325
  return None
@@ -397,7 +458,7 @@ class ContextStackParser:
397
458
 
398
459
  # Check for statement starters that should break
399
460
  # Context-sensitive: IF followed by THEN is an expression, not a statement
400
- if t.type in {LET, PRINT, FOR, WHILE, RETURN, CONTINUE, ACTION, TRY, EXTERNAL,
461
+ if t.type in {LET, CONST, DATA, PRINT, FOR, WHILE, RETURN, CONTINUE, BREAK, THROW, ACTION, TRY, FINALLY, EXTERNAL,
401
462
  SCREEN, COLOR, CANVAS, GRAPHICS, ANIMATION, CLOCK,
402
463
  EXPORT, USE, DEBUG}:
403
464
  prev = tokens[j-1] if j > 0 else None
@@ -469,6 +530,10 @@ class ContextStackParser:
469
530
  parser_debug(" ❌ Invalid const statement: too few tokens")
470
531
  return None
471
532
 
533
+ # Destructuring pattern: const {a, b} = expr or const [x, y] = expr
534
+ if tokens[1].type in (LBRACE, LBRACKET):
535
+ return self._parse_destructure_via_traditional(tokens)
536
+
472
537
  if tokens[1].type != IDENT:
473
538
  parser_debug(" ❌ Invalid const statement: expected identifier after 'const'")
474
539
  return None
@@ -539,7 +604,7 @@ class ContextStackParser:
539
604
  j += 1 # Skip the semicolon
540
605
  break
541
606
  # Allow method chains but stop at other statement starters
542
- if t.type in {LET, CONST, PRINT, FOR, IF, WHILE, RETURN, ACTION, TRY, EXTERNAL,
607
+ if t.type in {LET, CONST, DATA, PRINT, FOR, IF, WHILE, RETURN, BREAK, THROW, ACTION, TRY, FINALLY, EXTERNAL,
543
608
  SCREEN, COLOR, CANVAS, GRAPHICS, ANIMATION, CLOCK,
544
609
  EXPORT, USE, DEBUG}:
545
610
  prev = tokens[j-1] if j > 0 else None
@@ -757,7 +822,7 @@ class ContextStackParser:
757
822
  operator_symbol = None
758
823
  if i < len(tokens):
759
824
  # Could be +, -, *, /, ==, etc.
760
- if tokens[i].type in {PLUS, MINUS, STAR, SLASH, MOD, EQ, NOT_EQ, LT, GT, LTE, GTE}:
825
+ if tokens[i].type in {PLUS, MINUS, STAR, SLASH, MOD, EQ, NOT_EQ, LT, GT, LTE, GTE, POWER}:
761
826
  operator_symbol = tokens[i].literal
762
827
  parser_debug(f" Operator: {operator_symbol}")
763
828
  i += 1
@@ -1137,15 +1202,27 @@ class ContextStackParser:
1137
1202
  parser_debug("🔧 [Context] Parsing assignment statement")
1138
1203
  tokens = block_info['tokens']
1139
1204
 
1140
- # Find the ASSIGN operator
1205
+ # Find the ASSIGN operator or compound assignment token
1141
1206
  assign_idx = None
1142
- compound_operator = None # Track if this is +=, -=, *=, /=, %=
1207
+ compound_operator = None # Track if this is +=, -=, *=, /=, %=, **=
1208
+
1209
+ # Map compound assignment token types to their operator strings
1210
+ _compound_assign_map = {
1211
+ PLUS_ASSIGN: "+", MINUS_ASSIGN: "-", STAR_ASSIGN: "*",
1212
+ SLASH_ASSIGN: "/", MOD_ASSIGN: "%", POWER_ASSIGN: "**"
1213
+ }
1143
1214
 
1144
1215
  for i, tok in enumerate(tokens):
1145
- if tok.type == ASSIGN:
1216
+ # Check for single-token compound assignments (+=, -=, *=, /=, %=, **=)
1217
+ if tok.type in _compound_assign_map:
1218
+ assign_idx = i
1219
+ compound_operator = _compound_assign_map[tok.type]
1220
+ parser_debug(f" 🔍 Detected compound assignment token: {compound_operator}= at position {i}")
1221
+ break
1222
+ elif tok.type == ASSIGN:
1146
1223
  assign_idx = i
1147
- # Check for compound assignment: operator immediately before =
1148
- if i > 0 and tokens[i-1].type in {PLUS, MINUS, STAR, SLASH, MOD}:
1224
+ # Legacy check: operator immediately before = (in case of split tokens)
1225
+ if i > 0 and tokens[i-1].type in {PLUS, MINUS, STAR, SLASH, MOD, POWER}:
1149
1226
  compound_operator = tokens[i-1].literal
1150
1227
  parser_debug(f" 🔍 Detected compound operator: {compound_operator}= at position {i-1}")
1151
1228
  assign_idx = i # Keep assign_idx at the = position
@@ -1156,11 +1233,11 @@ class ContextStackParser:
1156
1233
  return None
1157
1234
 
1158
1235
  # Parse the left-hand side (could be identifier or property access)
1159
- # For compound operators (+=), exclude the operator token from LHS
1160
- if compound_operator:
1236
+ # For compound operators with split tokens (op + =), exclude the operator token from LHS
1237
+ if compound_operator and tokens[assign_idx].type == ASSIGN:
1161
1238
  lhs_tokens = tokens[:assign_idx-1] # Exclude the operator before =
1162
1239
  else:
1163
- lhs_tokens = tokens[:assign_idx]
1240
+ lhs_tokens = tokens[:assign_idx] # For single-token compound assignments
1164
1241
 
1165
1242
  # Check if this is a property access (e.g., obj.property or obj["key"])
1166
1243
  target_expr = None
@@ -1180,9 +1257,9 @@ class ContextStackParser:
1180
1257
  value_tokens = []
1181
1258
  stop_types = {SEMICOLON} # RBRACE removed - handle with nesting instead
1182
1259
  statement_starters = {
1183
- LET, CONST, PRINT, FOR, IF, WHILE, RETURN, CONTINUE, ACTION, TRY, EXTERNAL,
1260
+ LET, CONST, DATA, PRINT, FOR, IF, WHILE, RETURN, CONTINUE, BREAK, THROW, ACTION, TRY, FINALLY, EXTERNAL,
1184
1261
  SCREEN, COLOR, CANVAS, GRAPHICS, ANIMATION, CLOCK,
1185
- EXPORT, USE, DEBUG, AUDIT, RESTRICT, SANDBOX, TRAIL, NATIVE, GC, INLINE,
1262
+ EXPORT, USE, DEBUG, ENTITY, CONTRACT, AUDIT, RESTRICT, SANDBOX, TRAIL, NATIVE, GC, INLINE,
1186
1263
  BUFFER, SIMD, DEFER, PATTERN, ENUM, STREAM, WATCH, CAPABILITY, GRANT,
1187
1264
  REVOKE, VALIDATE, SANITIZE, IMMUTABLE, INTERFACE, TYPE_ALIAS, MODULE,
1188
1265
  PACKAGE, USING
@@ -1465,6 +1542,22 @@ class ContextStackParser:
1465
1542
  if len(tokens) < 3:
1466
1543
  return None
1467
1544
 
1545
+ # Optimization: Check AST Cache
1546
+ token_hash = None
1547
+ try:
1548
+ # Create a signature from tokens (literal + type)
1549
+ # We use a tuple which is hashable and fast
1550
+ tv = tuple((t.literal, t.type) for t in tokens)
1551
+ token_hash = str(hash(tv))
1552
+
1553
+ from ..module_cache import get_cached_contract_ast, cache_contract_ast
1554
+ cached = get_cached_contract_ast(token_hash)
1555
+ if cached:
1556
+ parser_debug(f" ⚡ Cache Hit for Contract: {token_hash}")
1557
+ return cached
1558
+ except Exception:
1559
+ pass
1560
+
1468
1561
  # 1. Extract Name
1469
1562
  contract_name = tokens[1].literal if tokens[1].type == IDENT else "UnknownContract"
1470
1563
  parser_debug(f" 📝 Contract Name: {contract_name}")
@@ -1849,6 +1942,12 @@ class ContextStackParser:
1849
1942
  contract_stmt.storage_vars = storage_vars
1850
1943
  contract_stmt.actions = actions
1851
1944
 
1945
+ if token_hash:
1946
+ try:
1947
+ cache_contract_ast(token_hash, contract_stmt)
1948
+ except Exception:
1949
+ pass
1950
+
1852
1951
  return contract_stmt
1853
1952
 
1854
1953
  # === FIXED USE STATEMENT PARSERS ===
@@ -1913,7 +2012,7 @@ class ContextStackParser:
1913
2012
  for i, token in enumerate(tokens):
1914
2013
  if token.type == STRING:
1915
2014
  file_path = token.literal
1916
- elif token.type == IDENT and token.literal == 'as':
2015
+ elif token.type == AS or (token.type == IDENT and token.literal == 'as'):
1917
2016
  if i + 1 < len(tokens) and tokens[i + 1].type == IDENT:
1918
2017
  alias = tokens[i + 1].literal
1919
2018
 
@@ -1926,7 +2025,11 @@ class ContextStackParser:
1926
2025
  def _parse_export_statement_block(self, block_info, all_tokens):
1927
2026
  """Parse export statement: export { name1, name2, ... } or export const/let X = value"""
1928
2027
  tokens = block_info['tokens']
1929
- parser_debug(f" 📝 Found export statement: {[t.literal for t in tokens]}")
2028
+ if (
2029
+ (hasattr(zexus_config, 'enable_parser_debug') and zexus_config.enable_parser_debug)
2030
+ or zexus_config.should_log('debug')
2031
+ ):
2032
+ parser_debug(f" 📝 Found export statement: {[t.literal for t in tokens]}")
1930
2033
 
1931
2034
  # Check if this is "export const/let ..." syntax
1932
2035
  if len(tokens) >= 2 and tokens[0].type == EXPORT and tokens[1].type in [CONST, LET]:
@@ -2010,6 +2113,23 @@ class ContextStackParser:
2010
2113
  result.statements = [contract_stmt, export_stmt]
2011
2114
  return result
2012
2115
 
2116
+ # Check if this is "export data Name {...}" syntax
2117
+ if len(tokens) >= 3 and tokens[0].type == EXPORT and tokens[1].type == DATA:
2118
+ parser_debug(" 🎯 Handling 'export data' statement")
2119
+
2120
+ data_tokens = tokens[1:]
2121
+ data_stmt = self._parse_data_statement(data_tokens)
2122
+
2123
+ if data_stmt is None:
2124
+ return None
2125
+
2126
+ data_name = data_stmt.name.value if hasattr(data_stmt.name, 'value') else str(data_stmt.name)
2127
+
2128
+ export_stmt = ExportStatement(names=[Identifier(data_name)])
2129
+ result = BlockStatement()
2130
+ result.statements = [data_stmt, export_stmt]
2131
+ return result
2132
+
2013
2133
  # Check if this is "export function name(...) {...}" syntax
2014
2134
  if len(tokens) >= 3 and tokens[0].type == EXPORT and tokens[1].type == FUNCTION:
2015
2135
  parser_debug(" 🎯 Handling 'export function' statement")
@@ -2137,8 +2257,13 @@ class ContextStackParser:
2137
2257
  return self._parse_generic_statement_block(block_info, all_tokens)
2138
2258
 
2139
2259
  def _parse_generic_statement_block(self, block_info, all_tokens):
2140
- """Parse generic statement block - RETURNS ExpressionStatement"""
2260
+ """Parse generic statement block - RETURNS ExpressionStatement or AssignmentExpression"""
2141
2261
  tokens = block_info['tokens']
2262
+ # Check for compound assignment tokens (+=, -=, *=, /=, %=, **=)
2263
+ _compound_types = {PLUS_ASSIGN, MINUS_ASSIGN, STAR_ASSIGN, SLASH_ASSIGN, MOD_ASSIGN, POWER_ASSIGN}
2264
+ for tok in tokens:
2265
+ if tok.type in _compound_types or tok.type == ASSIGN:
2266
+ return self._parse_assignment_statement(block_info, all_tokens)
2142
2267
  expression = self._parse_expression(tokens)
2143
2268
  if expression:
2144
2269
  return ExpressionStatement(expression)
@@ -2155,11 +2280,13 @@ class ContextStackParser:
2155
2280
  try_block = self._parse_try_block(tokens)
2156
2281
  error_var = self._extract_catch_variable(tokens)
2157
2282
  catch_block = self._parse_catch_block(tokens)
2283
+ finally_block = self._parse_finally_block(tokens)
2158
2284
 
2159
2285
  return TryCatchStatement(
2160
2286
  try_block=try_block,
2161
2287
  error_variable=error_var,
2162
- catch_block=catch_block
2288
+ catch_block=catch_block,
2289
+ finally_block=finally_block
2163
2290
  )
2164
2291
 
2165
2292
  def _parse_try_block(self, tokens):
@@ -2226,23 +2353,57 @@ class ContextStackParser:
2226
2353
  parser_debug(" ⚠️ [Catch] Could not find catch block content")
2227
2354
  return BlockStatement()
2228
2355
 
2356
+ def _parse_finally_block(self, tokens):
2357
+ """Parse the finally block from tokens, if present. Returns BlockStatement or None."""
2358
+ finally_start = -1
2359
+ finally_end = -1
2360
+ brace_count = 0
2361
+ in_finally = False
2362
+
2363
+ for i, token in enumerate(tokens):
2364
+ if token.type == FINALLY:
2365
+ in_finally = True
2366
+ elif in_finally and token.type == LBRACE:
2367
+ if brace_count == 0:
2368
+ finally_start = i + 1
2369
+ brace_count += 1
2370
+ elif in_finally and token.type == RBRACE:
2371
+ brace_count -= 1
2372
+ if brace_count == 0:
2373
+ finally_end = i
2374
+ break
2375
+
2376
+ if finally_start != -1 and finally_end != -1 and finally_end > finally_start:
2377
+ finally_tokens = tokens[finally_start:finally_end]
2378
+ finally_block_statements = self._parse_block_statements(finally_tokens)
2379
+ block = BlockStatement()
2380
+ block.statements = finally_block_statements
2381
+ return block
2382
+
2383
+ return None
2384
+
2229
2385
  def _parse_block_statements(self, tokens):
2230
2386
  """Parse statements from a block of tokens"""
2231
2387
  if not tokens:
2232
2388
  return []
2389
+
2390
+ cache_key = None
2391
+ # Large blocks can be thousands of tokens; building a signature tuple
2392
+ # (and looking it up) can cost significant time/memory. Skip caching
2393
+ # for very large blocks to keep parsing stable.
2394
+ if len(tokens) <= 2000:
2395
+ try:
2396
+ cache_key = _block_tokens_signature(tokens)
2397
+ cached = _BLOCK_STATEMENTS_CACHE.get(cache_key)
2398
+ if cached is not None:
2399
+ return list(cached)
2400
+ except Exception:
2401
+ cache_key = None
2233
2402
 
2234
2403
  statements = []
2235
2404
  i = 0
2236
2405
  # Common statement-starter tokens used by several heuristics and fallbacks
2237
- statement_starters = {
2238
- LET, CONST, DATA, PRINT, FOR, IF, WHILE, RETURN, CONTINUE, ACTION, FUNCTION,
2239
- TRY, EXTERNAL, SCREEN, COLOR, CANVAS, GRAPHICS, ANIMATION, CLOCK,
2240
- EXPORT, USE, DEBUG, ENTITY, CONTRACT, VERIFY, PROTECT, PERSISTENT,
2241
- STORAGE, AUDIT, RESTRICT, SANDBOX, TRAIL, NATIVE, GC, INLINE, BUFFER,
2242
- SIMD, DEFER, PATTERN, ENUM, STREAM, WATCH, LOG, CAPABILITY, GRANT,
2243
- REVOKE, VALIDATE, SANITIZE, IMMUTABLE, INTERFACE, TYPE_ALIAS, MODULE,
2244
- PACKAGE, USING, MIDDLEWARE, AUTH, THROTTLE, CACHE, REQUIRE
2245
- }
2406
+ statement_starters = STATEMENT_STARTERS
2246
2407
 
2247
2408
  # Safety: track loop iterations to prevent infinite loops
2248
2409
  max_iterations = len(tokens) * 10 # Very generous limit
@@ -2439,7 +2600,7 @@ class ContextStackParser:
2439
2600
  prev_tok = tokens[j-1] if j > 0 else None
2440
2601
  is_method_call_continuation = prev_tok and prev_tok.type == DOT
2441
2602
  is_expression_continuation = prev_tok and prev_tok.type in {
2442
- PLUS, MINUS, STAR, SLASH, MOD, # Arithmetic operators
2603
+ PLUS, MINUS, STAR, SLASH, MOD, POWER, # Arithmetic operators
2443
2604
  EQ, NOT_EQ, LT, GT, LTE, GTE, # Comparison operators
2444
2605
  AND, OR, # Logical operators
2445
2606
  COMMA, # List separator
@@ -2524,6 +2685,32 @@ class ContextStackParser:
2524
2685
 
2525
2686
  i = j
2526
2687
 
2688
+ # GC statement heuristic (performance control)
2689
+ elif token.type == GC:
2690
+ j = i + 1
2691
+ action_literal = None
2692
+
2693
+ if j < len(tokens) and tokens[j].type == STRING:
2694
+ action_literal = tokens[j].literal
2695
+ j += 1
2696
+
2697
+ # Consume optional semicolon directly following the statement
2698
+ if j < len(tokens) and tokens[j].type == SEMICOLON:
2699
+ j += 1
2700
+
2701
+ if action_literal is not None:
2702
+ statements.append(GCStatement(action_literal))
2703
+ i = j
2704
+ continue
2705
+
2706
+ # Fallback: parse as expression if tokens are malformed
2707
+ gc_tokens = tokens[i:j]
2708
+ expr = self._parse_expression(gc_tokens)
2709
+ if expr:
2710
+ statements.append(ExpressionStatement(expr))
2711
+ i = j if j > i else i + 1
2712
+ continue
2713
+
2527
2714
  # DATA statement heuristic (dataclass definition)
2528
2715
  # But not if DATA is used as an identifier (data = ...)
2529
2716
  elif token.type == DATA and i + 1 < len(tokens) and tokens[i + 1].type not in [ASSIGN, LBRACKET, DOT, LPAREN]:
@@ -3937,6 +4124,14 @@ class ContextStackParser:
3937
4124
  i = j
3938
4125
 
3939
4126
  # print(f" ✅ Parsed {len(statements)} statements from block")
4127
+ if cache_key is not None:
4128
+ try:
4129
+ _BLOCK_STATEMENTS_CACHE[cache_key] = tuple(statements)
4130
+ if len(_BLOCK_STATEMENTS_CACHE) > _BLOCK_STATEMENTS_CACHE_MAX:
4131
+ _BLOCK_STATEMENTS_CACHE.popitem(last=False)
4132
+ except Exception:
4133
+ pass
4134
+
3940
4135
  return statements
3941
4136
 
3942
4137
  # === MAP LITERAL PARSING ===
@@ -4582,16 +4777,46 @@ class ContextStackParser:
4582
4777
  # If there's a closing RBRACKET, skip it
4583
4778
  if i < n and tokens[i].type == RBRACKET:
4584
4779
  i += 1
4585
- prop_expr = self._parse_expression(inner_tokens) if inner_tokens else Identifier('')
4586
- current_expr = PropertyAccessExpression(
4587
- object=current_expr,
4588
- property=prop_expr,
4589
- computed=True
4590
- )
4780
+ colon_idx = None
4781
+ depth = 0
4782
+ for idx, tok in enumerate(inner_tokens):
4783
+ if tok.type in {LBRACKET, LPAREN, LBRACE}:
4784
+ depth += 1
4785
+ elif tok.type in {RBRACKET, RPAREN, RBRACE}:
4786
+ if depth > 0:
4787
+ depth -= 1
4788
+ elif tok.type == COLON and depth == 0:
4789
+ colon_idx = idx
4790
+ break
4791
+
4792
+ if colon_idx is not None:
4793
+ start_tokens = inner_tokens[:colon_idx]
4794
+ end_tokens = inner_tokens[colon_idx + 1:]
4795
+ start_expr = self._parse_expression(start_tokens) if start_tokens else None
4796
+ end_expr = self._parse_expression(end_tokens) if end_tokens else None
4797
+ current_expr = SliceExpression(
4798
+ object=current_expr,
4799
+ start=start_expr,
4800
+ end=end_expr
4801
+ )
4802
+ else:
4803
+ if not inner_tokens:
4804
+ parser_debug(" ❌ Empty bracket access is invalid")
4805
+ prop_expr = StringLiteral("")
4806
+ else:
4807
+ prop_expr = self._parse_expression(inner_tokens)
4808
+ if prop_expr is None:
4809
+ parser_debug(" ❌ Could not parse bracket expression")
4810
+ prop_expr = StringLiteral("")
4811
+ current_expr = PropertyAccessExpression(
4812
+ object=current_expr,
4813
+ property=prop_expr,
4814
+ computed=True
4815
+ )
4591
4816
  continue
4592
4817
 
4593
4818
  # Binary operators (comparisons and arithmetic - but NOT AND/OR which are handled above)
4594
- if t.type in {PLUS, MINUS, ASTERISK, SLASH, MOD,
4819
+ if t.type in {PLUS, MINUS, ASTERISK, SLASH, MOD, POWER,
4595
4820
  LT, GT, EQ, NOT_EQ, LTE, GTE}:
4596
4821
  # Get operator precedence
4597
4822
  op_precedence = self._get_operator_precedence(t.type)
@@ -4617,7 +4842,7 @@ class ContextStackParser:
4617
4842
  break
4618
4843
 
4619
4844
  # If we're not nested and we hit an operator with same or lower precedence, stop
4620
- if depth == 0 and tt.type in {PLUS, MINUS, ASTERISK, SLASH, MOD, LT, GT, EQ, NOT_EQ, LTE, GTE}:
4845
+ if depth == 0 and tt.type in {PLUS, MINUS, ASTERISK, SLASH, MOD, POWER, LT, GT, EQ, NOT_EQ, LTE, GTE}:
4621
4846
  next_precedence = self._get_operator_precedence(tt.type)
4622
4847
  # For left-associative operators, stop if next has same or lower precedence
4623
4848
  if next_precedence <= op_precedence:
@@ -4649,12 +4874,15 @@ class ContextStackParser:
4649
4874
  Higher numbers = higher precedence (evaluated first)
4650
4875
  """
4651
4876
  # Precedence levels (matching parser.py conventions)
4877
+ POWER_PREC = 10 # ** (exponentiation)
4652
4878
  PRODUCT = 9 # *, /, %
4653
4879
  SUM = 8 # +, -
4654
4880
  COMPARISON = 7 # <, >, <=, >=
4655
4881
  EQUALITY = 6 # ==, !=
4656
4882
 
4657
- if token_type in {ASTERISK, SLASH, MOD}:
4883
+ if token_type == POWER:
4884
+ return POWER_PREC
4885
+ elif token_type in {ASTERISK, SLASH, MOD}:
4658
4886
  return PRODUCT
4659
4887
  elif token_type in {PLUS, MINUS}:
4660
4888
  return SUM
@@ -4669,6 +4897,21 @@ class ContextStackParser:
4669
4897
  """Parse a single token into an expression"""
4670
4898
  if token.type == STRING:
4671
4899
  return StringLiteral(token.literal)
4900
+ elif token.type == INTERP_STRING:
4901
+ # Interpolated string — delegate to sub-parser for each expr part
4902
+ from ..lexer import Lexer as _Lexer
4903
+ raw_parts = token.literal # list of ("str", text) | ("expr", source)
4904
+ parsed_parts = []
4905
+ for part_type, part_value in raw_parts:
4906
+ if part_type == "str":
4907
+ parsed_parts.append(("str", part_value))
4908
+ elif part_type == "expr":
4909
+ sub_lexer = _Lexer(part_value)
4910
+ from .parser import Parser as _Parser, LOWEST as _LOWEST
4911
+ sub_parser = _Parser(sub_lexer)
4912
+ expr_node = sub_parser.parse_expression(_LOWEST)
4913
+ parsed_parts.append(("expr", expr_node if expr_node else StringLiteral("")))
4914
+ return StringInterpolationExpression(parts=parsed_parts)
4672
4915
  elif token.type == INT:
4673
4916
  try:
4674
4917
  return IntegerLiteral(int(token.literal))
@@ -4925,14 +5168,81 @@ class ContextStackParser:
4925
5168
 
4926
5169
  # Parse match cases
4927
5170
  cases = []
5171
+
5172
+ # Support case/colon syntax when present
5173
+ if any(t.type in {CASE, DEFAULT} for t in body_tokens):
5174
+ i = 0
5175
+ while i < len(body_tokens):
5176
+ if body_tokens[i].type in {COMMA, SEMICOLON}:
5177
+ i += 1
5178
+ continue
5179
+
5180
+ pattern = None
5181
+
5182
+ if body_tokens[i].type == DEFAULT:
5183
+ pattern = WildcardPattern()
5184
+ i += 1
5185
+ if i < len(body_tokens) and body_tokens[i].type == COLON:
5186
+ i += 1
5187
+ elif body_tokens[i].type == CASE:
5188
+ i += 1
5189
+ pattern_start = i
5190
+ depth = 0
5191
+ while i < len(body_tokens):
5192
+ t = body_tokens[i]
5193
+ if t.type in {LPAREN, LBRACE, LBRACKET}:
5194
+ depth += 1
5195
+ elif t.type in {RPAREN, RBRACE, RBRACKET}:
5196
+ depth -= 1
5197
+ elif t.type == COLON and depth == 0:
5198
+ break
5199
+ i += 1
5200
+
5201
+ colon_idx = i
5202
+ pattern_tokens = body_tokens[pattern_start:colon_idx]
5203
+ pattern = self._parse_pattern(pattern_tokens) if pattern_tokens else None
5204
+
5205
+ if i < len(body_tokens) and body_tokens[i].type == COLON:
5206
+ i += 1
5207
+ else:
5208
+ i += 1
5209
+ continue
5210
+
5211
+ result_start = i
5212
+ depth = 0
5213
+ while i < len(body_tokens):
5214
+ t = body_tokens[i]
5215
+ if t.type in {LPAREN, LBRACE, LBRACKET}:
5216
+ depth += 1
5217
+ elif t.type in {RPAREN, RBRACE, RBRACKET}:
5218
+ depth -= 1
5219
+ elif depth == 0 and t.type in {CASE, DEFAULT}:
5220
+ break
5221
+ elif depth == 0 and t.type in {COMMA, SEMICOLON}:
5222
+ break
5223
+ i += 1
5224
+
5225
+ result_tokens = body_tokens[result_start:i]
5226
+ result_expr = self._parse_expression(result_tokens) if result_tokens else NullLiteral()
5227
+
5228
+ if pattern:
5229
+ cases.append(MatchCase(pattern=pattern, result=result_expr))
5230
+ parser_debug(f" ✅ Parsed match case: {pattern} : {result_expr}")
5231
+
5232
+ while i < len(body_tokens) and body_tokens[i].type in {COMMA, SEMICOLON}:
5233
+ i += 1
5234
+
5235
+ parser_debug(f" ✅ Match expression with {len(cases)} cases")
5236
+ return MatchExpression(value=value_expr, cases=cases)
5237
+
4928
5238
  i = 0
4929
-
5239
+
4930
5240
  while i < len(body_tokens):
4931
5241
  # Skip commas and semicolons
4932
5242
  if body_tokens[i].type in {COMMA, SEMICOLON}:
4933
5243
  i += 1
4934
5244
  continue
4935
-
5245
+
4936
5246
  # Find the => separator
4937
5247
  arrow_idx = -1
4938
5248
  depth = 0
@@ -4944,25 +5254,25 @@ class ContextStackParser:
4944
5254
  elif body_tokens[j].type == LAMBDA and depth == 0: # => is tokenized as LAMBDA
4945
5255
  arrow_idx = j
4946
5256
  break
4947
-
5257
+
4948
5258
  if arrow_idx == -1:
4949
5259
  # No more cases
4950
5260
  break
4951
-
5261
+
4952
5262
  # Parse pattern (from i to arrow_idx)
4953
5263
  pattern_tokens = body_tokens[i:arrow_idx]
4954
5264
  pattern = self._parse_pattern(pattern_tokens)
4955
-
5265
+
4956
5266
  if not pattern:
4957
5267
  parser_debug(f" ❌ Failed to parse pattern: {[t.literal for t in pattern_tokens]}")
4958
5268
  i = arrow_idx + 1
4959
5269
  continue
4960
-
5270
+
4961
5271
  # Find result expression end (comma, semicolon, or next pattern)
4962
5272
  result_start = arrow_idx + 1
4963
5273
  result_end = result_start
4964
5274
  depth = 0
4965
-
5275
+
4966
5276
  while result_end < len(body_tokens):
4967
5277
  if body_tokens[result_end].type in {LPAREN, LBRACE, LBRACKET}:
4968
5278
  depth += 1
@@ -4977,7 +5287,7 @@ class ContextStackParser:
4977
5287
  # Patterns can start with: INT, STRING, IDENT (for constructor or wildcard)
4978
5288
  current_tok = body_tokens[result_end]
4979
5289
  next_tok = body_tokens[result_end + 1]
4980
-
5290
+
4981
5291
  # Pattern: literal => or _ => or Constructor( =>
4982
5292
  if current_tok.type in {INT, STRING, TRUE, FALSE}:
4983
5293
  # Look ahead for =>
@@ -5010,18 +5320,18 @@ class ContextStackParser:
5010
5320
  lookahead += 1
5011
5321
  if lookahead < len(body_tokens) and body_tokens[lookahead].type == LAMBDA:
5012
5322
  break # Start of new constructor pattern
5013
-
5323
+
5014
5324
  result_end += 1
5015
-
5325
+
5016
5326
  # Parse result expression
5017
5327
  result_tokens = body_tokens[result_start:result_end]
5018
5328
  result_expr = self._parse_expression(result_tokens) if result_tokens else NullLiteral()
5019
-
5329
+
5020
5330
  cases.append(MatchCase(pattern=pattern, result=result_expr))
5021
5331
  parser_debug(f" ✅ Parsed match case: {pattern} => {result_expr}")
5022
-
5332
+
5023
5333
  i = result_end
5024
-
5334
+
5025
5335
  parser_debug(f" ✅ Match expression with {len(cases)} cases")
5026
5336
  return MatchExpression(value=value_expr, cases=cases)
5027
5337
 
@@ -5691,15 +6001,19 @@ class ContextStackParser:
5691
6001
  while i < len(tokens) and tokens[i].type != RPAREN:
5692
6002
  if tokens[i].type == IDENT:
5693
6003
  # This is a parameter name
5694
- params.append(Identifier(tokens[i].literal))
6004
+ param_name = tokens[i].literal
6005
+ param_type = None
5695
6006
  i += 1
5696
6007
 
5697
6008
  # Check for type annotation: : type
5698
6009
  if i < len(tokens) and tokens[i].type == COLON:
5699
6010
  i += 1 # Skip COLON
5700
6011
  if i < len(tokens) and tokens[i].type == IDENT:
6012
+ param_type = tokens[i].literal
5701
6013
  i += 1 # Skip type name
5702
6014
 
6015
+ params.append(Identifier(param_name, type_annotation=param_type))
6016
+
5703
6017
  # Check for comma (more parameters)
5704
6018
  if i < len(tokens) and tokens[i].type == COMMA:
5705
6019
  i += 1 # Skip COMMA
@@ -5816,13 +6130,21 @@ class ContextStackParser:
5816
6130
  if not tokens:
5817
6131
  return None
5818
6132
 
5819
- # Check if this starts with ASYNC modifier
6133
+ # Check for optional modifiers before the FUNCTION keyword (async, inline, native, etc.)
6134
+ modifier_types = {
6135
+ ASYNC, INLINE, NATIVE, PUBLIC, PRIVATE, SEALED, SECURE, PURE, VIEW, PAYABLE,
6136
+ }
6137
+ modifiers = []
5820
6138
  is_async = False
5821
6139
  start_idx = 0
5822
- if tokens[0].type == ASYNC:
5823
- is_async = True
5824
- start_idx = 1
5825
-
6140
+
6141
+ while start_idx < len(tokens) and tokens[start_idx].type in modifier_types:
6142
+ modifier_token = tokens[start_idx]
6143
+ modifiers.append(getattr(modifier_token, 'literal', modifier_token.type))
6144
+ if modifier_token.type == ASYNC:
6145
+ is_async = True
6146
+ start_idx += 1
6147
+
5826
6148
  # Now check for FUNCTION keyword
5827
6149
  if start_idx >= len(tokens) or tokens[start_idx].type != FUNCTION:
5828
6150
  return None
@@ -5891,8 +6213,8 @@ class ContextStackParser:
5891
6213
  )
5892
6214
 
5893
6215
  # Set async flag if modifier was present
5894
- if is_async:
5895
- func_stmt.modifiers = ['async']
6216
+ if modifiers:
6217
+ func_stmt.modifiers = modifiers
5896
6218
 
5897
6219
  return func_stmt
5898
6220
 
@@ -6067,11 +6389,12 @@ class ContextStackParser:
6067
6389
  return statements[0] # Return the first (and likely only) statement
6068
6390
  return None
6069
6391
 
6070
- # Check if this is an assignment statement (identifier = value OR property.access = value)
6071
- # Look for ASSIGN token anywhere in the statement
6392
+ # Check if this is an assignment statement (identifier = value, +=, -=, *=, /=, %=, **=)
6393
+ # Look for ASSIGN or compound assignment token anywhere in the statement
6394
+ _compound_types = {PLUS_ASSIGN, MINUS_ASSIGN, STAR_ASSIGN, SLASH_ASSIGN, MOD_ASSIGN, POWER_ASSIGN}
6072
6395
  assign_idx = None
6073
6396
  for i, tok in enumerate(tokens):
6074
- if tok.type == ASSIGN:
6397
+ if tok.type == ASSIGN or tok.type in _compound_types:
6075
6398
  assign_idx = i
6076
6399
  break
6077
6400
 
@@ -7211,6 +7534,24 @@ class ContextStackParser:
7211
7534
  parser_debug(" ✅ Limit statement")
7212
7535
  return LimitStatement(amount=gas_limit)
7213
7536
 
7537
def _parse_gc_statement_block(self, block_info, all_tokens):
    """Parse a ``gc "action"`` statement from a collected token block.

    Produces a GCStatement when the block has the shape ``GC STRING``.
    When the string action is missing, the whole block is parsed as a
    plain expression instead; returns None when the block does not begin
    with the GC keyword at all.
    """
    parser_debug("🔧 [Context] Parsing gc statement")
    toks = block_info.get('tokens', [])

    # Guard: the block must begin with the GC keyword.
    if not (toks and toks[0].type == GC):
        parser_debug(" ❌ Expected GC keyword")
        return None

    # Happy path: `gc` immediately followed by a string literal action.
    if len(toks) >= 2 and toks[1].type == STRING:
        gc_action = toks[1].literal
        parser_debug(f" ✅ GC statement action='{gc_action}'")
        return GCStatement(gc_action)

    # Fallback: no string action — best-effort parse as a bare expression.
    parser_debug(" ⚠️ GC statement missing string literal action")
    fallback = self._parse_expression(toks)
    return ExpressionStatement(fallback) if fallback else None
7554
+
7214
7555
  def _parse_stream_statement(self, block_info, all_tokens):
7215
7556
  """Parse stream statement.
7216
7557