zexus 1.6.8 → 1.7.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +12 -5
- package/package.json +1 -1
- package/src/__init__.py +7 -0
- package/src/zexus/__init__.py +1 -1
- package/src/zexus/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/capability_system.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/debug_sanitizer.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/environment.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/error_reporter.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/input_validation.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/lexer.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/module_cache.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/module_manager.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/object.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/security.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/security_enforcement.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/syntax_validator.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/zexus_ast.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/zexus_token.cpython-312.pyc +0 -0
- package/src/zexus/access_control_system/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/access_control_system/__pycache__/access_control.cpython-312.pyc +0 -0
- package/src/zexus/advanced_types.py +17 -2
- package/src/zexus/blockchain/__init__.py +411 -0
- package/src/zexus/blockchain/accelerator.py +1160 -0
- package/src/zexus/blockchain/chain.py +660 -0
- package/src/zexus/blockchain/consensus.py +821 -0
- package/src/zexus/blockchain/contract_vm.py +1019 -0
- package/src/zexus/blockchain/crypto.py +79 -14
- package/src/zexus/blockchain/events.py +526 -0
- package/src/zexus/blockchain/loadtest.py +721 -0
- package/src/zexus/blockchain/monitoring.py +350 -0
- package/src/zexus/blockchain/mpt.py +716 -0
- package/src/zexus/blockchain/multichain.py +951 -0
- package/src/zexus/blockchain/multiprocess_executor.py +338 -0
- package/src/zexus/blockchain/network.py +886 -0
- package/src/zexus/blockchain/node.py +666 -0
- package/src/zexus/blockchain/rpc.py +1203 -0
- package/src/zexus/blockchain/rust_bridge.py +421 -0
- package/src/zexus/blockchain/storage.py +423 -0
- package/src/zexus/blockchain/tokens.py +750 -0
- package/src/zexus/blockchain/upgradeable.py +1004 -0
- package/src/zexus/blockchain/verification.py +1602 -0
- package/src/zexus/blockchain/wallet.py +621 -0
- package/src/zexus/capability_system.py +184 -9
- package/src/zexus/cli/__pycache__/main.cpython-312.pyc +0 -0
- package/src/zexus/cli/main.py +383 -34
- package/src/zexus/cli/zpm.py +1 -1
- package/src/zexus/compiler/__pycache__/bytecode.cpython-312.pyc +0 -0
- package/src/zexus/compiler/__pycache__/lexer.cpython-312.pyc +0 -0
- package/src/zexus/compiler/__pycache__/parser.cpython-312.pyc +0 -0
- package/src/zexus/compiler/__pycache__/semantic.cpython-312.pyc +0 -0
- package/src/zexus/compiler/__pycache__/zexus_ast.cpython-312.pyc +0 -0
- package/src/zexus/compiler/bytecode.py +124 -7
- package/src/zexus/compiler/compat_runtime.py +6 -2
- package/src/zexus/compiler/lexer.py +16 -5
- package/src/zexus/compiler/parser.py +108 -7
- package/src/zexus/compiler/semantic.py +18 -19
- package/src/zexus/compiler/zexus_ast.py +26 -1
- package/src/zexus/concurrency_system.py +79 -0
- package/src/zexus/config.py +54 -0
- package/src/zexus/crypto_bridge.py +244 -8
- package/src/zexus/dap/__init__.py +10 -0
- package/src/zexus/dap/__main__.py +4 -0
- package/src/zexus/dap/dap_server.py +391 -0
- package/src/zexus/dap/debug_engine.py +298 -0
- package/src/zexus/environment.py +112 -9
- package/src/zexus/evaluator/__pycache__/bytecode_compiler.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/core.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/expressions.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/functions.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/resource_limiter.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/statements.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/unified_execution.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/utils.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/bytecode_compiler.py +457 -37
- package/src/zexus/evaluator/core.py +644 -50
- package/src/zexus/evaluator/expressions.py +358 -62
- package/src/zexus/evaluator/functions.py +458 -20
- package/src/zexus/evaluator/resource_limiter.py +4 -4
- package/src/zexus/evaluator/statements.py +774 -122
- package/src/zexus/evaluator/unified_execution.py +573 -72
- package/src/zexus/evaluator/utils.py +14 -2
- package/src/zexus/evaluator_original.py +1 -1
- package/src/zexus/event_loop.py +186 -0
- package/src/zexus/lexer.py +742 -458
- package/src/zexus/lsp/__init__.py +1 -1
- package/src/zexus/lsp/definition_provider.py +163 -9
- package/src/zexus/lsp/server.py +22 -8
- package/src/zexus/lsp/symbol_provider.py +182 -9
- package/src/zexus/module_cache.py +239 -9
- package/src/zexus/module_manager.py +129 -1
- package/src/zexus/object.py +76 -6
- package/src/zexus/parser/__pycache__/parser.cpython-312.pyc +0 -0
- package/src/zexus/parser/__pycache__/strategy_context.cpython-312.pyc +0 -0
- package/src/zexus/parser/__pycache__/strategy_structural.cpython-312.pyc +0 -0
- package/src/zexus/parser/parser.py +1349 -408
- package/src/zexus/parser/strategy_context.py +755 -58
- package/src/zexus/parser/strategy_structural.py +121 -21
- package/src/zexus/persistence.py +15 -1
- package/src/zexus/renderer/__init__.py +61 -0
- package/src/zexus/renderer/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/renderer/__pycache__/backend.cpython-312.pyc +0 -0
- package/src/zexus/renderer/__pycache__/canvas.cpython-312.pyc +0 -0
- package/src/zexus/renderer/__pycache__/color_system.cpython-312.pyc +0 -0
- package/src/zexus/renderer/__pycache__/layout.cpython-312.pyc +0 -0
- package/src/zexus/renderer/__pycache__/main_renderer.cpython-312.pyc +0 -0
- package/src/zexus/renderer/__pycache__/painter.cpython-312.pyc +0 -0
- package/src/zexus/renderer/backend.py +261 -0
- package/src/zexus/renderer/canvas.py +78 -0
- package/src/zexus/renderer/color_system.py +201 -0
- package/src/zexus/renderer/graphics.py +31 -0
- package/src/zexus/renderer/layout.py +222 -0
- package/src/zexus/renderer/main_renderer.py +66 -0
- package/src/zexus/renderer/painter.py +30 -0
- package/src/zexus/renderer/tk_backend.py +208 -0
- package/src/zexus/renderer/web_backend.py +260 -0
- package/src/zexus/runtime/__init__.py +10 -2
- package/src/zexus/runtime/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/runtime/__pycache__/async_runtime.cpython-312.pyc +0 -0
- package/src/zexus/runtime/__pycache__/load_manager.cpython-312.pyc +0 -0
- package/src/zexus/runtime/file_flags.py +137 -0
- package/src/zexus/runtime/load_manager.py +368 -0
- package/src/zexus/safety/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/safety/__pycache__/memory_safety.cpython-312.pyc +0 -0
- package/src/zexus/security.py +424 -34
- package/src/zexus/stdlib/fs.py +23 -18
- package/src/zexus/stdlib/http.py +289 -186
- package/src/zexus/stdlib/sockets.py +207 -163
- package/src/zexus/stdlib/websockets.py +282 -0
- package/src/zexus/stdlib_integration.py +369 -2
- package/src/zexus/strategy_recovery.py +6 -3
- package/src/zexus/type_checker.py +423 -0
- package/src/zexus/virtual_filesystem.py +189 -2
- package/src/zexus/vm/__init__.py +113 -3
- package/src/zexus/vm/__pycache__/async_optimizer.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/bytecode.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/bytecode_converter.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/cache.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/compiler.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/gas_metering.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/jit.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/parallel_vm.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/vm.cpython-312.pyc +0 -0
- package/src/zexus/vm/async_optimizer.py +80 -6
- package/src/zexus/vm/binary_bytecode.py +659 -0
- package/src/zexus/vm/bytecode.py +59 -11
- package/src/zexus/vm/bytecode_converter.py +26 -12
- package/src/zexus/vm/cabi.c +1985 -0
- package/src/zexus/vm/cabi.cpython-312-x86_64-linux-gnu.so +0 -0
- package/src/zexus/vm/cabi.h +127 -0
- package/src/zexus/vm/cache.py +561 -17
- package/src/zexus/vm/compiler.py +818 -51
- package/src/zexus/vm/fastops.c +15743 -0
- package/src/zexus/vm/fastops.cpython-312-x86_64-linux-gnu.so +0 -0
- package/src/zexus/vm/fastops.pyx +288 -0
- package/src/zexus/vm/gas_metering.py +50 -9
- package/src/zexus/vm/jit.py +364 -20
- package/src/zexus/vm/native_jit_backend.py +1816 -0
- package/src/zexus/vm/native_runtime.cpp +1388 -0
- package/src/zexus/vm/native_runtime.cpython-312-x86_64-linux-gnu.so +0 -0
- package/src/zexus/vm/optimizer.py +161 -11
- package/src/zexus/vm/parallel_vm.py +140 -45
- package/src/zexus/vm/peephole_optimizer.py +82 -4
- package/src/zexus/vm/profiler.py +38 -18
- package/src/zexus/vm/register_allocator.py +16 -5
- package/src/zexus/vm/register_vm.py +8 -5
- package/src/zexus/vm/vm.py +3581 -531
- package/src/zexus/vm/wasm_compiler.py +658 -0
- package/src/zexus/zexus_ast.py +137 -11
- package/src/zexus/zexus_token.py +16 -5
- package/src/zexus/zpm/installer.py +55 -15
- package/src/zexus/zpm/package_manager.py +1 -1
- package/src/zexus/zpm/registry.py +257 -28
- package/src/zexus.egg-info/PKG-INFO +16 -6
- package/src/zexus.egg-info/SOURCES.txt +129 -17
- package/src/zexus.egg-info/entry_points.txt +1 -0
- package/src/zexus.egg-info/requires.txt +4 -0
|
@@ -6,6 +6,30 @@ from ..zexus_token import *
|
|
|
6
6
|
from ..zexus_ast import *
|
|
7
7
|
from ..config import config as zexus_config
|
|
8
8
|
from types import SimpleNamespace # Helper for AST node creation
|
|
9
|
+
from collections import OrderedDict
|
|
10
|
+
|
|
11
|
+
STATEMENT_STARTERS = {
|
|
12
|
+
LET, CONST, DATA, PRINT, FOR, IF, WHILE, RETURN, CONTINUE, BREAK, THROW, ACTION, FUNCTION,
|
|
13
|
+
TRY, FINALLY, EXTERNAL, SCREEN, COLOR, CANVAS, GRAPHICS, ANIMATION, CLOCK,
|
|
14
|
+
EXPORT, USE, DEBUG, ENTITY, CONTRACT, VERIFY, PROTECT, PERSISTENT,
|
|
15
|
+
STORAGE, AUDIT, RESTRICT, SANDBOX, TRAIL, NATIVE, GC, INLINE, BUFFER,
|
|
16
|
+
SIMD, DEFER, PATTERN, ENUM, STREAM, WATCH, LOG, CAPABILITY, GRANT,
|
|
17
|
+
REVOKE, VALIDATE, SANITIZE, IMMUTABLE, INTERFACE, TYPE_ALIAS, MODULE,
|
|
18
|
+
PACKAGE, USING, MIDDLEWARE, AUTH, THROTTLE, CACHE, REQUIRE
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
_MEANINGFUL_TOKEN_TYPES = {
|
|
22
|
+
IDENT, STRING, INTERP_STRING, INT, FLOAT, LBRACE, RBRACE, LPAREN, RPAREN, LBRACKET,
|
|
23
|
+
RBRACKET, COMMA, DOT, SEMICOLON, ASSIGN, LAMBDA,
|
|
24
|
+
POWER, PLUS_ASSIGN, MINUS_ASSIGN, STAR_ASSIGN, SLASH_ASSIGN, MOD_ASSIGN, POWER_ASSIGN
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
_BLOCK_STATEMENTS_CACHE: "OrderedDict[tuple, tuple]" = OrderedDict()
|
|
28
|
+
_BLOCK_STATEMENTS_CACHE_MAX = 256
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def _block_tokens_signature(tokens):
|
|
32
|
+
return tuple((t.type, t.literal) for t in tokens)
|
|
9
33
|
|
|
10
34
|
# Import Parser for nested parsing (needed for LOG statement)
|
|
11
35
|
# Note: This is imported at runtime to avoid circular dependency
|
|
@@ -59,6 +83,11 @@ class ContextStackParser:
|
|
|
59
83
|
'paren_block': self._parse_paren_block_context,
|
|
60
84
|
'statement_block': self._parse_statement_block_context,
|
|
61
85
|
'bracket_block': self._parse_brace_block_context,
|
|
86
|
+
'color_statement': self._parse_color_statement,
|
|
87
|
+
'canvas_statement': self._parse_canvas_statement,
|
|
88
|
+
'graphics_statement': self._parse_graphics_statement,
|
|
89
|
+
'animation_statement': self._parse_animation_statement,
|
|
90
|
+
'clock_statement': self._parse_clock_statement,
|
|
62
91
|
# DIRECT handlers for specific statement types
|
|
63
92
|
IF: self._parse_statement_block_context,
|
|
64
93
|
FOR: self._parse_statement_block_context,
|
|
@@ -127,6 +156,12 @@ class ContextStackParser:
|
|
|
127
156
|
INJECT: self._parse_inject_statement,
|
|
128
157
|
VALIDATE: self._parse_validate_statement,
|
|
129
158
|
SANITIZE: self._parse_sanitize_statement,
|
|
159
|
+
COLOR: self._parse_color_statement,
|
|
160
|
+
CANVAS: self._parse_canvas_statement,
|
|
161
|
+
GRAPHICS: self._parse_graphics_statement,
|
|
162
|
+
ANIMATION: self._parse_animation_statement,
|
|
163
|
+
CLOCK: self._parse_clock_statement,
|
|
164
|
+
GC: self._parse_gc_statement_block,
|
|
130
165
|
}
|
|
131
166
|
|
|
132
167
|
def push_context(self, context_type, context_name=None):
|
|
@@ -147,6 +182,35 @@ class ContextStackParser:
|
|
|
147
182
|
"""Get the current parsing context"""
|
|
148
183
|
return self.current_context[-1] if self.current_context else 'global'
|
|
149
184
|
|
|
185
|
+
def _parse_destructure_via_traditional(self, tokens):
|
|
186
|
+
"""Delegate destructuring let/const parsing to the traditional parser.
|
|
187
|
+
|
|
188
|
+
This re-lexes the token stream through a mini UltimateParser instance so
|
|
189
|
+
that ``parse_let_statement`` / ``parse_const_statement`` (which already
|
|
190
|
+
understand ``{`` / ``[`` destructure patterns) handle the work.
|
|
191
|
+
"""
|
|
192
|
+
from ..lexer import Lexer
|
|
193
|
+
from .parser import UltimateParser
|
|
194
|
+
# Reconstruct source code faithfully — STRING tokens must be re-quoted
|
|
195
|
+
# so the re-lexer doesn't treat them as identifiers.
|
|
196
|
+
parts = []
|
|
197
|
+
for t in tokens:
|
|
198
|
+
if not t.literal:
|
|
199
|
+
continue
|
|
200
|
+
if t.type == 'STRING':
|
|
201
|
+
# Escape inner double-quotes and wrap in quotes
|
|
202
|
+
escaped = t.literal.replace('\\', '\\\\').replace('"', '\\"')
|
|
203
|
+
parts.append(f'"{escaped}"')
|
|
204
|
+
else:
|
|
205
|
+
parts.append(t.literal)
|
|
206
|
+
code = ' '.join(parts)
|
|
207
|
+
mini_lexer = Lexer(code)
|
|
208
|
+
mini_parser = UltimateParser(mini_lexer, 'universal', False)
|
|
209
|
+
mini_program = mini_parser.parse_program()
|
|
210
|
+
if mini_program and mini_program.statements:
|
|
211
|
+
return mini_program.statements[0]
|
|
212
|
+
return None
|
|
213
|
+
|
|
150
214
|
def parse_block(self, block_info, all_tokens):
|
|
151
215
|
"""Parse a block with context awareness"""
|
|
152
216
|
block_type = block_info.get('subtype', block_info['type'])
|
|
@@ -157,14 +221,17 @@ class ContextStackParser:
|
|
|
157
221
|
try:
|
|
158
222
|
# Early exit: if a block has no meaningful tokens, skip parsing it
|
|
159
223
|
tokens = block_info.get('tokens', []) or []
|
|
160
|
-
|
|
224
|
+
meaningful = False
|
|
225
|
+
for tok in tokens:
|
|
226
|
+
if tok.type in _MEANINGFUL_TOKEN_TYPES:
|
|
227
|
+
meaningful = True
|
|
228
|
+
break
|
|
161
229
|
lit = getattr(tok, 'literal', None)
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
return not (lit is None or lit == '')
|
|
230
|
+
if lit is not None and lit != '':
|
|
231
|
+
meaningful = True
|
|
232
|
+
break
|
|
166
233
|
|
|
167
|
-
if not
|
|
234
|
+
if not meaningful:
|
|
168
235
|
ctx_debug(f"Skipping empty/insignificant block tokens for {block_type}", level='debug')
|
|
169
236
|
return None
|
|
170
237
|
# Use appropriate parsing strategy for this context
|
|
@@ -249,6 +316,10 @@ class ContextStackParser:
|
|
|
249
316
|
parser_debug(" ❌ Invalid let statement: too few tokens")
|
|
250
317
|
return None
|
|
251
318
|
|
|
319
|
+
# Destructuring pattern: let {a, b} = expr or let [x, y] = expr
|
|
320
|
+
if tokens[1].type in (LBRACE, LBRACKET):
|
|
321
|
+
return self._parse_destructure_via_traditional(tokens)
|
|
322
|
+
|
|
252
323
|
if tokens[1].type != IDENT:
|
|
253
324
|
parser_debug(" ❌ Invalid let statement: expected identifier after 'let'")
|
|
254
325
|
return None
|
|
@@ -387,7 +458,9 @@ class ContextStackParser:
|
|
|
387
458
|
|
|
388
459
|
# Check for statement starters that should break
|
|
389
460
|
# Context-sensitive: IF followed by THEN is an expression, not a statement
|
|
390
|
-
if t.type in {LET, PRINT, FOR, WHILE, RETURN, CONTINUE,
|
|
461
|
+
if t.type in {LET, CONST, DATA, PRINT, FOR, WHILE, RETURN, CONTINUE, BREAK, THROW, ACTION, TRY, FINALLY, EXTERNAL,
|
|
462
|
+
SCREEN, COLOR, CANVAS, GRAPHICS, ANIMATION, CLOCK,
|
|
463
|
+
EXPORT, USE, DEBUG}:
|
|
391
464
|
prev = tokens[j-1] if j > 0 else None
|
|
392
465
|
# Allow if part of method chain OR if DEBUG followed by ( (function call)
|
|
393
466
|
allow_method_chain = prev and prev.type == DOT
|
|
@@ -457,6 +530,10 @@ class ContextStackParser:
|
|
|
457
530
|
parser_debug(" ❌ Invalid const statement: too few tokens")
|
|
458
531
|
return None
|
|
459
532
|
|
|
533
|
+
# Destructuring pattern: const {a, b} = expr or const [x, y] = expr
|
|
534
|
+
if tokens[1].type in (LBRACE, LBRACKET):
|
|
535
|
+
return self._parse_destructure_via_traditional(tokens)
|
|
536
|
+
|
|
460
537
|
if tokens[1].type != IDENT:
|
|
461
538
|
parser_debug(" ❌ Invalid const statement: expected identifier after 'const'")
|
|
462
539
|
return None
|
|
@@ -527,7 +604,9 @@ class ContextStackParser:
|
|
|
527
604
|
j += 1 # Skip the semicolon
|
|
528
605
|
break
|
|
529
606
|
# Allow method chains but stop at other statement starters
|
|
530
|
-
if t.type in {LET, CONST, PRINT, FOR, IF, WHILE, RETURN,
|
|
607
|
+
if t.type in {LET, CONST, DATA, PRINT, FOR, IF, WHILE, RETURN, BREAK, THROW, ACTION, TRY, FINALLY, EXTERNAL,
|
|
608
|
+
SCREEN, COLOR, CANVAS, GRAPHICS, ANIMATION, CLOCK,
|
|
609
|
+
EXPORT, USE, DEBUG}:
|
|
531
610
|
prev = tokens[j-1] if j > 0 else None
|
|
532
611
|
if not (prev and prev.type == DOT): # Allow if part of method chain
|
|
533
612
|
break
|
|
@@ -743,7 +822,7 @@ class ContextStackParser:
|
|
|
743
822
|
operator_symbol = None
|
|
744
823
|
if i < len(tokens):
|
|
745
824
|
# Could be +, -, *, /, ==, etc.
|
|
746
|
-
if tokens[i].type in {PLUS, MINUS, STAR, SLASH, MOD, EQ, NOT_EQ, LT, GT, LTE, GTE}:
|
|
825
|
+
if tokens[i].type in {PLUS, MINUS, STAR, SLASH, MOD, EQ, NOT_EQ, LT, GT, LTE, GTE, POWER}:
|
|
747
826
|
operator_symbol = tokens[i].literal
|
|
748
827
|
parser_debug(f" Operator: {operator_symbol}")
|
|
749
828
|
i += 1
|
|
@@ -1123,15 +1202,27 @@ class ContextStackParser:
|
|
|
1123
1202
|
parser_debug("🔧 [Context] Parsing assignment statement")
|
|
1124
1203
|
tokens = block_info['tokens']
|
|
1125
1204
|
|
|
1126
|
-
# Find the ASSIGN operator
|
|
1205
|
+
# Find the ASSIGN operator or compound assignment token
|
|
1127
1206
|
assign_idx = None
|
|
1128
|
-
compound_operator = None # Track if this is +=, -=, *=, /=,
|
|
1207
|
+
compound_operator = None # Track if this is +=, -=, *=, /=, %=, **=
|
|
1208
|
+
|
|
1209
|
+
# Map compound assignment token types to their operator strings
|
|
1210
|
+
_compound_assign_map = {
|
|
1211
|
+
PLUS_ASSIGN: "+", MINUS_ASSIGN: "-", STAR_ASSIGN: "*",
|
|
1212
|
+
SLASH_ASSIGN: "/", MOD_ASSIGN: "%", POWER_ASSIGN: "**"
|
|
1213
|
+
}
|
|
1129
1214
|
|
|
1130
1215
|
for i, tok in enumerate(tokens):
|
|
1131
|
-
|
|
1216
|
+
# Check for single-token compound assignments (+=, -=, *=, /=, %=, **=)
|
|
1217
|
+
if tok.type in _compound_assign_map:
|
|
1218
|
+
assign_idx = i
|
|
1219
|
+
compound_operator = _compound_assign_map[tok.type]
|
|
1220
|
+
parser_debug(f" 🔍 Detected compound assignment token: {compound_operator}= at position {i}")
|
|
1221
|
+
break
|
|
1222
|
+
elif tok.type == ASSIGN:
|
|
1132
1223
|
assign_idx = i
|
|
1133
|
-
#
|
|
1134
|
-
if i > 0 and tokens[i-1].type in {PLUS, MINUS, STAR, SLASH, MOD}:
|
|
1224
|
+
# Legacy check: operator immediately before = (in case of split tokens)
|
|
1225
|
+
if i > 0 and tokens[i-1].type in {PLUS, MINUS, STAR, SLASH, MOD, POWER}:
|
|
1135
1226
|
compound_operator = tokens[i-1].literal
|
|
1136
1227
|
parser_debug(f" 🔍 Detected compound operator: {compound_operator}= at position {i-1}")
|
|
1137
1228
|
assign_idx = i # Keep assign_idx at the = position
|
|
@@ -1142,11 +1233,11 @@ class ContextStackParser:
|
|
|
1142
1233
|
return None
|
|
1143
1234
|
|
|
1144
1235
|
# Parse the left-hand side (could be identifier or property access)
|
|
1145
|
-
# For compound operators (
|
|
1146
|
-
if compound_operator:
|
|
1236
|
+
# For compound operators with split tokens (op + =), exclude the operator token from LHS
|
|
1237
|
+
if compound_operator and tokens[assign_idx].type == ASSIGN:
|
|
1147
1238
|
lhs_tokens = tokens[:assign_idx-1] # Exclude the operator before =
|
|
1148
1239
|
else:
|
|
1149
|
-
lhs_tokens = tokens[:assign_idx]
|
|
1240
|
+
lhs_tokens = tokens[:assign_idx] # For single-token compound assignments
|
|
1150
1241
|
|
|
1151
1242
|
# Check if this is a property access (e.g., obj.property or obj["key"])
|
|
1152
1243
|
target_expr = None
|
|
@@ -1165,7 +1256,14 @@ class ContextStackParser:
|
|
|
1165
1256
|
# Track nesting depth to avoid stopping on braces inside nested structures
|
|
1166
1257
|
value_tokens = []
|
|
1167
1258
|
stop_types = {SEMICOLON} # RBRACE removed - handle with nesting instead
|
|
1168
|
-
statement_starters = {
|
|
1259
|
+
statement_starters = {
|
|
1260
|
+
LET, CONST, DATA, PRINT, FOR, IF, WHILE, RETURN, CONTINUE, BREAK, THROW, ACTION, TRY, FINALLY, EXTERNAL,
|
|
1261
|
+
SCREEN, COLOR, CANVAS, GRAPHICS, ANIMATION, CLOCK,
|
|
1262
|
+
EXPORT, USE, DEBUG, ENTITY, CONTRACT, AUDIT, RESTRICT, SANDBOX, TRAIL, NATIVE, GC, INLINE,
|
|
1263
|
+
BUFFER, SIMD, DEFER, PATTERN, ENUM, STREAM, WATCH, CAPABILITY, GRANT,
|
|
1264
|
+
REVOKE, VALIDATE, SANITIZE, IMMUTABLE, INTERFACE, TYPE_ALIAS, MODULE,
|
|
1265
|
+
PACKAGE, USING
|
|
1266
|
+
}
|
|
1169
1267
|
j = assign_idx + 1
|
|
1170
1268
|
nesting_depth = 0
|
|
1171
1269
|
while j < len(tokens):
|
|
@@ -1444,6 +1542,22 @@ class ContextStackParser:
|
|
|
1444
1542
|
if len(tokens) < 3:
|
|
1445
1543
|
return None
|
|
1446
1544
|
|
|
1545
|
+
# Optimization: Check AST Cache
|
|
1546
|
+
token_hash = None
|
|
1547
|
+
try:
|
|
1548
|
+
# Create a signature from tokens (literal + type)
|
|
1549
|
+
# We use a tuple which is hashable and fast
|
|
1550
|
+
tv = tuple((t.literal, t.type) for t in tokens)
|
|
1551
|
+
token_hash = str(hash(tv))
|
|
1552
|
+
|
|
1553
|
+
from ..module_cache import get_cached_contract_ast, cache_contract_ast
|
|
1554
|
+
cached = get_cached_contract_ast(token_hash)
|
|
1555
|
+
if cached:
|
|
1556
|
+
parser_debug(f" ⚡ Cache Hit for Contract: {token_hash}")
|
|
1557
|
+
return cached
|
|
1558
|
+
except Exception:
|
|
1559
|
+
pass
|
|
1560
|
+
|
|
1447
1561
|
# 1. Extract Name
|
|
1448
1562
|
contract_name = tokens[1].literal if tokens[1].type == IDENT else "UnknownContract"
|
|
1449
1563
|
parser_debug(f" 📝 Contract Name: {contract_name}")
|
|
@@ -1828,6 +1942,12 @@ class ContextStackParser:
|
|
|
1828
1942
|
contract_stmt.storage_vars = storage_vars
|
|
1829
1943
|
contract_stmt.actions = actions
|
|
1830
1944
|
|
|
1945
|
+
if token_hash:
|
|
1946
|
+
try:
|
|
1947
|
+
cache_contract_ast(token_hash, contract_stmt)
|
|
1948
|
+
except Exception:
|
|
1949
|
+
pass
|
|
1950
|
+
|
|
1831
1951
|
return contract_stmt
|
|
1832
1952
|
|
|
1833
1953
|
# === FIXED USE STATEMENT PARSERS ===
|
|
@@ -1892,7 +2012,7 @@ class ContextStackParser:
|
|
|
1892
2012
|
for i, token in enumerate(tokens):
|
|
1893
2013
|
if token.type == STRING:
|
|
1894
2014
|
file_path = token.literal
|
|
1895
|
-
elif token.type == IDENT and token.literal == 'as':
|
|
2015
|
+
elif token.type == AS or (token.type == IDENT and token.literal == 'as'):
|
|
1896
2016
|
if i + 1 < len(tokens) and tokens[i + 1].type == IDENT:
|
|
1897
2017
|
alias = tokens[i + 1].literal
|
|
1898
2018
|
|
|
@@ -1905,7 +2025,11 @@ class ContextStackParser:
|
|
|
1905
2025
|
def _parse_export_statement_block(self, block_info, all_tokens):
|
|
1906
2026
|
"""Parse export statement: export { name1, name2, ... } or export const/let X = value"""
|
|
1907
2027
|
tokens = block_info['tokens']
|
|
1908
|
-
|
|
2028
|
+
if (
|
|
2029
|
+
(hasattr(zexus_config, 'enable_parser_debug') and zexus_config.enable_parser_debug)
|
|
2030
|
+
or zexus_config.should_log('debug')
|
|
2031
|
+
):
|
|
2032
|
+
parser_debug(f" 📝 Found export statement: {[t.literal for t in tokens]}")
|
|
1909
2033
|
|
|
1910
2034
|
# Check if this is "export const/let ..." syntax
|
|
1911
2035
|
if len(tokens) >= 2 and tokens[0].type == EXPORT and tokens[1].type in [CONST, LET]:
|
|
@@ -1989,6 +2113,23 @@ class ContextStackParser:
|
|
|
1989
2113
|
result.statements = [contract_stmt, export_stmt]
|
|
1990
2114
|
return result
|
|
1991
2115
|
|
|
2116
|
+
# Check if this is "export data Name {...}" syntax
|
|
2117
|
+
if len(tokens) >= 3 and tokens[0].type == EXPORT and tokens[1].type == DATA:
|
|
2118
|
+
parser_debug(" 🎯 Handling 'export data' statement")
|
|
2119
|
+
|
|
2120
|
+
data_tokens = tokens[1:]
|
|
2121
|
+
data_stmt = self._parse_data_statement(data_tokens)
|
|
2122
|
+
|
|
2123
|
+
if data_stmt is None:
|
|
2124
|
+
return None
|
|
2125
|
+
|
|
2126
|
+
data_name = data_stmt.name.value if hasattr(data_stmt.name, 'value') else str(data_stmt.name)
|
|
2127
|
+
|
|
2128
|
+
export_stmt = ExportStatement(names=[Identifier(data_name)])
|
|
2129
|
+
result = BlockStatement()
|
|
2130
|
+
result.statements = [data_stmt, export_stmt]
|
|
2131
|
+
return result
|
|
2132
|
+
|
|
1992
2133
|
# Check if this is "export function name(...) {...}" syntax
|
|
1993
2134
|
if len(tokens) >= 3 and tokens[0].type == EXPORT and tokens[1].type == FUNCTION:
|
|
1994
2135
|
parser_debug(" 🎯 Handling 'export function' statement")
|
|
@@ -2116,8 +2257,13 @@ class ContextStackParser:
|
|
|
2116
2257
|
return self._parse_generic_statement_block(block_info, all_tokens)
|
|
2117
2258
|
|
|
2118
2259
|
def _parse_generic_statement_block(self, block_info, all_tokens):
|
|
2119
|
-
"""Parse generic statement block - RETURNS ExpressionStatement"""
|
|
2260
|
+
"""Parse generic statement block - RETURNS ExpressionStatement or AssignmentExpression"""
|
|
2120
2261
|
tokens = block_info['tokens']
|
|
2262
|
+
# Check for compound assignment tokens (+=, -=, *=, /=, %=, **=)
|
|
2263
|
+
_compound_types = {PLUS_ASSIGN, MINUS_ASSIGN, STAR_ASSIGN, SLASH_ASSIGN, MOD_ASSIGN, POWER_ASSIGN}
|
|
2264
|
+
for tok in tokens:
|
|
2265
|
+
if tok.type in _compound_types or tok.type == ASSIGN:
|
|
2266
|
+
return self._parse_assignment_statement(block_info, all_tokens)
|
|
2121
2267
|
expression = self._parse_expression(tokens)
|
|
2122
2268
|
if expression:
|
|
2123
2269
|
return ExpressionStatement(expression)
|
|
@@ -2134,11 +2280,13 @@ class ContextStackParser:
|
|
|
2134
2280
|
try_block = self._parse_try_block(tokens)
|
|
2135
2281
|
error_var = self._extract_catch_variable(tokens)
|
|
2136
2282
|
catch_block = self._parse_catch_block(tokens)
|
|
2283
|
+
finally_block = self._parse_finally_block(tokens)
|
|
2137
2284
|
|
|
2138
2285
|
return TryCatchStatement(
|
|
2139
2286
|
try_block=try_block,
|
|
2140
2287
|
error_variable=error_var,
|
|
2141
|
-
catch_block=catch_block
|
|
2288
|
+
catch_block=catch_block,
|
|
2289
|
+
finally_block=finally_block
|
|
2142
2290
|
)
|
|
2143
2291
|
|
|
2144
2292
|
def _parse_try_block(self, tokens):
|
|
@@ -2205,15 +2353,57 @@ class ContextStackParser:
|
|
|
2205
2353
|
parser_debug(" ⚠️ [Catch] Could not find catch block content")
|
|
2206
2354
|
return BlockStatement()
|
|
2207
2355
|
|
|
2356
|
+
def _parse_finally_block(self, tokens):
|
|
2357
|
+
"""Parse the finally block from tokens, if present. Returns BlockStatement or None."""
|
|
2358
|
+
finally_start = -1
|
|
2359
|
+
finally_end = -1
|
|
2360
|
+
brace_count = 0
|
|
2361
|
+
in_finally = False
|
|
2362
|
+
|
|
2363
|
+
for i, token in enumerate(tokens):
|
|
2364
|
+
if token.type == FINALLY:
|
|
2365
|
+
in_finally = True
|
|
2366
|
+
elif in_finally and token.type == LBRACE:
|
|
2367
|
+
if brace_count == 0:
|
|
2368
|
+
finally_start = i + 1
|
|
2369
|
+
brace_count += 1
|
|
2370
|
+
elif in_finally and token.type == RBRACE:
|
|
2371
|
+
brace_count -= 1
|
|
2372
|
+
if brace_count == 0:
|
|
2373
|
+
finally_end = i
|
|
2374
|
+
break
|
|
2375
|
+
|
|
2376
|
+
if finally_start != -1 and finally_end != -1 and finally_end > finally_start:
|
|
2377
|
+
finally_tokens = tokens[finally_start:finally_end]
|
|
2378
|
+
finally_block_statements = self._parse_block_statements(finally_tokens)
|
|
2379
|
+
block = BlockStatement()
|
|
2380
|
+
block.statements = finally_block_statements
|
|
2381
|
+
return block
|
|
2382
|
+
|
|
2383
|
+
return None
|
|
2384
|
+
|
|
2208
2385
|
def _parse_block_statements(self, tokens):
|
|
2209
2386
|
"""Parse statements from a block of tokens"""
|
|
2210
2387
|
if not tokens:
|
|
2211
2388
|
return []
|
|
2389
|
+
|
|
2390
|
+
cache_key = None
|
|
2391
|
+
# Large blocks can be thousands of tokens; building a signature tuple
|
|
2392
|
+
# (and looking it up) can cost significant time/memory. Skip caching
|
|
2393
|
+
# for very large blocks to keep parsing stable.
|
|
2394
|
+
if len(tokens) <= 2000:
|
|
2395
|
+
try:
|
|
2396
|
+
cache_key = _block_tokens_signature(tokens)
|
|
2397
|
+
cached = _BLOCK_STATEMENTS_CACHE.get(cache_key)
|
|
2398
|
+
if cached is not None:
|
|
2399
|
+
return list(cached)
|
|
2400
|
+
except Exception:
|
|
2401
|
+
cache_key = None
|
|
2212
2402
|
|
|
2213
2403
|
statements = []
|
|
2214
2404
|
i = 0
|
|
2215
2405
|
# Common statement-starter tokens used by several heuristics and fallbacks
|
|
2216
|
-
statement_starters =
|
|
2406
|
+
statement_starters = STATEMENT_STARTERS
|
|
2217
2407
|
|
|
2218
2408
|
# Safety: track loop iterations to prevent infinite loops
|
|
2219
2409
|
max_iterations = len(tokens) * 10 # Very generous limit
|
|
@@ -2410,7 +2600,7 @@ class ContextStackParser:
|
|
|
2410
2600
|
prev_tok = tokens[j-1] if j > 0 else None
|
|
2411
2601
|
is_method_call_continuation = prev_tok and prev_tok.type == DOT
|
|
2412
2602
|
is_expression_continuation = prev_tok and prev_tok.type in {
|
|
2413
|
-
PLUS, MINUS, STAR, SLASH, MOD, # Arithmetic operators
|
|
2603
|
+
PLUS, MINUS, STAR, SLASH, MOD, POWER, # Arithmetic operators
|
|
2414
2604
|
EQ, NOT_EQ, LT, GT, LTE, GTE, # Comparison operators
|
|
2415
2605
|
AND, OR, # Logical operators
|
|
2416
2606
|
COMMA, # List separator
|
|
@@ -2495,6 +2685,32 @@ class ContextStackParser:
|
|
|
2495
2685
|
|
|
2496
2686
|
i = j
|
|
2497
2687
|
|
|
2688
|
+
# GC statement heuristic (performance control)
|
|
2689
|
+
elif token.type == GC:
|
|
2690
|
+
j = i + 1
|
|
2691
|
+
action_literal = None
|
|
2692
|
+
|
|
2693
|
+
if j < len(tokens) and tokens[j].type == STRING:
|
|
2694
|
+
action_literal = tokens[j].literal
|
|
2695
|
+
j += 1
|
|
2696
|
+
|
|
2697
|
+
# Consume optional semicolon directly following the statement
|
|
2698
|
+
if j < len(tokens) and tokens[j].type == SEMICOLON:
|
|
2699
|
+
j += 1
|
|
2700
|
+
|
|
2701
|
+
if action_literal is not None:
|
|
2702
|
+
statements.append(GCStatement(action_literal))
|
|
2703
|
+
i = j
|
|
2704
|
+
continue
|
|
2705
|
+
|
|
2706
|
+
# Fallback: parse as expression if tokens are malformed
|
|
2707
|
+
gc_tokens = tokens[i:j]
|
|
2708
|
+
expr = self._parse_expression(gc_tokens)
|
|
2709
|
+
if expr:
|
|
2710
|
+
statements.append(ExpressionStatement(expr))
|
|
2711
|
+
i = j if j > i else i + 1
|
|
2712
|
+
continue
|
|
2713
|
+
|
|
2498
2714
|
# DATA statement heuristic (dataclass definition)
|
|
2499
2715
|
# But not if DATA is used as an identifier (data = ...)
|
|
2500
2716
|
elif token.type == DATA and i + 1 < len(tokens) and tokens[i + 1].type not in [ASSIGN, LBRACKET, DOT, LPAREN]:
|
|
@@ -3908,6 +4124,14 @@ class ContextStackParser:
|
|
|
3908
4124
|
i = j
|
|
3909
4125
|
|
|
3910
4126
|
# print(f" ✅ Parsed {len(statements)} statements from block")
|
|
4127
|
+
if cache_key is not None:
|
|
4128
|
+
try:
|
|
4129
|
+
_BLOCK_STATEMENTS_CACHE[cache_key] = tuple(statements)
|
|
4130
|
+
if len(_BLOCK_STATEMENTS_CACHE) > _BLOCK_STATEMENTS_CACHE_MAX:
|
|
4131
|
+
_BLOCK_STATEMENTS_CACHE.popitem(last=False)
|
|
4132
|
+
except Exception:
|
|
4133
|
+
pass
|
|
4134
|
+
|
|
3911
4135
|
return statements
|
|
3912
4136
|
|
|
3913
4137
|
# === MAP LITERAL PARSING ===
|
|
@@ -4007,7 +4231,13 @@ class ContextStackParser:
|
|
|
4007
4231
|
# Collect tokens up to a statement boundary
|
|
4008
4232
|
inner_tokens = []
|
|
4009
4233
|
statement_terminators = {SEMICOLON, RBRACE}
|
|
4010
|
-
statement_starters = {
|
|
4234
|
+
statement_starters = {
|
|
4235
|
+
LET, CONST, PRINT, FOR, IF, WHILE, RETURN, CONTINUE, ACTION, TRY, AUDIT,
|
|
4236
|
+
RESTRICT, SANDBOX, TRAIL, NATIVE, GC, INLINE, BUFFER, SIMD, DEFER, PATTERN,
|
|
4237
|
+
ENUM, STREAM, WATCH, CAPABILITY, GRANT, REVOKE, VALIDATE, SANITIZE,
|
|
4238
|
+
IMMUTABLE, INTERFACE, TYPE_ALIAS, MODULE, PACKAGE, USING,
|
|
4239
|
+
SCREEN, COLOR, CANVAS, GRAPHICS, ANIMATION, CLOCK
|
|
4240
|
+
}
|
|
4011
4241
|
nesting_level = 0
|
|
4012
4242
|
|
|
4013
4243
|
for token in tokens[1:]: # Skip the PRINT token
|
|
@@ -4299,6 +4529,10 @@ class ContextStackParser:
|
|
|
4299
4529
|
return self._parse_sanitize_expression(tokens)
|
|
4300
4530
|
if tokens[0].type == AWAIT:
|
|
4301
4531
|
return self._parse_await_expression(tokens)
|
|
4532
|
+
if tokens[0].type == FIND:
|
|
4533
|
+
return self._parse_find_keyword_expression(tokens)
|
|
4534
|
+
if tokens[0].type == LOAD:
|
|
4535
|
+
return self._parse_load_keyword_expression(tokens)
|
|
4302
4536
|
|
|
4303
4537
|
# Main expression parser with chaining
|
|
4304
4538
|
i = 0
|
|
@@ -4543,16 +4777,46 @@ class ContextStackParser:
|
|
|
4543
4777
|
# If there's a closing RBRACKET, skip it
|
|
4544
4778
|
if i < n and tokens[i].type == RBRACKET:
|
|
4545
4779
|
i += 1
|
|
4546
|
-
|
|
4547
|
-
|
|
4548
|
-
|
|
4549
|
-
|
|
4550
|
-
|
|
4551
|
-
|
|
4780
|
+
colon_idx = None
|
|
4781
|
+
depth = 0
|
|
4782
|
+
for idx, tok in enumerate(inner_tokens):
|
|
4783
|
+
if tok.type in {LBRACKET, LPAREN, LBRACE}:
|
|
4784
|
+
depth += 1
|
|
4785
|
+
elif tok.type in {RBRACKET, RPAREN, RBRACE}:
|
|
4786
|
+
if depth > 0:
|
|
4787
|
+
depth -= 1
|
|
4788
|
+
elif tok.type == COLON and depth == 0:
|
|
4789
|
+
colon_idx = idx
|
|
4790
|
+
break
|
|
4791
|
+
|
|
4792
|
+
if colon_idx is not None:
|
|
4793
|
+
start_tokens = inner_tokens[:colon_idx]
|
|
4794
|
+
end_tokens = inner_tokens[colon_idx + 1:]
|
|
4795
|
+
start_expr = self._parse_expression(start_tokens) if start_tokens else None
|
|
4796
|
+
end_expr = self._parse_expression(end_tokens) if end_tokens else None
|
|
4797
|
+
current_expr = SliceExpression(
|
|
4798
|
+
object=current_expr,
|
|
4799
|
+
start=start_expr,
|
|
4800
|
+
end=end_expr
|
|
4801
|
+
)
|
|
4802
|
+
else:
|
|
4803
|
+
if not inner_tokens:
|
|
4804
|
+
parser_debug(" ❌ Empty bracket access is invalid")
|
|
4805
|
+
prop_expr = StringLiteral("")
|
|
4806
|
+
else:
|
|
4807
|
+
prop_expr = self._parse_expression(inner_tokens)
|
|
4808
|
+
if prop_expr is None:
|
|
4809
|
+
parser_debug(" ❌ Could not parse bracket expression")
|
|
4810
|
+
prop_expr = StringLiteral("")
|
|
4811
|
+
current_expr = PropertyAccessExpression(
|
|
4812
|
+
object=current_expr,
|
|
4813
|
+
property=prop_expr,
|
|
4814
|
+
computed=True
|
|
4815
|
+
)
|
|
4552
4816
|
continue
|
|
4553
4817
|
|
|
4554
4818
|
# Binary operators (comparisons and arithmetic - but NOT AND/OR which are handled above)
|
|
4555
|
-
if t.type in {PLUS, MINUS, ASTERISK, SLASH, MOD,
|
|
4819
|
+
if t.type in {PLUS, MINUS, ASTERISK, SLASH, MOD, POWER,
|
|
4556
4820
|
LT, GT, EQ, NOT_EQ, LTE, GTE}:
|
|
4557
4821
|
# Get operator precedence
|
|
4558
4822
|
op_precedence = self._get_operator_precedence(t.type)
|
|
@@ -4578,7 +4842,7 @@ class ContextStackParser:
|
|
|
4578
4842
|
break
|
|
4579
4843
|
|
|
4580
4844
|
# If we're not nested and we hit an operator with same or lower precedence, stop
|
|
4581
|
-
if depth == 0 and tt.type in {PLUS, MINUS, ASTERISK, SLASH, MOD, LT, GT, EQ, NOT_EQ, LTE, GTE}:
|
|
4845
|
+
if depth == 0 and tt.type in {PLUS, MINUS, ASTERISK, SLASH, MOD, POWER, LT, GT, EQ, NOT_EQ, LTE, GTE}:
|
|
4582
4846
|
next_precedence = self._get_operator_precedence(tt.type)
|
|
4583
4847
|
# For left-associative operators, stop if next has same or lower precedence
|
|
4584
4848
|
if next_precedence <= op_precedence:
|
|
@@ -4610,12 +4874,15 @@ class ContextStackParser:
|
|
|
4610
4874
|
Higher numbers = higher precedence (evaluated first)
|
|
4611
4875
|
"""
|
|
4612
4876
|
# Precedence levels (matching parser.py conventions)
|
|
4877
|
+
POWER_PREC = 10 # ** (exponentiation)
|
|
4613
4878
|
PRODUCT = 9 # *, /, %
|
|
4614
4879
|
SUM = 8 # +, -
|
|
4615
4880
|
COMPARISON = 7 # <, >, <=, >=
|
|
4616
4881
|
EQUALITY = 6 # ==, !=
|
|
4617
4882
|
|
|
4618
|
-
if token_type
|
|
4883
|
+
if token_type == POWER:
|
|
4884
|
+
return POWER_PREC
|
|
4885
|
+
elif token_type in {ASTERISK, SLASH, MOD}:
|
|
4619
4886
|
return PRODUCT
|
|
4620
4887
|
elif token_type in {PLUS, MINUS}:
|
|
4621
4888
|
return SUM
|
|
@@ -4630,6 +4897,21 @@ class ContextStackParser:
|
|
|
4630
4897
|
"""Parse a single token into an expression"""
|
|
4631
4898
|
if token.type == STRING:
|
|
4632
4899
|
return StringLiteral(token.literal)
|
|
4900
|
+
elif token.type == INTERP_STRING:
|
|
4901
|
+
# Interpolated string — delegate to sub-parser for each expr part
|
|
4902
|
+
from ..lexer import Lexer as _Lexer
|
|
4903
|
+
raw_parts = token.literal # list of ("str", text) | ("expr", source)
|
|
4904
|
+
parsed_parts = []
|
|
4905
|
+
for part_type, part_value in raw_parts:
|
|
4906
|
+
if part_type == "str":
|
|
4907
|
+
parsed_parts.append(("str", part_value))
|
|
4908
|
+
elif part_type == "expr":
|
|
4909
|
+
sub_lexer = _Lexer(part_value)
|
|
4910
|
+
from .parser import Parser as _Parser, LOWEST as _LOWEST
|
|
4911
|
+
sub_parser = _Parser(sub_lexer)
|
|
4912
|
+
expr_node = sub_parser.parse_expression(_LOWEST)
|
|
4913
|
+
parsed_parts.append(("expr", expr_node if expr_node else StringLiteral("")))
|
|
4914
|
+
return StringInterpolationExpression(parts=parsed_parts)
|
|
4633
4915
|
elif token.type == INT:
|
|
4634
4916
|
try:
|
|
4635
4917
|
return IntegerLiteral(int(token.literal))
|
|
@@ -4716,6 +4998,29 @@ class ContextStackParser:
|
|
|
4716
4998
|
|
|
4717
4999
|
return nested_tokens
|
|
4718
5000
|
|
|
5001
|
+
def _collect_enclosed_tokens(self, tokens, start_index, open_type, close_type):
|
|
5002
|
+
"""Collect tokens enclosed by matching delimiters starting at start_index."""
|
|
5003
|
+
if start_index >= len(tokens) or tokens[start_index].type != open_type:
|
|
5004
|
+
return [], start_index
|
|
5005
|
+
|
|
5006
|
+
inner_tokens = []
|
|
5007
|
+
depth = 1
|
|
5008
|
+
i = start_index + 1
|
|
5009
|
+
|
|
5010
|
+
while i < len(tokens) and depth > 0:
|
|
5011
|
+
token = tokens[i]
|
|
5012
|
+
if token.type == open_type:
|
|
5013
|
+
depth += 1
|
|
5014
|
+
elif token.type == close_type:
|
|
5015
|
+
depth -= 1
|
|
5016
|
+
if depth == 0:
|
|
5017
|
+
break
|
|
5018
|
+
if depth > 0:
|
|
5019
|
+
inner_tokens.append(token)
|
|
5020
|
+
i += 1
|
|
5021
|
+
|
|
5022
|
+
return inner_tokens, i
|
|
5023
|
+
|
|
4719
5024
|
def _parse_list_literal(self, tokens):
|
|
4720
5025
|
"""Parse a list literal [a, b, c] from a token list"""
|
|
4721
5026
|
if not tokens or tokens[0].type != LBRACKET:
|
|
@@ -4863,14 +5168,81 @@ class ContextStackParser:
|
|
|
4863
5168
|
|
|
4864
5169
|
# Parse match cases
|
|
4865
5170
|
cases = []
|
|
5171
|
+
|
|
5172
|
+
# Support case/colon syntax when present
|
|
5173
|
+
if any(t.type in {CASE, DEFAULT} for t in body_tokens):
|
|
5174
|
+
i = 0
|
|
5175
|
+
while i < len(body_tokens):
|
|
5176
|
+
if body_tokens[i].type in {COMMA, SEMICOLON}:
|
|
5177
|
+
i += 1
|
|
5178
|
+
continue
|
|
5179
|
+
|
|
5180
|
+
pattern = None
|
|
5181
|
+
|
|
5182
|
+
if body_tokens[i].type == DEFAULT:
|
|
5183
|
+
pattern = WildcardPattern()
|
|
5184
|
+
i += 1
|
|
5185
|
+
if i < len(body_tokens) and body_tokens[i].type == COLON:
|
|
5186
|
+
i += 1
|
|
5187
|
+
elif body_tokens[i].type == CASE:
|
|
5188
|
+
i += 1
|
|
5189
|
+
pattern_start = i
|
|
5190
|
+
depth = 0
|
|
5191
|
+
while i < len(body_tokens):
|
|
5192
|
+
t = body_tokens[i]
|
|
5193
|
+
if t.type in {LPAREN, LBRACE, LBRACKET}:
|
|
5194
|
+
depth += 1
|
|
5195
|
+
elif t.type in {RPAREN, RBRACE, RBRACKET}:
|
|
5196
|
+
depth -= 1
|
|
5197
|
+
elif t.type == COLON and depth == 0:
|
|
5198
|
+
break
|
|
5199
|
+
i += 1
|
|
5200
|
+
|
|
5201
|
+
colon_idx = i
|
|
5202
|
+
pattern_tokens = body_tokens[pattern_start:colon_idx]
|
|
5203
|
+
pattern = self._parse_pattern(pattern_tokens) if pattern_tokens else None
|
|
5204
|
+
|
|
5205
|
+
if i < len(body_tokens) and body_tokens[i].type == COLON:
|
|
5206
|
+
i += 1
|
|
5207
|
+
else:
|
|
5208
|
+
i += 1
|
|
5209
|
+
continue
|
|
5210
|
+
|
|
5211
|
+
result_start = i
|
|
5212
|
+
depth = 0
|
|
5213
|
+
while i < len(body_tokens):
|
|
5214
|
+
t = body_tokens[i]
|
|
5215
|
+
if t.type in {LPAREN, LBRACE, LBRACKET}:
|
|
5216
|
+
depth += 1
|
|
5217
|
+
elif t.type in {RPAREN, RBRACE, RBRACKET}:
|
|
5218
|
+
depth -= 1
|
|
5219
|
+
elif depth == 0 and t.type in {CASE, DEFAULT}:
|
|
5220
|
+
break
|
|
5221
|
+
elif depth == 0 and t.type in {COMMA, SEMICOLON}:
|
|
5222
|
+
break
|
|
5223
|
+
i += 1
|
|
5224
|
+
|
|
5225
|
+
result_tokens = body_tokens[result_start:i]
|
|
5226
|
+
result_expr = self._parse_expression(result_tokens) if result_tokens else NullLiteral()
|
|
5227
|
+
|
|
5228
|
+
if pattern:
|
|
5229
|
+
cases.append(MatchCase(pattern=pattern, result=result_expr))
|
|
5230
|
+
parser_debug(f" ✅ Parsed match case: {pattern} : {result_expr}")
|
|
5231
|
+
|
|
5232
|
+
while i < len(body_tokens) and body_tokens[i].type in {COMMA, SEMICOLON}:
|
|
5233
|
+
i += 1
|
|
5234
|
+
|
|
5235
|
+
parser_debug(f" ✅ Match expression with {len(cases)} cases")
|
|
5236
|
+
return MatchExpression(value=value_expr, cases=cases)
|
|
5237
|
+
|
|
4866
5238
|
i = 0
|
|
4867
|
-
|
|
5239
|
+
|
|
4868
5240
|
while i < len(body_tokens):
|
|
4869
5241
|
# Skip commas and semicolons
|
|
4870
5242
|
if body_tokens[i].type in {COMMA, SEMICOLON}:
|
|
4871
5243
|
i += 1
|
|
4872
5244
|
continue
|
|
4873
|
-
|
|
5245
|
+
|
|
4874
5246
|
# Find the => separator
|
|
4875
5247
|
arrow_idx = -1
|
|
4876
5248
|
depth = 0
|
|
@@ -4882,25 +5254,25 @@ class ContextStackParser:
|
|
|
4882
5254
|
elif body_tokens[j].type == LAMBDA and depth == 0: # => is tokenized as LAMBDA
|
|
4883
5255
|
arrow_idx = j
|
|
4884
5256
|
break
|
|
4885
|
-
|
|
5257
|
+
|
|
4886
5258
|
if arrow_idx == -1:
|
|
4887
5259
|
# No more cases
|
|
4888
5260
|
break
|
|
4889
|
-
|
|
5261
|
+
|
|
4890
5262
|
# Parse pattern (from i to arrow_idx)
|
|
4891
5263
|
pattern_tokens = body_tokens[i:arrow_idx]
|
|
4892
5264
|
pattern = self._parse_pattern(pattern_tokens)
|
|
4893
|
-
|
|
5265
|
+
|
|
4894
5266
|
if not pattern:
|
|
4895
5267
|
parser_debug(f" ❌ Failed to parse pattern: {[t.literal for t in pattern_tokens]}")
|
|
4896
5268
|
i = arrow_idx + 1
|
|
4897
5269
|
continue
|
|
4898
|
-
|
|
5270
|
+
|
|
4899
5271
|
# Find result expression end (comma, semicolon, or next pattern)
|
|
4900
5272
|
result_start = arrow_idx + 1
|
|
4901
5273
|
result_end = result_start
|
|
4902
5274
|
depth = 0
|
|
4903
|
-
|
|
5275
|
+
|
|
4904
5276
|
while result_end < len(body_tokens):
|
|
4905
5277
|
if body_tokens[result_end].type in {LPAREN, LBRACE, LBRACKET}:
|
|
4906
5278
|
depth += 1
|
|
@@ -4915,7 +5287,7 @@ class ContextStackParser:
|
|
|
4915
5287
|
# Patterns can start with: INT, STRING, IDENT (for constructor or wildcard)
|
|
4916
5288
|
current_tok = body_tokens[result_end]
|
|
4917
5289
|
next_tok = body_tokens[result_end + 1]
|
|
4918
|
-
|
|
5290
|
+
|
|
4919
5291
|
# Pattern: literal => or _ => or Constructor( =>
|
|
4920
5292
|
if current_tok.type in {INT, STRING, TRUE, FALSE}:
|
|
4921
5293
|
# Look ahead for =>
|
|
@@ -4948,18 +5320,18 @@ class ContextStackParser:
|
|
|
4948
5320
|
lookahead += 1
|
|
4949
5321
|
if lookahead < len(body_tokens) and body_tokens[lookahead].type == LAMBDA:
|
|
4950
5322
|
break # Start of new constructor pattern
|
|
4951
|
-
|
|
5323
|
+
|
|
4952
5324
|
result_end += 1
|
|
4953
|
-
|
|
5325
|
+
|
|
4954
5326
|
# Parse result expression
|
|
4955
5327
|
result_tokens = body_tokens[result_start:result_end]
|
|
4956
5328
|
result_expr = self._parse_expression(result_tokens) if result_tokens else NullLiteral()
|
|
4957
|
-
|
|
5329
|
+
|
|
4958
5330
|
cases.append(MatchCase(pattern=pattern, result=result_expr))
|
|
4959
5331
|
parser_debug(f" ✅ Parsed match case: {pattern} => {result_expr}")
|
|
4960
|
-
|
|
5332
|
+
|
|
4961
5333
|
i = result_end
|
|
4962
|
-
|
|
5334
|
+
|
|
4963
5335
|
parser_debug(f" ✅ Match expression with {len(cases)} cases")
|
|
4964
5336
|
return MatchExpression(value=value_expr, cases=cases)
|
|
4965
5337
|
|
|
@@ -5203,6 +5575,69 @@ class ContextStackParser:
|
|
|
5203
5575
|
|
|
5204
5576
|
return AwaitExpression(expression)
|
|
5205
5577
|
|
|
5578
|
+
def _parse_find_keyword_expression(self, tokens):
    """Parse find expression tokens produced by advanced strategies."""
    if not tokens or tokens[0].type != FIND:
        return None

    openers = {LPAREN, LBRACE, LBRACKET}
    closers = {RPAREN, RBRACE, RBRACKET}

    # Locate a top-level IN that separates the find target from its scope;
    # IN tokens inside brackets/braces/parens do not count.
    in_pos = -1
    depth = 0
    for pos in range(1, len(tokens)):
        kind = tokens[pos].type
        if kind in openers:
            depth += 1
        elif kind in closers:
            depth -= 1
        elif kind == IN and depth == 0:
            in_pos = pos
            break

    if in_pos == -1:
        target_part, scope_part = tokens[1:], []
    else:
        target_part, scope_part = tokens[1:in_pos], tokens[in_pos + 1:]

    target_expr = self._parse_expression(target_part) if target_part else None
    scope_expr = self._parse_expression(scope_part) if scope_part else None

    # Degrade gracefully: an unparsable target becomes an empty string literal.
    if target_expr is None:
        target_expr = StringLiteral("")

    expr = FindExpression(target=target_expr, scope=scope_expr)
    # Attach the leading token for error reporting / source positions.
    setattr(expr, 'token', tokens[0])
    return expr
|
|
5609
|
+
|
|
5610
|
+
def _parse_load_keyword_expression(self, tokens):
    """Parse load expression tokens produced by advanced strategies."""
    if not tokens or tokens[0].type != LOAD:
        return None

    openers = {LPAREN, LBRACE, LBRACKET}
    closers = {RPAREN, RBRACE, RBRACKET}

    # Locate a top-level `from` identifier splitting the load target
    # from its source; `from` inside nested delimiters does not count.
    from_pos = -1
    depth = 0
    for pos in range(1, len(tokens)):
        tok = tokens[pos]
        if tok.type in openers:
            depth += 1
        elif tok.type in closers:
            depth -= 1
        elif depth == 0 and tok.type == IDENT and tok.literal == "from":
            from_pos = pos
            break

    if from_pos == -1:
        target_part, source_part = tokens[1:], []
    else:
        target_part, source_part = tokens[1:from_pos], tokens[from_pos + 1:]

    target_expr = self._parse_expression(target_part) if target_part else None
    source_expr = self._parse_expression(source_part) if source_part else None

    # Degrade gracefully: an unparsable target becomes an empty string literal.
    if target_expr is None:
        target_expr = StringLiteral("")

    expr = LoadExpression(target=target_expr, source=source_expr)
    # Attach the leading token for error reporting / source positions.
    setattr(expr, 'token', tokens[0])
    return expr
|
|
5640
|
+
|
|
5206
5641
|
def _parse_argument_list(self, tokens):
|
|
5207
5642
|
"""Parse comma-separated argument list with improved nesting support"""
|
|
5208
5643
|
parser_debug(" 🔍 Parsing argument list")
|
|
@@ -5277,6 +5712,237 @@ class ContextStackParser:
|
|
|
5277
5712
|
body=BlockStatement()
|
|
5278
5713
|
)
|
|
5279
5714
|
|
|
5715
|
+
def _parse_color_statement(self, block_info, all_tokens):
    """Parse COLOR statement into ColorStatement AST.

    block_info: dict from the block scanner; the 'tokens' entry holds the
    statement's token stream (assumed starting with the COLOR keyword —
    TODO confirm against caller).
    Returns a ColorStatement, or None when no name identifier is found.
    """
    tokens = block_info.get('tokens', []) or []
    # Need at least the keyword plus a name token.
    if len(tokens) < 2:
        return None

    # First IDENT after the keyword is the color's name.
    name = None
    name_index = None
    for idx in range(1, len(tokens)):
        tok = tokens[idx]
        if tok.type == IDENT:
            name = Identifier(tok.literal)
            name_index = idx
            break

    if name is None:
        return None

    # The value is either `= <expr>` or a `{ ... }` block; a bare
    # semicolon (or nothing) leaves it as NullLiteral.
    value = None
    i = name_index + 1
    while i < len(tokens):
        tok = tokens[i]
        if tok.type == ASSIGN:
            value_tokens = tokens[i + 1:]
            # Drop a trailing semicolon before parsing the value expression.
            if value_tokens and value_tokens[-1].type == SEMICOLON:
                value_tokens = value_tokens[:-1]
            value = self._parse_expression(value_tokens) if value_tokens else None
            break
        elif tok.type == LBRACE:
            # Braced form: parse the enclosed tokens as a statement block.
            inner, close_idx = self._collect_enclosed_tokens(tokens, i, LBRACE, RBRACE)
            block = BlockStatement()
            block.statements = self._parse_block_statements(inner)
            value = block
            i = close_idx
            break
        elif tok.type == SEMICOLON:
            break
        i += 1

    if value is None:
        value = NullLiteral()

    return ColorStatement(name, value)
|
|
5758
|
+
|
|
5759
|
+
def _parse_canvas_statement(self, block_info, all_tokens):
    """Parse CANVAS statement supporting dimensions and drawing body.

    Accepts `canvas name(args...) { body }`, `canvas name = <expr>`, or a
    bare `canvas name;`.  Returns a CanvasStatement, or None when no name
    identifier follows the keyword.
    """
    tokens = block_info.get('tokens', []) or []
    # Need at least the keyword plus a name token.
    if len(tokens) < 2:
        return None

    # First IDENT after the keyword is the canvas name.
    name = None
    name_index = None
    for idx in range(1, len(tokens)):
        tok = tokens[idx]
        if tok.type == IDENT:
            name = Identifier(tok.literal)
            name_index = idx
            break

    if name is None:
        return None

    properties = None
    body = None
    i = name_index + 1
    while i < len(tokens):
        tok = tokens[i]
        if tok.type == LPAREN:
            # (args...) — parsed as an argument list into `properties`;
            # continue scanning for a following body.
            inner, close_idx = self._collect_enclosed_tokens(tokens, i, LPAREN, RPAREN)
            properties = self._parse_argument_list(inner)
            i = close_idx + 1
            continue
        if tok.type == ASSIGN:
            # = <expr> form; a trailing semicolon is stripped first.
            value_tokens = tokens[i + 1:]
            if value_tokens and value_tokens[-1].type == SEMICOLON:
                value_tokens = value_tokens[:-1]
            properties = self._parse_expression(value_tokens)
            break
        if tok.type == LBRACE:
            # { ... } — drawing body parsed as nested statements.
            inner, close_idx = self._collect_enclosed_tokens(tokens, i, LBRACE, RBRACE)
            body_block = BlockStatement()
            body_block.statements = self._parse_block_statements(inner)
            body = body_block
            break
        if tok.type == SEMICOLON:
            break
        i += 1

    return CanvasStatement(name, properties=properties, body=body)
|
|
5804
|
+
|
|
5805
|
+
def _parse_graphics_statement(self, block_info, all_tokens):
    """Parse GRAPHICS statement for renderer overlays.

    Accepts `graphics name = <expr>` (expression wrapped as a one-statement
    body) or `graphics name { body }`.  Returns a GraphicsStatement whose
    body is always a BlockStatement (possibly empty), or None when no name
    identifier follows the keyword.
    """
    tokens = block_info.get('tokens', []) or []
    # Need at least the keyword plus a name token.
    if len(tokens) < 2:
        return None

    # First IDENT after the keyword is the graphics name.
    name = None
    name_index = None
    for idx in range(1, len(tokens)):
        tok = tokens[idx]
        if tok.type == IDENT:
            name = Identifier(tok.literal)
            name_index = idx
            break

    if name is None:
        return None

    body = None
    i = name_index + 1
    while i < len(tokens):
        tok = tokens[i]
        if tok.type == ASSIGN:
            # = <expr> — wrap the parsed expression in a single-statement
            # block; an unparsable expression yields an empty block.
            value_tokens = tokens[i + 1:]
            if value_tokens and value_tokens[-1].type == SEMICOLON:
                value_tokens = value_tokens[:-1]
            expr = self._parse_expression(value_tokens)
            block = BlockStatement()
            if expr is not None:
                block.statements = [ExpressionStatement(expr)]
            body = block
            break
        if tok.type == LBRACE:
            # { ... } — parse the enclosed tokens as nested statements.
            inner, _ = self._collect_enclosed_tokens(tokens, i, LBRACE, RBRACE)
            block = BlockStatement()
            block.statements = self._parse_block_statements(inner)
            body = block
            break
        if tok.type == SEMICOLON:
            break
        i += 1

    # Guarantee a body so downstream code never sees None.
    if body is None:
        body = BlockStatement()

    return GraphicsStatement(name, body=body)
|
|
5851
|
+
|
|
5852
|
+
def _parse_animation_statement(self, block_info, all_tokens):
    """Parse ANIMATION statement including properties and frames body.

    Accepts `animation name(args...) { frames }`, `animation name = <expr>`
    (expression becomes `properties`), or a bare `animation name;`.
    Returns an AnimationStatement whose body is always a BlockStatement
    (possibly empty), or None when no name identifier follows the keyword.
    """
    tokens = block_info.get('tokens', []) or []
    # Need at least the keyword plus a name token.
    if len(tokens) < 2:
        return None

    # First IDENT after the keyword is the animation name.
    name = None
    name_index = None
    for idx in range(1, len(tokens)):
        tok = tokens[idx]
        if tok.type == IDENT:
            name = Identifier(tok.literal)
            name_index = idx
            break

    if name is None:
        return None

    properties = None
    body = None
    i = name_index + 1
    while i < len(tokens):
        tok = tokens[i]
        if tok.type == LPAREN:
            # (args...) — parsed as an argument list into `properties`;
            # continue scanning for a following body.
            inner, close_idx = self._collect_enclosed_tokens(tokens, i, LPAREN, RPAREN)
            properties = self._parse_argument_list(inner)
            i = close_idx + 1
            continue
        if tok.type == ASSIGN:
            # = <expr> form; a trailing semicolon is stripped first.
            value_tokens = tokens[i + 1:]
            if value_tokens and value_tokens[-1].type == SEMICOLON:
                value_tokens = value_tokens[:-1]
            properties = self._parse_expression(value_tokens)
            break
        if tok.type == LBRACE:
            # { ... } — frames body parsed as nested statements.
            inner, _ = self._collect_enclosed_tokens(tokens, i, LBRACE, RBRACE)
            block = BlockStatement()
            block.statements = self._parse_block_statements(inner)
            body = block
            break
        if tok.type == SEMICOLON:
            break
        i += 1

    # Guarantee a body so downstream code never sees None.
    if body is None:
        body = BlockStatement()

    return AnimationStatement(name, body=body, properties=properties)
|
|
5900
|
+
|
|
5901
|
+
def _parse_clock_statement(self, block_info, all_tokens):
    """Parse CLOCK statement mapping configuration expressions.

    Accepts `clock name(args...)`, `clock name = <expr>`, or
    `clock name { ... }` (block parsed as statements and used as the
    config).  Returns a ClockStatement — with properties possibly None
    when only `clock name;` was written — or None when no name identifier
    follows the keyword.
    """
    tokens = block_info.get('tokens', []) or []
    # Need at least the keyword plus a name token.
    if len(tokens) < 2:
        return None

    # First IDENT after the keyword is the clock name.
    name = None
    name_index = None
    for idx in range(1, len(tokens)):
        tok = tokens[idx]
        if tok.type == IDENT:
            name = Identifier(tok.literal)
            name_index = idx
            break

    if name is None:
        return None

    config = None
    i = name_index + 1
    while i < len(tokens):
        tok = tokens[i]
        if tok.type == LPAREN:
            # (args...) — parsed as an argument list into the config;
            # continue scanning in case more follows.
            inner, close_idx = self._collect_enclosed_tokens(tokens, i, LPAREN, RPAREN)
            config = self._parse_argument_list(inner)
            i = close_idx + 1
            continue
        if tok.type == ASSIGN:
            # = <expr> form; a trailing semicolon is stripped first.
            value_tokens = tokens[i + 1:]
            if value_tokens and value_tokens[-1].type == SEMICOLON:
                value_tokens = value_tokens[:-1]
            config = self._parse_expression(value_tokens)
            break
        if tok.type == LBRACE:
            # { ... } — configuration body parsed as nested statements.
            inner, _ = self._collect_enclosed_tokens(tokens, i, LBRACE, RBRACE)
            block = BlockStatement()
            block.statements = self._parse_block_statements(inner)
            config = block
            break
        if tok.type == SEMICOLON:
            break
        i += 1

    return ClockStatement(name, properties=config)
|
|
5945
|
+
|
|
5280
5946
|
def _parse_try_catch_context(self, block_info, all_tokens):
|
|
5281
5947
|
"""Parse try-catch block with full context awareness"""
|
|
5282
5948
|
parser_debug("🔧 [Context] Parsing try-catch block with context awareness")
|
|
@@ -5335,15 +6001,19 @@ class ContextStackParser:
|
|
|
5335
6001
|
while i < len(tokens) and tokens[i].type != RPAREN:
|
|
5336
6002
|
if tokens[i].type == IDENT:
|
|
5337
6003
|
# This is a parameter name
|
|
5338
|
-
|
|
6004
|
+
param_name = tokens[i].literal
|
|
6005
|
+
param_type = None
|
|
5339
6006
|
i += 1
|
|
5340
6007
|
|
|
5341
6008
|
# Check for type annotation: : type
|
|
5342
6009
|
if i < len(tokens) and tokens[i].type == COLON:
|
|
5343
6010
|
i += 1 # Skip COLON
|
|
5344
6011
|
if i < len(tokens) and tokens[i].type == IDENT:
|
|
6012
|
+
param_type = tokens[i].literal
|
|
5345
6013
|
i += 1 # Skip type name
|
|
5346
6014
|
|
|
6015
|
+
params.append(Identifier(param_name, type_annotation=param_type))
|
|
6016
|
+
|
|
5347
6017
|
# Check for comma (more parameters)
|
|
5348
6018
|
if i < len(tokens) and tokens[i].type == COMMA:
|
|
5349
6019
|
i += 1 # Skip COMMA
|
|
@@ -5460,13 +6130,21 @@ class ContextStackParser:
|
|
|
5460
6130
|
if not tokens:
|
|
5461
6131
|
return None
|
|
5462
6132
|
|
|
5463
|
-
# Check
|
|
6133
|
+
# Check for optional modifiers before the FUNCTION keyword (async, inline, native, etc.)
|
|
6134
|
+
modifier_types = {
|
|
6135
|
+
ASYNC, INLINE, NATIVE, PUBLIC, PRIVATE, SEALED, SECURE, PURE, VIEW, PAYABLE,
|
|
6136
|
+
}
|
|
6137
|
+
modifiers = []
|
|
5464
6138
|
is_async = False
|
|
5465
6139
|
start_idx = 0
|
|
5466
|
-
|
|
5467
|
-
|
|
5468
|
-
|
|
5469
|
-
|
|
6140
|
+
|
|
6141
|
+
while start_idx < len(tokens) and tokens[start_idx].type in modifier_types:
|
|
6142
|
+
modifier_token = tokens[start_idx]
|
|
6143
|
+
modifiers.append(getattr(modifier_token, 'literal', modifier_token.type))
|
|
6144
|
+
if modifier_token.type == ASYNC:
|
|
6145
|
+
is_async = True
|
|
6146
|
+
start_idx += 1
|
|
6147
|
+
|
|
5470
6148
|
# Now check for FUNCTION keyword
|
|
5471
6149
|
if start_idx >= len(tokens) or tokens[start_idx].type != FUNCTION:
|
|
5472
6150
|
return None
|
|
@@ -5535,8 +6213,8 @@ class ContextStackParser:
|
|
|
5535
6213
|
)
|
|
5536
6214
|
|
|
5537
6215
|
# Set async flag if modifier was present
|
|
5538
|
-
if
|
|
5539
|
-
func_stmt.modifiers =
|
|
6216
|
+
if modifiers:
|
|
6217
|
+
func_stmt.modifiers = modifiers
|
|
5540
6218
|
|
|
5541
6219
|
return func_stmt
|
|
5542
6220
|
|
|
@@ -5711,11 +6389,12 @@ class ContextStackParser:
|
|
|
5711
6389
|
return statements[0] # Return the first (and likely only) statement
|
|
5712
6390
|
return None
|
|
5713
6391
|
|
|
5714
|
-
# Check if this is an assignment statement (identifier = value
|
|
5715
|
-
# Look for ASSIGN token anywhere in the statement
|
|
6392
|
+
# Check if this is an assignment statement (identifier = value, +=, -=, *=, /=, %=, **=)
|
|
6393
|
+
# Look for ASSIGN or compound assignment token anywhere in the statement
|
|
6394
|
+
_compound_types = {PLUS_ASSIGN, MINUS_ASSIGN, STAR_ASSIGN, SLASH_ASSIGN, MOD_ASSIGN, POWER_ASSIGN}
|
|
5716
6395
|
assign_idx = None
|
|
5717
6396
|
for i, tok in enumerate(tokens):
|
|
5718
|
-
if tok.type == ASSIGN:
|
|
6397
|
+
if tok.type == ASSIGN or tok.type in _compound_types:
|
|
5719
6398
|
assign_idx = i
|
|
5720
6399
|
break
|
|
5721
6400
|
|
|
@@ -6855,6 +7534,24 @@ class ContextStackParser:
|
|
|
6855
7534
|
parser_debug(" ✅ Limit statement")
|
|
6856
7535
|
return LimitStatement(amount=gas_limit)
|
|
6857
7536
|
|
|
7537
|
+
def _parse_gc_statement_block(self, block_info, all_tokens):
    """Parse gc "action" statements."""
    parser_debug("🔧 [Context] Parsing gc statement")
    tokens = block_info.get('tokens', [])

    # The block must open with the GC keyword.
    if not tokens or tokens[0].type != GC:
        parser_debug(" ❌ Expected GC keyword")
        return None

    # Well-formed: gc "<action>" — lift the string literal directly.
    if len(tokens) >= 2 and tokens[1].type == STRING:
        action = tokens[1].literal
        parser_debug(f" ✅ GC statement action='{action}'")
        return GCStatement(action)

    # Malformed: fall back to treating the tokens as a bare expression.
    parser_debug(" ⚠️ GC statement missing string literal action")
    expr = self._parse_expression(tokens)
    return ExpressionStatement(expr) if expr else None
|
|
7554
|
+
|
|
6858
7555
|
def _parse_stream_statement(self, block_info, all_tokens):
|
|
6859
7556
|
"""Parse stream statement.
|
|
6860
7557
|
|