zexus 1.6.4 → 1.6.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/package.json +1 -1
- package/src/zexus/__init__.py +1 -1
- package/src/zexus/cli/main.py +1 -1
- package/src/zexus/cli/zpm.py +1 -1
- package/src/zexus/evaluator/statements.py +25 -5
- package/src/zexus/lexer.py +1 -1
- package/src/zexus/parser/parser.py +37 -0
- package/src/zexus/parser/strategy_context.py +24 -1
- package/src/zexus/parser/strategy_structural.py +30 -5
- package/src/zexus/zpm/package_manager.py +1 -1
- package/src/zexus.egg-info/PKG-INFO +2 -2
- package/src/zexus.egg-info/SOURCES.txt +1 -16
package/README.md
CHANGED
@@ -2,7 +2,7 @@
 
 <div align="center">
 
-
+
 [](LICENSE)
 [](https://python.org)
 [](https://github.com/Zaidux/zexus-interpreter)
package/package.json
CHANGED
package/src/zexus/__init__.py
CHANGED
package/src/zexus/cli/main.py
CHANGED
@@ -91,7 +91,7 @@ def show_all_commands():
     console.print("\n[bold green]💡 Tip:[/bold green] Use 'zx <command> --help' for detailed command options\n")
 
 @click.group(invoke_without_command=True)
-@click.version_option(version="1.6.
+@click.version_option(version="1.6.5", prog_name="Zexus")
 @click.option('--syntax-style', type=click.Choice(['universal', 'tolerable', 'auto']),
               default='auto', help='Syntax style to use (universal=strict, tolerable=flexible)')
 @click.option('--advanced-parsing', is_flag=True, default=True,
package/src/zexus/cli/zpm.py
CHANGED
package/src/zexus/evaluator/statements.py
CHANGED
@@ -221,7 +221,7 @@ class StatementEvaluatorMixin:
         return value
 
         # Set as const in environment
-        env.
+        env.set(node.name.value, value)
         return NULL
 
     def eval_data_statement(self, node, env, stack_trace):
@@ -337,6 +337,14 @@ class StatementEvaluatorMixin:
         instance.pairs[String("__immutable__")] = Boolean(is_immutable)
         instance.pairs[String("__verified__")] = Boolean(is_verified)
 
+        # Check if single argument is a Map (from MapLiteral syntax like Block{index: 42})
+        # If so, extract field values from the map instead of treating it as positional args
+        kwargs = None
+        if len(args) == 1 and isinstance(args[0], Map):
+            # Extract keyword arguments from the Map
+            kwargs = args[0].pairs
+            debug_log("dataclass_constructor", f"Extracted {len(kwargs)} kwargs from Map")
+
         # Process each field with validation (parent fields first, then child fields)
         arg_index = 0
         for field in all_fields:
@@ -348,8 +356,20 @@ class StatementEvaluatorMixin:
 
             field_value = NULL
 
-            # Get value from
-            if
+            # Get value from keyword args (map syntax) or positional args
+            if kwargs is not None:
+                # Try to get value from keyword arguments (map)
+                field_value = kwargs.get(field_name, NULL)
+                if field_value == NULL:
+                    # Try with String key
+                    field_value = kwargs.get(String(field_name), NULL)
+                if field_value == NULL and field.default_value is not None:
+                    # Use default if not provided
+                    field_value = evaluator_self.eval_node(field.default_value, parent_env, stack_trace)
+                    if is_error(field_value):
+                        return field_value
+            elif arg_index < len(args):
+                # Positional argument
                 field_value = args[arg_index]
                 arg_index += 1
             elif field.default_value is not None:
@@ -702,10 +722,10 @@ class StatementEvaluatorMixin:
             "default": Builtin(default_static)
         }
 
-        # Register constructor in environment
+        # Register constructor in environment
         # For specialized generics (e.g., Box<number>), don't fail if already registered
         try:
-            env.
+            env.set(type_name, constructor)
         except ValueError as e:
             # If it's a specialized generic that's already registered, just return the existing one
             if '<' in type_name and '>' in type_name:
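The statements.py hunks above let a dataclass-style constructor accept a single Map argument (produced by MapLiteral call syntax such as Block{index: 42}) and read field values from its pairs, falling back to positional arguments and field defaults. A minimal standalone sketch of that dispatch logic, using plain Python dicts and a hypothetical Field/NULL pair in place of the interpreter's real Map, String, and NULL objects:

    # Standalone sketch of the constructor-argument dispatch added above.
    # Field, NULL and the plain-dict "map" are stand-ins for the interpreter's
    # own Field/NULL/Map objects, not the real zexus classes.
    from dataclasses import dataclass
    from typing import Any, Optional

    NULL = object()  # sentinel playing the role of the interpreter's NULL

    @dataclass
    class Field:
        name: str
        default: Optional[Any] = None

    def build_instance(fields, args):
        """Fill fields from one Map-like dict (keyword style) or from positional args."""
        kwargs = None
        if len(args) == 1 and isinstance(args[0], dict):
            kwargs = args[0]              # Block{index: 42} style call
        values, arg_index = {}, 0
        for field in fields:
            value = NULL
            if kwargs is not None:
                value = kwargs.get(field.name, NULL)
                if value is NULL and field.default is not None:
                    value = field.default
            elif arg_index < len(args):
                value = args[arg_index]   # positional style call
                arg_index += 1
            elif field.default is not None:
                value = field.default
            values[field.name] = value
        return values

    print(build_instance([Field("index"), Field("data", default="")], [{"index": 42}]))
    # -> {'index': 42, 'data': ''}
    print(build_instance([Field("index"), Field("data", default="")], [7, "abc"]))
    # -> {'index': 7, 'data': 'abc'}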
package/src/zexus/lexer.py
CHANGED
@@ -473,7 +473,7 @@ class Lexer:
     "break": BREAK,  # NEW: Break loop keyword
     "throw": THROW,  # NEW: Throw error keyword
     "external": EXTERNAL,  # NEW: External keyword
-    "from": FROM, #
+    # "from": FROM,  # NOT a keyword - only recognized contextually in import statements
     "screen": SCREEN,  # NEW: renderer keyword
     "component": COMPONENT,  # NEW: renderer keyword
     "theme": THEME,  # NEW: renderer keyword
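Removing "from" from the keyword table means the lexer now emits it as an ordinary IDENT, and only the import-handling code gives it special meaning. A rough illustration of that contextual-keyword idea; the token shapes and the import form used here are assumptions for the sketch, not the actual Zexus grammar or lexer API:

    # Illustrative only: a parser can treat the IDENT "from" as a contextual
    # keyword inside import statements while leaving identifiers named "from"
    # (or containing it) untouched everywhere else.
    def parse_import(tokens, pos):
        # tokens are (type, literal) pairs; pos points at the IMPORT token
        assert tokens[pos] == ("IMPORT", "import")
        name = tokens[pos + 1][1]
        source = None
        # "from" is only recognized here, not in the global keyword table
        if pos + 2 < len(tokens) and tokens[pos + 2] == ("IDENT", "from"):
            source = tokens[pos + 3][1]
        return {"import": name, "from": source}

    print(parse_import([("IMPORT", "import"), ("IDENT", "math"),
                        ("IDENT", "from"), ("STRING", "stdlib")], 0))
    # -> {'import': 'math', 'from': 'stdlib'}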
package/src/zexus/parser/parser.py
CHANGED
@@ -2666,6 +2666,43 @@ class UltimateParser:
               not self.peek_token_is(RBRACKET) and
               precedence <= self.peek_precedence()):
 
+            # CRITICAL FIX: Stop if next token is on a new line and could start a new statement
+            # This prevents expressions from spanning multiple logical lines
+            if self.cur_token.line < self.peek_token.line:
+                # Next token is on a new line - check if it could start a new statement
+                next_could_be_statement = (
+                    self.peek_token.type == IDENT or
+                    self.peek_token.type == LET or
+                    self.peek_token.type == CONST or
+                    self.peek_token.type == RETURN or
+                    self.peek_token.type == IF or
+                    self.peek_token.type == WHILE or
+                    self.peek_token.type == FOR
+                )
+                if next_could_be_statement:
+                    # Additional check: is the next token followed by [ or = ?
+                    # This would indicate it's an assignment/index expression starting
+                    if self.peek_token.type == IDENT:
+                        # Save current state to peek ahead
+                        saved_cur = self.cur_token
+                        saved_peek = self.peek_token
+                        saved_pos = self.cur_pos
+
+                        # Peek ahead one more token
+                        self.next_token()  # Now peek_token is what we want to check
+                        next_next = self.peek_token
+
+                        # Restore state
+                        self.cur_token = saved_cur
+                        self.peek_token = saved_peek
+                        self.cur_pos = saved_pos
+
+                        # If next token after IDENT is LBRACKET or ASSIGN, it's likely a new statement
+                        if next_next.type in (LBRACKET, ASSIGN, LPAREN):
+                            break
+                    else:
+                        break
+
             if self.peek_token.type not in self.infix_parse_fns:
                 return left_exp
 
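The UltimateParser change stops infix parsing when the next token sits on a new line and looks like the start of a fresh statement, using a save/peek/restore trick to look one extra token ahead. A self-contained sketch of that line-aware loop guard over a simplified token stream; names like Token, STATEMENT_STARTERS and should_stop_expression are stand-ins, not the real parser API:

    # Simplified model of the "stop at a new logical line" guard added above.
    from dataclasses import dataclass

    @dataclass
    class Token:
        type: str
        literal: str
        line: int

    STATEMENT_STARTERS = {"IDENT", "LET", "CONST", "RETURN", "IF", "WHILE", "FOR"}

    def should_stop_expression(cur, peek, peek2):
        """Return True if `peek` begins a new statement on a new line."""
        if cur.line >= peek.line:                 # still on the same logical line
            return False
        if peek.type not in STATEMENT_STARTERS:
            return False
        if peek.type == "IDENT":
            # IDENT only counts as a new statement when followed by [, = or (
            return peek2 is not None and peek2.type in {"LBRACKET", "ASSIGN", "LPAREN"}
        return True

    toks = [Token("IDENT", "total", 1), Token("IDENT", "scores", 2),
            Token("LBRACKET", "[", 2)]
    print(should_stop_expression(toks[0], toks[1], toks[2]))  # True: scores[...] starts line 2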
package/src/zexus/parser/strategy_context.py
CHANGED
@@ -3392,6 +3392,11 @@ class ContextStackParser:
         # E.g., after RPAREN (end of function call) or after a complete value
         prev_token = run_tokens[-1] if run_tokens else None
         if prev_token and prev_token.type not in {DOT, LPAREN, LBRACKET, LBRACE, ASSIGN}:
+            # CRITICAL: Also check for newline - new line + IDENT often indicates new statement
+            last_line = prev_token.line if hasattr(prev_token, 'line') else 0
+            current_line = t.line if hasattr(t, 'line') else 0
+            is_new_line = current_line > last_line
+
             # Check if this starts a new statement (assignment or function call)
             k = j + 1
             is_new_statement_start = False
@@ -3404,6 +3409,22 @@ class ContextStackParser:
             # Assignment: ident = or ident.prop =
             elif next_tok.type == ASSIGN:
                 is_new_statement_start = True
+            # CRITICAL FIX: Indexed assignment: ident[...] =
+            elif next_tok.type == LBRACKET:
+                # Scan for matching RBRACKET followed by ASSIGN
+                bracket_depth = 1
+                scan_idx = k + 1
+                while scan_idx < len(tokens) and scan_idx < k + 20:
+                    if tokens[scan_idx].type == LBRACKET:
+                        bracket_depth += 1
+                    elif tokens[scan_idx].type == RBRACKET:
+                        bracket_depth -= 1
+                        if bracket_depth == 0:
+                            # Found matching closing bracket, check for ASSIGN
+                            if scan_idx + 1 < len(tokens) and tokens[scan_idx + 1].type == ASSIGN:
+                                is_new_statement_start = True
+                            break
+                    scan_idx += 1
             elif next_tok.type == DOT:
                 # Property assignment: scan for ASSIGN
                 while k < len(tokens) and k < j + 10:
@@ -3419,7 +3440,9 @@ class ContextStackParser:
                 else:
                     break
 
-            if
+            # Break if this is a new statement AND on a new line
+            # (or if we're sure it's a new statement regardless of line)
+            if is_new_statement_start and (is_new_line or prev_token.type == RPAREN):
                 break
 
         # update nesting for parentheses/brackets/braces
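Both parser strategies now recognize `ident[...] =` as the start of a new statement by scanning for the bracket that matches the opening `[` and checking whether an `=` follows it. A compact standalone version of that scan, operating on plain token-type strings rather than the interpreter's token objects:

    # Standalone version of the "ident[...] =" lookahead used above.
    def is_indexed_assignment(types, j, window=20):
        """True if types[j] is IDENT and types[j+1:] reads "[ ... ] =" with balanced brackets."""
        if types[j] != "IDENT" or j + 1 >= len(types) or types[j + 1] != "LBRACKET":
            return False
        depth, k = 1, j + 2
        while k < len(types) and k < j + 1 + window:
            if types[k] == "LBRACKET":
                depth += 1
            elif types[k] == "RBRACKET":
                depth -= 1
                if depth == 0:
                    # matching ] found: an ASSIGN right after it marks an indexed assignment
                    return k + 1 < len(types) and types[k + 1] == "ASSIGN"
            k += 1
        return False

    # map[key] = value -> new statement; map[key] + 1 -> not an assignment
    print(is_indexed_assignment(["IDENT", "LBRACKET", "IDENT", "RBRACKET", "ASSIGN", "INT"], 0))  # True
    print(is_indexed_assignment(["IDENT", "LBRACKET", "IDENT", "RBRACKET", "PLUS", "INT"], 0))    # False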
package/src/zexus/parser/strategy_structural.py
CHANGED
@@ -728,9 +728,11 @@ class StructuralAnalyzer:
         if tj.line > last_line:
             # Check if we have balanced parens in run_tokens (statement is syntactically complete)
             paren_count = sum(1 if tok.type == LPAREN else -1 if tok.type == RPAREN else 0 for tok in run_tokens)
-            if
+            bracket_count = sum(1 if tok.type == LBRACKET else -1 if tok.type == RBRACKET else 0 for tok in run_tokens)
+            if paren_count == 0 and bracket_count == 0:
                 # Check if run_tokens contains an assignment (this is a complete assignment statement)
                 has_assign = any(tok.type == ASSIGN for tok in run_tokens)
+                print(f" has_assign={has_assign}, tj.type={tj.type}")
                 if has_assign:
                     # Current token is on a new line and could start a new statement
                     # Check if it's IDENT (could be method call, function call, or property access)
@@ -738,8 +740,10 @@ class StructuralAnalyzer:
             # CRITICAL FIX: Don't break if the previous token was ASSIGN
             # This means the IDENT is the RHS value, not a new statement
             prev_tok = run_tokens[-1] if run_tokens else None
+            print(f" prev_tok={prev_tok.literal if prev_tok else None}, type={prev_tok.type if prev_tok else None}")
             if prev_tok and prev_tok.type == ASSIGN:
                 # This IDENT is the RHS of the assignment, not a new statement
+                print(f" -> Continuing (RHS of assignment)")
                 pass  # Don't break, continue collecting
             else:
                 # This is likely a new statement on a new line
@@ -760,6 +764,26 @@ class StructuralAnalyzer:
             # Look ahead: IDENT DOT IDENT ASSIGN is a property assignment
             if j + 3 < n and tokens[j + 2].type == IDENT and tokens[j + 3].type == ASSIGN:
                 is_assignment_start = True
+            # Pattern 3: IDENT followed by LBRACKET could be indexed assignment (arr[i] = ...)
+            elif tj.type == IDENT and j + 1 < n and tokens[j + 1].type == LBRACKET:
+                # Look ahead to find matching RBRACKET and then ASSIGN
+                # This pattern is: IDENT [ ... ] ASSIGN
+                bracket_depth = 0
+                k = j + 1
+                found_assign_after_bracket = False
+                while k < n:
+                    if tokens[k].type == LBRACKET:
+                        bracket_depth += 1
+                    elif tokens[k].type == RBRACKET:
+                        bracket_depth -= 1
+                        if bracket_depth == 0:
+                            # Found matching closing bracket, check if next is ASSIGN
+                            if k + 1 < n and tokens[k + 1].type == ASSIGN:
+                                found_assign_after_bracket = True
+                            break
+                    k += 1
+                if found_assign_after_bracket:
+                    is_assignment_start = True
 
             is_new_statement = (
                 tj.type in stop_types or
@@ -993,11 +1017,12 @@ class StructuralAnalyzer:
             continue
 
         # NEW: Check for line-based statement boundaries
-        # If we have balanced parens and the next token is on a new line and could start a new statement, create boundary
+        # If we have balanced parens/brackets and the next token is on a new line and could start a new statement, create boundary
         if cur:
-            # Check if parens are balanced
+            # Check if parens and brackets are balanced
             paren_count = sum(1 if tok.type == LPAREN else -1 if tok.type == RPAREN else 0 for tok in cur)
-            if
+            bracket_count = sum(1 if tok.type == LBRACKET else -1 if tok.type == RBRACKET else 0 for tok in cur)
+            if paren_count == 0 and bracket_count == 0:
                 # Check if there's an ASSIGN in cur (this is a complete assignment statement)
                 has_assign = any(tok.type == ASSIGN for tok in cur)
                 if has_assign:
@@ -1005,7 +1030,7 @@ class StructuralAnalyzer:
                     last_line = cur[-1].line if cur else 0
                     if t.line > last_line:
                         # Check if current token could start a new statement
-                        # IDENT
+                        # IDENT could be a new statement (including indexed assignments like map[key] = val)
                        if t.type == IDENT:
                             # This is likely a new statement on a new line
                             results.append(cur)
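The StructuralAnalyzer hunks extend the line-based boundary check so a run of tokens is only split at a new line when both parentheses and brackets are balanced, which keeps multi-line calls and index expressions together. A small sketch of that balance test, with token types as plain strings instead of the real token objects:

    # Sketch of the balanced-delimiter test behind the new statement-boundary rule.
    def is_balanced(run_types):
        """True when every ( has a ) and every [ has a ] in the collected run."""
        paren = sum(1 if t == "LPAREN" else -1 if t == "RPAREN" else 0 for t in run_types)
        bracket = sum(1 if t == "LBRACKET" else -1 if t == "RBRACKET" else 0 for t in run_types)
        return paren == 0 and bracket == 0

    def can_split_here(run_types, next_type, new_line):
        """Only start a new statement at a new line once the current run is complete."""
        return (new_line and next_type == "IDENT"
                and is_balanced(run_types) and "ASSIGN" in run_types)

    # "total = scores[" is still open, so an IDENT on the next line must not split it
    print(can_split_here(["IDENT", "ASSIGN", "IDENT", "LBRACKET"], "IDENT", True))   # False
    print(can_split_here(["IDENT", "ASSIGN", "IDENT", "LBRACKET", "INT", "RBRACKET"],
                         "IDENT", True))                                             # True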
package/src/zexus/zpm/package_manager.py
CHANGED
@@ -23,7 +23,7 @@ class PackageManager:
         self.installer = PackageInstaller(self.zpm_dir)
         self.publisher = PackagePublisher(self.registry)
 
-    def init(self, name: str = None, version: str = "1.6.
+    def init(self, name: str = None, version: str = "1.6.5") -> Dict:
         """Initialize a new Zexus project with package.json"""
         if self.config_file.exists():
             print(f"⚠️ {self.config_file} already exists")
package/src/zexus.egg-info/PKG-INFO
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: zexus
-Version: 1.6.4
+Version: 1.6.5
 Summary: A modern, security-first programming language with blockchain support
 Home-page: https://github.com/Zaidux/zexus-interpreter
 Author: Zaidux
@@ -50,7 +50,7 @@ Dynamic: requires-python
 
 <div align="center">
 
-
+
 [](LICENSE)
 [](https://python.org)
 [](https://github.com/Zaidux/zexus-interpreter)
package/src/zexus.egg-info/SOURCES.txt
CHANGED
@@ -14,7 +14,6 @@ any.zx
 check_verify_ast.py
 comprehensive_test.zx
 crypto.zx
-debug_parse.py
 debug_persist_ultimate.zx
 demo_backend_server.zx
 demo_backend_simple.zx
@@ -31,22 +30,7 @@ setup.cfg
 setup.py
 setup_stdlib.sh
 shared_config.json
-test_const_time_debug.zx
-test_contract_assignment.zx
-test_contract_debug.zx
-test_contract_map.zx
 test_data.json
-test_entity_debug.zx
-test_map_assignment.zx
-test_map_debug.zx
-test_map_len.zx
-test_map_persistence.zx
-test_nested_map_assignment.zx
-test_simple_contract.zx
-test_sqlite_python.py
-test_state_variable_type.zx
-test_storage_init.zx
-test_storage_types.zx
 ultimate_test.zx
 zexus.json
 zpics
@@ -363,6 +347,7 @@ examples/test_postgres.zx
 examples/test_sqlite.zx
 examples/token_contract.zx
 examples/ziver_chain_test.zx
+issues/ISSUE2.md
 issues/ISSUSE1.md
 linguist-submission/SUBMISSION_INSTRUCTIONS.md
 linguist-submission/grammars.yml