zexus 1.6.4 → 1.6.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/package.json +1 -1
- package/src/zexus/__init__.py +1 -1
- package/src/zexus/cli/main.py +1 -1
- package/src/zexus/cli/zpm.py +1 -1
- package/src/zexus/evaluator/statements.py +25 -5
- package/src/zexus/lexer.py +1 -1
- package/src/zexus/parser/parser.py +37 -0
- package/src/zexus/parser/strategy_context.py +24 -1
- package/src/zexus/parser/strategy_structural.py +51 -6
- package/src/zexus/zpm/package_manager.py +1 -1
- package/src/zexus.egg-info/PKG-INFO +2 -2
- package/src/zexus.egg-info/SOURCES.txt +2 -16
package/README.md
CHANGED
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
|
|
3
3
|
<div align="center">
|
|
4
4
|
|
|
5
|
-
![Version](https://img.shields.io/badge/version-1.6.4-green.svg)
|
|
6
6
|
[](LICENSE)
|
|
7
7
|
[](https://python.org)
|
|
8
8
|
[](https://github.com/Zaidux/zexus-interpreter)
|
package/package.json
CHANGED
package/src/zexus/__init__.py
CHANGED
package/src/zexus/cli/main.py
CHANGED
|
@@ -91,7 +91,7 @@ def show_all_commands():
|
|
|
91
91
|
console.print("\n[bold green]💡 Tip:[/bold green] Use 'zx <command> --help' for detailed command options\n")
|
|
92
92
|
|
|
93
93
|
@click.group(invoke_without_command=True)
|
|
94
|
-
@click.version_option(version="1.6.4", prog_name="Zexus")
|
|
94
|
+
@click.version_option(version="1.6.6", prog_name="Zexus")
|
|
95
95
|
@click.option('--syntax-style', type=click.Choice(['universal', 'tolerable', 'auto']),
|
|
96
96
|
default='auto', help='Syntax style to use (universal=strict, tolerable=flexible)')
|
|
97
97
|
@click.option('--advanced-parsing', is_flag=True, default=True,
|
package/src/zexus/cli/zpm.py
CHANGED
|
@@ -221,7 +221,7 @@ class StatementEvaluatorMixin:
|
|
|
221
221
|
return value
|
|
222
222
|
|
|
223
223
|
# Set as const in environment
|
|
224
|
-
env.
|
|
224
|
+
env.set(node.name.value, value)
|
|
225
225
|
return NULL
|
|
226
226
|
|
|
227
227
|
def eval_data_statement(self, node, env, stack_trace):
|
|
@@ -337,6 +337,14 @@ class StatementEvaluatorMixin:
|
|
|
337
337
|
instance.pairs[String("__immutable__")] = Boolean(is_immutable)
|
|
338
338
|
instance.pairs[String("__verified__")] = Boolean(is_verified)
|
|
339
339
|
|
|
340
|
+
# Check if single argument is a Map (from MapLiteral syntax like Block{index: 42})
|
|
341
|
+
# If so, extract field values from the map instead of treating it as positional args
|
|
342
|
+
kwargs = None
|
|
343
|
+
if len(args) == 1 and isinstance(args[0], Map):
|
|
344
|
+
# Extract keyword arguments from the Map
|
|
345
|
+
kwargs = args[0].pairs
|
|
346
|
+
debug_log("dataclass_constructor", f"Extracted {len(kwargs)} kwargs from Map")
|
|
347
|
+
|
|
340
348
|
# Process each field with validation (parent fields first, then child fields)
|
|
341
349
|
arg_index = 0
|
|
342
350
|
for field in all_fields:
|
|
@@ -348,8 +356,20 @@ class StatementEvaluatorMixin:
|
|
|
348
356
|
|
|
349
357
|
field_value = NULL
|
|
350
358
|
|
|
351
|
-
# Get value from
|
|
352
|
-
if
|
|
359
|
+
# Get value from keyword args (map syntax) or positional args
|
|
360
|
+
if kwargs is not None:
|
|
361
|
+
# Try to get value from keyword arguments (map)
|
|
362
|
+
field_value = kwargs.get(field_name, NULL)
|
|
363
|
+
if field_value == NULL:
|
|
364
|
+
# Try with String key
|
|
365
|
+
field_value = kwargs.get(String(field_name), NULL)
|
|
366
|
+
if field_value == NULL and field.default_value is not None:
|
|
367
|
+
# Use default if not provided
|
|
368
|
+
field_value = evaluator_self.eval_node(field.default_value, parent_env, stack_trace)
|
|
369
|
+
if is_error(field_value):
|
|
370
|
+
return field_value
|
|
371
|
+
elif arg_index < len(args):
|
|
372
|
+
# Positional argument
|
|
353
373
|
field_value = args[arg_index]
|
|
354
374
|
arg_index += 1
|
|
355
375
|
elif field.default_value is not None:
|
|
@@ -702,10 +722,10 @@ class StatementEvaluatorMixin:
|
|
|
702
722
|
"default": Builtin(default_static)
|
|
703
723
|
}
|
|
704
724
|
|
|
705
|
-
# Register constructor in environment
|
|
725
|
+
# Register constructor in environment
|
|
706
726
|
# For specialized generics (e.g., Box<number>), don't fail if already registered
|
|
707
727
|
try:
|
|
708
|
-
env.
|
|
728
|
+
env.set(type_name, constructor)
|
|
709
729
|
except ValueError as e:
|
|
710
730
|
# If it's a specialized generic that's already registered, just return the existing one
|
|
711
731
|
if '<' in type_name and '>' in type_name:
|
package/src/zexus/lexer.py
CHANGED
|
@@ -473,7 +473,7 @@ class Lexer:
|
|
|
473
473
|
"break": BREAK, # NEW: Break loop keyword
|
|
474
474
|
"throw": THROW, # NEW: Throw error keyword
|
|
475
475
|
"external": EXTERNAL, # NEW: External keyword
|
|
476
|
-
"from": FROM, #
|
|
476
|
+
# "from": FROM, # NOT a keyword - only recognized contextually in import statements
|
|
477
477
|
"screen": SCREEN, # NEW: renderer keyword
|
|
478
478
|
"component": COMPONENT, # NEW: renderer keyword
|
|
479
479
|
"theme": THEME, # NEW: renderer keyword
|
|
@@ -2666,6 +2666,43 @@ class UltimateParser:
|
|
|
2666
2666
|
not self.peek_token_is(RBRACKET) and
|
|
2667
2667
|
precedence <= self.peek_precedence()):
|
|
2668
2668
|
|
|
2669
|
+
# CRITICAL FIX: Stop if next token is on a new line and could start a new statement
|
|
2670
|
+
# This prevents expressions from spanning multiple logical lines
|
|
2671
|
+
if self.cur_token.line < self.peek_token.line:
|
|
2672
|
+
# Next token is on a new line - check if it could start a new statement
|
|
2673
|
+
next_could_be_statement = (
|
|
2674
|
+
self.peek_token.type == IDENT or
|
|
2675
|
+
self.peek_token.type == LET or
|
|
2676
|
+
self.peek_token.type == CONST or
|
|
2677
|
+
self.peek_token.type == RETURN or
|
|
2678
|
+
self.peek_token.type == IF or
|
|
2679
|
+
self.peek_token.type == WHILE or
|
|
2680
|
+
self.peek_token.type == FOR
|
|
2681
|
+
)
|
|
2682
|
+
if next_could_be_statement:
|
|
2683
|
+
# Additional check: is the next token followed by [ or = ?
|
|
2684
|
+
# This would indicate it's an assignment/index expression starting
|
|
2685
|
+
if self.peek_token.type == IDENT:
|
|
2686
|
+
# Save current state to peek ahead
|
|
2687
|
+
saved_cur = self.cur_token
|
|
2688
|
+
saved_peek = self.peek_token
|
|
2689
|
+
saved_pos = self.cur_pos
|
|
2690
|
+
|
|
2691
|
+
# Peek ahead one more token
|
|
2692
|
+
self.next_token() # Now peek_token is what we want to check
|
|
2693
|
+
next_next = self.peek_token
|
|
2694
|
+
|
|
2695
|
+
# Restore state
|
|
2696
|
+
self.cur_token = saved_cur
|
|
2697
|
+
self.peek_token = saved_peek
|
|
2698
|
+
self.cur_pos = saved_pos
|
|
2699
|
+
|
|
2700
|
+
# If next token after IDENT is LBRACKET or ASSIGN, it's likely a new statement
|
|
2701
|
+
if next_next.type in (LBRACKET, ASSIGN, LPAREN):
|
|
2702
|
+
break
|
|
2703
|
+
else:
|
|
2704
|
+
break
|
|
2705
|
+
|
|
2669
2706
|
if self.peek_token.type not in self.infix_parse_fns:
|
|
2670
2707
|
return left_exp
|
|
2671
2708
|
|
|
@@ -3392,6 +3392,11 @@ class ContextStackParser:
|
|
|
3392
3392
|
# E.g., after RPAREN (end of function call) or after a complete value
|
|
3393
3393
|
prev_token = run_tokens[-1] if run_tokens else None
|
|
3394
3394
|
if prev_token and prev_token.type not in {DOT, LPAREN, LBRACKET, LBRACE, ASSIGN}:
|
|
3395
|
+
# CRITICAL: Also check for newline - new line + IDENT often indicates new statement
|
|
3396
|
+
last_line = prev_token.line if hasattr(prev_token, 'line') else 0
|
|
3397
|
+
current_line = t.line if hasattr(t, 'line') else 0
|
|
3398
|
+
is_new_line = current_line > last_line
|
|
3399
|
+
|
|
3395
3400
|
# Check if this starts a new statement (assignment or function call)
|
|
3396
3401
|
k = j + 1
|
|
3397
3402
|
is_new_statement_start = False
|
|
@@ -3404,6 +3409,22 @@ class ContextStackParser:
|
|
|
3404
3409
|
# Assignment: ident = or ident.prop =
|
|
3405
3410
|
elif next_tok.type == ASSIGN:
|
|
3406
3411
|
is_new_statement_start = True
|
|
3412
|
+
# CRITICAL FIX: Indexed assignment: ident[...] =
|
|
3413
|
+
elif next_tok.type == LBRACKET:
|
|
3414
|
+
# Scan for matching RBRACKET followed by ASSIGN
|
|
3415
|
+
bracket_depth = 1
|
|
3416
|
+
scan_idx = k + 1
|
|
3417
|
+
while scan_idx < len(tokens) and scan_idx < k + 20:
|
|
3418
|
+
if tokens[scan_idx].type == LBRACKET:
|
|
3419
|
+
bracket_depth += 1
|
|
3420
|
+
elif tokens[scan_idx].type == RBRACKET:
|
|
3421
|
+
bracket_depth -= 1
|
|
3422
|
+
if bracket_depth == 0:
|
|
3423
|
+
# Found matching closing bracket, check for ASSIGN
|
|
3424
|
+
if scan_idx + 1 < len(tokens) and tokens[scan_idx + 1].type == ASSIGN:
|
|
3425
|
+
is_new_statement_start = True
|
|
3426
|
+
break
|
|
3427
|
+
scan_idx += 1
|
|
3407
3428
|
elif next_tok.type == DOT:
|
|
3408
3429
|
# Property assignment: scan for ASSIGN
|
|
3409
3430
|
while k < len(tokens) and k < j + 10:
|
|
@@ -3419,7 +3440,9 @@ class ContextStackParser:
|
|
|
3419
3440
|
else:
|
|
3420
3441
|
break
|
|
3421
3442
|
|
|
3422
|
-
if
|
|
3443
|
+
# Break if this is a new statement AND on a new line
|
|
3444
|
+
# (or if we're sure it's a new statement regardless of line)
|
|
3445
|
+
if is_new_statement_start and (is_new_line or prev_token.type == RPAREN):
|
|
3423
3446
|
break
|
|
3424
3447
|
|
|
3425
3448
|
# update nesting for parentheses/brackets/braces
|
|
@@ -728,18 +728,41 @@ class StructuralAnalyzer:
|
|
|
728
728
|
if tj.line > last_line:
|
|
729
729
|
# Check if we have balanced parens in run_tokens (statement is syntactically complete)
|
|
730
730
|
paren_count = sum(1 if tok.type == LPAREN else -1 if tok.type == RPAREN else 0 for tok in run_tokens)
|
|
731
|
-
if
|
|
731
|
+
bracket_count = sum(1 if tok.type == LBRACKET else -1 if tok.type == RBRACKET else 0 for tok in run_tokens)
|
|
732
|
+
if paren_count == 0 and bracket_count == 0:
|
|
732
733
|
# Check if run_tokens contains an assignment (this is a complete assignment statement)
|
|
733
734
|
has_assign = any(tok.type == ASSIGN for tok in run_tokens)
|
|
734
|
-
|
|
735
|
+
|
|
736
|
+
# CRITICAL FIX: Also check if this is a method call OR indexed assignment statement
|
|
737
|
+
# Method call: IDENT DOT IDENT LPAREN
|
|
738
|
+
# Indexed assignment: IDENT LBRACKET ... RBRACKET ASSIGN
|
|
739
|
+
is_method_call_stmt = False
|
|
740
|
+
is_indexed_assign_stmt = False
|
|
741
|
+
|
|
742
|
+
if len(run_tokens) >= 4:
|
|
743
|
+
# Check for method call pattern: IDENT DOT IDENT LPAREN ... RPAREN
|
|
744
|
+
has_dot = any(tok.type == DOT for tok in run_tokens)
|
|
745
|
+
if has_dot and run_tokens[0].type == IDENT:
|
|
746
|
+
is_method_call_stmt = True
|
|
747
|
+
|
|
748
|
+
# Check for indexed assignment pattern: IDENT LBRACKET ... RBRACKET ASSIGN ...
|
|
749
|
+
# Look for IDENT followed by LBRACKET
|
|
750
|
+
for idx, tok in enumerate(run_tokens[:-1]):
|
|
751
|
+
if tok.type == IDENT and run_tokens[idx + 1].type == LBRACKET:
|
|
752
|
+
is_indexed_assign_stmt = True
|
|
753
|
+
break
|
|
754
|
+
|
|
755
|
+
if has_assign or is_method_call_stmt or is_indexed_assign_stmt:
|
|
735
756
|
# Current token is on a new line and could start a new statement
|
|
736
757
|
# Check if it's IDENT (could be method call, function call, or property access)
|
|
737
758
|
if tj.type == IDENT:
|
|
738
759
|
# CRITICAL FIX: Don't break if the previous token was ASSIGN
|
|
739
760
|
# This means the IDENT is the RHS value, not a new statement
|
|
740
761
|
prev_tok = run_tokens[-1] if run_tokens else None
|
|
762
|
+
print(f" prev_tok={prev_tok.literal if prev_tok else None}, type={prev_tok.type if prev_tok else None}")
|
|
741
763
|
if prev_tok and prev_tok.type == ASSIGN:
|
|
742
764
|
# This IDENT is the RHS of the assignment, not a new statement
|
|
765
|
+
print(f" -> Continuing (RHS of assignment)")
|
|
743
766
|
pass # Don't break, continue collecting
|
|
744
767
|
else:
|
|
745
768
|
# This is likely a new statement on a new line
|
|
@@ -760,6 +783,26 @@ class StructuralAnalyzer:
|
|
|
760
783
|
# Look ahead: IDENT DOT IDENT ASSIGN is a property assignment
|
|
761
784
|
if j + 3 < n and tokens[j + 2].type == IDENT and tokens[j + 3].type == ASSIGN:
|
|
762
785
|
is_assignment_start = True
|
|
786
|
+
# Pattern 3: IDENT followed by LBRACKET could be indexed assignment (arr[i] = ...)
|
|
787
|
+
elif tj.type == IDENT and j + 1 < n and tokens[j + 1].type == LBRACKET:
|
|
788
|
+
# Look ahead to find matching RBRACKET and then ASSIGN
|
|
789
|
+
# This pattern is: IDENT [ ... ] ASSIGN
|
|
790
|
+
bracket_depth = 0
|
|
791
|
+
k = j + 1
|
|
792
|
+
found_assign_after_bracket = False
|
|
793
|
+
while k < n:
|
|
794
|
+
if tokens[k].type == LBRACKET:
|
|
795
|
+
bracket_depth += 1
|
|
796
|
+
elif tokens[k].type == RBRACKET:
|
|
797
|
+
bracket_depth -= 1
|
|
798
|
+
if bracket_depth == 0:
|
|
799
|
+
# Found matching closing bracket, check if next is ASSIGN
|
|
800
|
+
if k + 1 < n and tokens[k + 1].type == ASSIGN:
|
|
801
|
+
found_assign_after_bracket = True
|
|
802
|
+
break
|
|
803
|
+
k += 1
|
|
804
|
+
if found_assign_after_bracket:
|
|
805
|
+
is_assignment_start = True
|
|
763
806
|
|
|
764
807
|
is_new_statement = (
|
|
765
808
|
tj.type in stop_types or
|
|
@@ -802,6 +845,7 @@ class StructuralAnalyzer:
|
|
|
802
845
|
|
|
803
846
|
filtered_run_tokens = [tk for tk in run_tokens if not _is_empty_token(tk)]
|
|
804
847
|
if filtered_run_tokens: # Only create block if we have meaningful tokens
|
|
848
|
+
print(f"[DEBUG STRUCTURAL] Creating block from tokens: {[f'{t.type}:{t.literal}' for t in filtered_run_tokens[:10]]}")
|
|
805
849
|
self.blocks[block_id] = {
|
|
806
850
|
'id': block_id,
|
|
807
851
|
'type': 'statement',
|
|
@@ -993,11 +1037,12 @@ class StructuralAnalyzer:
|
|
|
993
1037
|
continue
|
|
994
1038
|
|
|
995
1039
|
# NEW: Check for line-based statement boundaries
|
|
996
|
-
# If we have balanced parens and the next token is on a new line and could start a new statement, create boundary
|
|
1040
|
+
# If we have balanced parens/brackets and the next token is on a new line and could start a new statement, create boundary
|
|
997
1041
|
if cur:
|
|
998
|
-
# Check if parens are balanced
|
|
1042
|
+
# Check if parens and brackets are balanced
|
|
999
1043
|
paren_count = sum(1 if tok.type == LPAREN else -1 if tok.type == RPAREN else 0 for tok in cur)
|
|
1000
|
-
if
|
|
1044
|
+
bracket_count = sum(1 if tok.type == LBRACKET else -1 if tok.type == RBRACKET else 0 for tok in cur)
|
|
1045
|
+
if paren_count == 0 and bracket_count == 0:
|
|
1001
1046
|
# Check if there's an ASSIGN in cur (this is a complete assignment statement)
|
|
1002
1047
|
has_assign = any(tok.type == ASSIGN for tok in cur)
|
|
1003
1048
|
if has_assign:
|
|
@@ -1005,7 +1050,7 @@ class StructuralAnalyzer:
|
|
|
1005
1050
|
last_line = cur[-1].line if cur else 0
|
|
1006
1051
|
if t.line > last_line:
|
|
1007
1052
|
# Check if current token could start a new statement
|
|
1008
|
-
# IDENT
|
|
1053
|
+
# IDENT could be a new statement (including indexed assignments like map[key] = val)
|
|
1009
1054
|
if t.type == IDENT:
|
|
1010
1055
|
# This is likely a new statement on a new line
|
|
1011
1056
|
results.append(cur)
|
|
@@ -23,7 +23,7 @@ class PackageManager:
|
|
|
23
23
|
self.installer = PackageInstaller(self.zpm_dir)
|
|
24
24
|
self.publisher = PackagePublisher(self.registry)
|
|
25
25
|
|
|
26
|
-
def init(self, name: str = None, version: str = "1.6.4") -> Dict:
|
|
26
|
+
def init(self, name: str = None, version: str = "1.6.6") -> Dict:
|
|
27
27
|
"""Initialize a new Zexus project with package.json"""
|
|
28
28
|
if self.config_file.exists():
|
|
29
29
|
print(f"⚠️ {self.config_file} already exists")
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: zexus
|
|
3
|
-
Version: 1.6.4
|
|
3
|
+
Version: 1.6.6
|
|
4
4
|
Summary: A modern, security-first programming language with blockchain support
|
|
5
5
|
Home-page: https://github.com/Zaidux/zexus-interpreter
|
|
6
6
|
Author: Zaidux
|
|
@@ -50,7 +50,7 @@ Dynamic: requires-python
|
|
|
50
50
|
|
|
51
51
|
<div align="center">
|
|
52
52
|
|
|
53
|
-

|
|
54
54
|
[](LICENSE)
|
|
55
55
|
[](https://python.org)
|
|
56
56
|
[](https://github.com/Zaidux/zexus-interpreter)
|
|
@@ -14,7 +14,6 @@ any.zx
|
|
|
14
14
|
check_verify_ast.py
|
|
15
15
|
comprehensive_test.zx
|
|
16
16
|
crypto.zx
|
|
17
|
-
debug_parse.py
|
|
18
17
|
debug_persist_ultimate.zx
|
|
19
18
|
demo_backend_server.zx
|
|
20
19
|
demo_backend_simple.zx
|
|
@@ -31,22 +30,7 @@ setup.cfg
|
|
|
31
30
|
setup.py
|
|
32
31
|
setup_stdlib.sh
|
|
33
32
|
shared_config.json
|
|
34
|
-
test_const_time_debug.zx
|
|
35
|
-
test_contract_assignment.zx
|
|
36
|
-
test_contract_debug.zx
|
|
37
|
-
test_contract_map.zx
|
|
38
33
|
test_data.json
|
|
39
|
-
test_entity_debug.zx
|
|
40
|
-
test_map_assignment.zx
|
|
41
|
-
test_map_debug.zx
|
|
42
|
-
test_map_len.zx
|
|
43
|
-
test_map_persistence.zx
|
|
44
|
-
test_nested_map_assignment.zx
|
|
45
|
-
test_simple_contract.zx
|
|
46
|
-
test_sqlite_python.py
|
|
47
|
-
test_state_variable_type.zx
|
|
48
|
-
test_storage_init.zx
|
|
49
|
-
test_storage_types.zx
|
|
50
34
|
ultimate_test.zx
|
|
51
35
|
zexus.json
|
|
52
36
|
zpics
|
|
@@ -363,6 +347,8 @@ examples/test_postgres.zx
|
|
|
363
347
|
examples/test_sqlite.zx
|
|
364
348
|
examples/token_contract.zx
|
|
365
349
|
examples/ziver_chain_test.zx
|
|
350
|
+
issues/ISSUE2.md
|
|
351
|
+
issues/ISSUE3.md
|
|
366
352
|
issues/ISSUSE1.md
|
|
367
353
|
linguist-submission/SUBMISSION_INSTRUCTIONS.md
|
|
368
354
|
linguist-submission/grammars.yml
|