zexus 1.8.2 → 1.8.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/README.md +89 -64
  2. package/package.json +1 -1
  3. package/rust_core/Cargo.lock +1 -1
  4. package/src/zexus/__init__.py +1 -1
  5. package/src/zexus/builtin_modules.py +50 -13
  6. package/src/zexus/cli/main.py +46 -1
  7. package/src/zexus/cli/zpm.py +1 -1
  8. package/src/zexus/evaluator/bytecode_compiler.py +11 -2
  9. package/src/zexus/evaluator/core.py +4 -1
  10. package/src/zexus/evaluator/expressions.py +11 -2
  11. package/src/zexus/evaluator/functions.py +72 -0
  12. package/src/zexus/evaluator/resource_limiter.py +1 -1
  13. package/src/zexus/evaluator/statements.py +44 -4
  14. package/src/zexus/kernel/__init__.py +34 -0
  15. package/src/zexus/kernel/hooks.py +276 -0
  16. package/src/zexus/kernel/registry.py +203 -0
  17. package/src/zexus/kernel/zir/__init__.py +145 -0
  18. package/src/zexus/lexer.py +7 -0
  19. package/src/zexus/object.py +28 -5
  20. package/src/zexus/parser/parser.py +53 -11
  21. package/src/zexus/parser/strategy_context.py +179 -10
  22. package/src/zexus/security.py +26 -2
  23. package/src/zexus/stdlib/blockchain.py +84 -0
  24. package/src/zexus/stdlib/http_server.py +2 -2
  25. package/src/zexus/stdlib/math.py +25 -17
  26. package/src/zexus/stdlib_integration.py +119 -2
  27. package/src/zexus/type_checker.py +17 -12
  28. package/src/zexus/vm/compiler.py +57 -6
  29. package/src/zexus/vm/fastops.c +4704 -1263
  30. package/src/zexus/vm/fastops.cpython-312-x86_64-linux-gnu.so +0 -0
  31. package/src/zexus/vm/fastops.pyx +81 -3
  32. package/src/zexus/vm/optimizer.py +65 -27
  33. package/src/zexus/vm/vm.py +871 -98
  34. package/src/zexus/zexus_ast.py +4 -1
  35. package/src/zexus/zpm/package_manager.py +1 -1
  36. package/src/zexus.egg-info/PKG-INFO +90 -65
  37. package/src/zexus.egg-info/SOURCES.txt +51 -0
@@ -0,0 +1,145 @@
1
+ """
2
+ Zexus Intermediate Representation (ZIR) — Opcode catalogue and validation.
3
+
4
+ This module formalises the opcodes the VM already uses, putting them into
5
+ an enum so that tooling (LSP, debugger, profiler) and domain authors can
6
+ reference them by name. It does NOT replace the existing Opcode class in
7
+ ``vm/compiler.py`` — it sits alongside it as a typed reference.
8
+
9
+ Opcode ranges
10
+ -------------
11
+ 0x0001–0x00FF Core operations (math, stack, variables)
12
+ 0x0100–0x01FF Control flow (jump, call, return, try)
13
+ 0x0200–0x02FF Memory / field access
14
+ 0x1000–0x1FFF Domain-specific (registered at runtime via DomainRegistry)
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ from enum import IntEnum
20
+ from typing import Optional
21
+
22
+
23
+ class CoreOpcode(IntEnum):
24
+ """Core opcodes understood by every execution backend.
25
+
26
+ These mirror the opcodes already defined in the VM — they provide
27
+ a stable, importable reference for tooling and domain code.
28
+ """
29
+
30
+ # -- Constants / stack --------------------------------------------------
31
+ LOAD_CONST = 0x0001
32
+ LOAD_NAME = 0x0002
33
+ STORE_NAME = 0x0003
34
+ POP_TOP = 0x0004
35
+ DUP_TOP = 0x0005
36
+
37
+ # -- Arithmetic ---------------------------------------------------------
38
+ ADD = 0x0010
39
+ SUB = 0x0011
40
+ MUL = 0x0012
41
+ DIV = 0x0013
42
+ MOD = 0x0014
43
+ POW = 0x0015
44
+ NEG = 0x0016
45
+
46
+ # -- Comparison ---------------------------------------------------------
47
+ EQ = 0x0020
48
+ NEQ = 0x0021
49
+ LT = 0x0022
50
+ GT = 0x0023
51
+ LTE = 0x0024
52
+ GTE = 0x0025
53
+
54
+ # -- Logic --------------------------------------------------------------
55
+ AND = 0x0030
56
+ OR = 0x0031
57
+ NOT = 0x0032
58
+
59
+ # -- Collections --------------------------------------------------------
60
+ BUILD_LIST = 0x0040
61
+ BUILD_MAP = 0x0041
62
+ INDEX = 0x0042
63
+ STORE_INDEX = 0x0043
64
+
65
+ # -- Control flow -------------------------------------------------------
66
+ JUMP = 0x0100
67
+ JUMP_IF_FALSE = 0x0101
68
+ CALL = 0x0102
69
+ RETURN = 0x0103
70
+ TRY = 0x0104
71
+ THROW = 0x0105
72
+
73
+ # -- Memory / fields ----------------------------------------------------
74
+ LOAD_FIELD = 0x0200
75
+ STORE_FIELD = 0x0201
76
+ ALLOC = 0x0202
77
+
78
+
79
+ # All valid core opcode values for fast membership testing
80
+ _CORE_OPCODES = frozenset(int(op) for op in CoreOpcode)
81
+
82
+
83
+ def is_core_opcode(opcode: int) -> bool:
84
+ """Return True if *opcode* is in the core range."""
85
+ return opcode in _CORE_OPCODES
86
+
87
+
88
+ def resolve_opcode_name(opcode: int) -> Optional[str]:
89
+ """Human-readable name for any opcode (core or domain).
90
+
91
+ Returns ``None`` for completely unknown opcodes.
92
+ """
93
+ # Try core first
94
+ try:
95
+ return CoreOpcode(opcode).name
96
+ except ValueError:
97
+ pass
98
+
99
+ # Try domain registry
100
+ from ..registry import get_registry
101
+ owner = get_registry().resolve_opcode(opcode)
102
+ if owner is not None:
103
+ domain = get_registry().get_domain(owner)
104
+ if domain:
105
+ return domain.opcodes.get(opcode)
106
+ return None
107
+
108
+
109
+ def validate_zir(
110
+ instructions: list,
111
+ *,
112
+ allowed_domains: Optional[set[str]] = None,
113
+ ) -> list[str]:
114
+ """Validate a sequence of ZIR instructions.
115
+
116
+ Returns a list of error strings (empty == valid).
117
+
118
+ Parameters
119
+ ----------
120
+ instructions
121
+ ``[(opcode, *operands), ...]``
122
+ allowed_domains
123
+ If given, only opcodes belonging to these domains (plus core)
124
+ are permitted.
125
+ """
126
+ from ..registry import get_registry
127
+
128
+ errors: list[str] = []
129
+ registry = get_registry()
130
+
131
+ for idx, instr in enumerate(instructions):
132
+ opcode = instr[0] if isinstance(instr, (list, tuple)) else instr
133
+
134
+ if opcode in _CORE_OPCODES:
135
+ continue
136
+
137
+ owner = registry.resolve_opcode(opcode)
138
+ if owner is None:
139
+ errors.append(f"Instruction {idx}: unknown opcode 0x{opcode:04X}")
140
+ elif allowed_domains is not None and owner not in allowed_domains:
141
+ errors.append(
142
+ f"Instruction {idx}: opcode 0x{opcode:04X} belongs to "
143
+ f"domain {owner!r} which is not in allowed set"
144
+ )
145
+ return errors
@@ -915,6 +915,13 @@ class Lexer:
915
915
  return token
916
916
  return IDENT
917
917
 
918
+ # 'storage' is only a keyword after 'persistent' (for 'persistent storage ...')
919
+ # In all other contexts, treat as a regular identifier.
920
+ if ident == "storage":
921
+ if self.last_token_type == PERSISTENT:
922
+ return token # STORAGE keyword
923
+ return IDENT
924
+
918
925
  if ident in _STRICT_KEYWORDS:
919
926
  return token
920
927
 
@@ -11,7 +11,7 @@ from threading import Lock
11
11
 
12
12
 
13
13
  # ===============================================
14
- # SECURITY: Path & Identifier Sanitization (v1.8.1)
14
+ # SECURITY: Path & Identifier Sanitization (v1.8.3)
15
15
  # ===============================================
16
16
 
17
17
  def _safe_resolve_path(path: str, sandbox: str = None) -> str:
@@ -183,6 +183,11 @@ class List(Object):
183
183
  self.elements.extend(other_list.elements)
184
184
  return self
185
185
 
186
+ def is_empty(self):
187
+ """Q-002 fix: Check if list is empty, returns Boolean"""
188
+ from .object import Boolean
189
+ return Boolean(len(self.elements) == 0)
190
+
186
191
  class Map(Object):
187
192
  def __init__(self, pairs):
188
193
  self.pairs = pairs
@@ -242,9 +247,17 @@ class Map(Object):
242
247
  return Array([Array([k, v]) for k, v in self.pairs.items()])
243
248
 
244
249
  def has(self, key):
245
- """Check if key exists in map"""
250
+ """Check if key exists in map (with key normalization)"""
246
251
  from .object import Boolean
247
- return Boolean(key in self.pairs)
252
+ # Q-001 fix: Try direct, then normalized key lookup (same as get())
253
+ if key in self.pairs:
254
+ return Boolean(True)
255
+ if isinstance(key, str):
256
+ str_key = String(key)
257
+ if str_key in self.pairs:
258
+ return Boolean(True)
259
+ norm_key = self._normalize_key(key)
260
+ return Boolean(norm_key in self.pairs)
248
261
 
249
262
  def size(self):
250
263
  """Return number of entries in map"""
@@ -1193,8 +1206,18 @@ class Environment:
1193
1206
  return val
1194
1207
 
1195
1208
  def clone_for_closure(self):
1196
- """Create a shallow copy of the environment for closure capture."""
1197
- cloned = Environment(outer=self.outer, persistence_scope=self.persistence_scope)
1209
+ """Create a shallow copy of the environment for closure capture.
1210
+
1211
+ The clone uses the original environment as its outer scope so that
1212
+ identifiers registered after cloning (e.g. contracts/entities defined
1213
+ later in sequential evaluation) remain reachable through the scope
1214
+ chain. Mutations via ``assign()`` also propagate back to the
1215
+ original environment, fixing R-018/R-019 (side-effects from
1216
+ module-level helpers called inside contract methods silently dropped).
1217
+ """
1218
+ # Use *self* (the live env) as outer instead of self.outer so that
1219
+ # names added to the original env after cloning are still visible.
1220
+ cloned = Environment(outer=self, persistence_scope=self.persistence_scope)
1198
1221
  cloned.store = dict(self.store)
1199
1222
  cloned.const_vars = set(self.const_vars)
1200
1223
  cloned.exports = dict(self.exports)
@@ -2919,7 +2919,19 @@ class UltimateParser:
2919
2919
  self.errors.append("Expected identifier after 'each' in for-each loop")
2920
2920
  return None
2921
2921
 
2922
- stmt.item = Identifier(value=self.cur_token.literal)
2922
+ first_ident = Identifier(value=self.cur_token.literal)
2923
+
2924
+ # R-006/R-007 fix: Support two-variable form: for each i, item in list
2925
+ # or: for each key, val in map
2926
+ if self.peek_token_is(COMMA):
2927
+ self.next_token() # consume comma
2928
+ if not self.expect_peek(IDENT):
2929
+ self.errors.append("Expected second identifier after ',' in for-each loop")
2930
+ return None
2931
+ stmt.index = first_ident
2932
+ stmt.item = Identifier(value=self.cur_token.literal)
2933
+ else:
2934
+ stmt.item = first_ident
2923
2935
 
2924
2936
  if not self.expect_peek(IN):
2925
2937
  self.errors.append("Expected 'in' after item identifier in for-each loop")
@@ -3417,8 +3429,9 @@ class UltimateParser:
3417
3429
  self.cur_token = saved_cur
3418
3430
  self.peek_token = saved_peek
3419
3431
 
3420
- # If next token after IDENT is LBRACKET or ASSIGN, it's likely a new statement
3421
- if next_next.type in (LBRACKET, ASSIGN, LPAREN):
3432
+ # If next token after IDENT is LBRACKET, ASSIGN, LPAREN, or DOT,
3433
+ # it's likely a new statement (e.g., this.x = 1\nthis.y = 2)
3434
+ if next_next.type in (LBRACKET, ASSIGN, LPAREN, DOT):
3422
3435
  break
3423
3436
  else:
3424
3437
  break
@@ -3944,7 +3957,7 @@ class UltimateParser:
3944
3957
  methods.append(method)
3945
3958
  continue
3946
3959
 
3947
- if self.cur_token_is(IDENT):
3960
+ if self.cur_token_is(IDENT) or (self.cur_token.literal and self.peek_token_is(COLON)):
3948
3961
  prop_name = self.cur_token.literal
3949
3962
 
3950
3963
  # Expect colon after property name
@@ -3960,9 +3973,17 @@ class UltimateParser:
3960
3973
  if self.cur_token_is(IDENT):
3961
3974
  prop_type = self.cur_token.literal
3962
3975
 
3976
+ # Check for default value: = expression
3977
+ default_value = None
3978
+ if self.peek_token_is(ASSIGN):
3979
+ self.next_token() # Move to =
3980
+ self.next_token() # Move to expression
3981
+ default_value = self.parse_expression(LOWEST)
3982
+
3963
3983
  properties.append({
3964
3984
  "name": prop_name,
3965
- "type": prop_type
3985
+ "type": prop_type,
3986
+ "default_value": default_value
3966
3987
  })
3967
3988
 
3968
3989
  # Check for comma or new property
@@ -4070,12 +4091,33 @@ class UltimateParser:
4070
4091
 
4071
4092
  # Check for state variable declaration
4072
4093
  if self.cur_token_is(STATE):
4073
- state_stmt = self.parse_state_statement()
4074
- if state_stmt:
4075
- # Attach parsed modifiers
4076
- state_stmt.modifiers = modifiers
4077
- storage_vars.append(state_stmt)
4078
- print(f"DEBUG: Parsed state {state_stmt.name.value} modifiers={modifiers}")
4094
+ # R-013 fix: Support state { field: val, field2: val2 } block syntax
4095
+ if self.peek_token_is(LBRACE):
4096
+ self.next_token() # consume '{'
4097
+ self.next_token() # move to first field or '}'
4098
+ while not self.cur_token_is(RBRACE) and not self.cur_token_is(EOF):
4099
+ if self.cur_token_is(IDENT):
4100
+ field_name = self.cur_token.literal
4101
+ # Expect colon or '=' after field name
4102
+ if self.peek_token_is(COLON) or self.peek_token_is(ASSIGN):
4103
+ self.next_token() # consume ':' or '='
4104
+ self.next_token() # move to value
4105
+ field_value = self.parse_expression(LOWEST)
4106
+ from ..zexus_ast import StateStatement as _SS
4107
+ state_stmt = _SS(Identifier(field_name), field_value, modifiers=modifiers)
4108
+ storage_vars.append(state_stmt)
4109
+ # Consume optional comma or semicolon
4110
+ if self.peek_token_is(COMMA):
4111
+ self.next_token()
4112
+ if self.peek_token_is(SEMICOLON):
4113
+ self.next_token()
4114
+ self.next_token()
4115
+ else:
4116
+ state_stmt = self.parse_state_statement()
4117
+ if state_stmt:
4118
+ # Attach parsed modifiers
4119
+ state_stmt.modifiers = modifiers
4120
+ storage_vars.append(state_stmt)
4079
4121
 
4080
4122
  # Check for data member declaration
4081
4123
  elif self.cur_token_is(DATA):
@@ -1514,7 +1514,8 @@ class ContextStackParser:
1514
1514
  continue
1515
1515
 
1516
1516
  # Parse regular properties
1517
- if tokens[i].type == IDENT:
1517
+ # Accept IDENT and keywords (like DEBUG, DATA, etc.) as property names
1518
+ if tokens[i].type == IDENT or (tokens[i].literal and i + 1 < brace_end and tokens[i + 1].type == COLON):
1518
1519
  prop_name = tokens[i].literal
1519
1520
  parser_debug(f" 📝 Found property name: {prop_name}")
1520
1521
 
@@ -1522,14 +1523,48 @@ class ContextStackParser:
1522
1523
  if i + 1 < brace_end and tokens[i + 1].type == COLON:
1523
1524
  if i + 2 < brace_end:
1524
1525
  prop_type = tokens[i + 2].literal
1526
+ default_value = None
1527
+ prop_end = i + 3
1528
+
1529
+ # Check for default value: = expression
1530
+ if prop_end < brace_end and tokens[prop_end].type == ASSIGN:
1531
+ prop_end += 1 # Skip =
1532
+ # Collect default value tokens until comma, newline-property, or end
1533
+ val_tokens = []
1534
+ nesting = 0
1535
+ while prop_end < brace_end:
1536
+ vt = tokens[prop_end]
1537
+ if vt.type in {LPAREN, LBRACE, LBRACKET}:
1538
+ nesting += 1
1539
+ elif vt.type in {RPAREN, RBRACE, RBRACKET}:
1540
+ nesting -= 1
1541
+ elif nesting == 0 and vt.type == COMMA:
1542
+ prop_end += 1 # Skip comma
1543
+ break
1544
+ elif nesting == 0 and len(val_tokens) > 0:
1545
+ # New property on next line (detect by newline + colon lookahead)
1546
+ prev_vt = val_tokens[-1]
1547
+ if vt.line > prev_vt.line:
1548
+ # Check if this token is followed by COLON (indicating property name)
1549
+ if prop_end + 1 < brace_end and tokens[prop_end + 1].type == COLON:
1550
+ break
1551
+ val_tokens.append(vt)
1552
+ prop_end += 1
1553
+
1554
+ if val_tokens:
1555
+ default_value = self._parse_expression(val_tokens)
1556
+
1525
1557
  # Use AstNodeShim so evaluator can use .name.value
1526
1558
  properties.append(AstNodeShim(
1527
1559
  name=Identifier(prop_name),
1528
1560
  type=Identifier(prop_type),
1529
- default_value=None
1561
+ default_value=default_value
1530
1562
  ))
1531
- parser_debug(f" 📝 Property: {prop_name}: {prop_type}")
1532
- i += 3
1563
+ parser_debug(f" 📝 Property: {prop_name}: {prop_type}" + (f" = {default_value}" if default_value else ""))
1564
+ i = prop_end
1565
+ # Skip trailing comma
1566
+ if i < brace_end and tokens[i].type == COMMA:
1567
+ i += 1
1533
1568
  continue
1534
1569
 
1535
1570
  i += 1
@@ -1720,6 +1755,56 @@ class ContextStackParser:
1720
1755
  # Move to identifier after "state"
1721
1756
  i += 1
1722
1757
 
1758
+ # R-013 fix: Support state { field: val, field2: val2 } block syntax
1759
+ if i < brace_end and tokens[i].type == LBRACE:
1760
+ # Multi-field state block
1761
+ i += 1 # skip opening brace
1762
+ state_brace_depth = 1
1763
+ while i < brace_end and state_brace_depth > 0:
1764
+ if tokens[i].type == RBRACE:
1765
+ state_brace_depth -= 1
1766
+ if state_brace_depth == 0:
1767
+ i += 1
1768
+ break
1769
+ elif tokens[i].type == LBRACE:
1770
+ state_brace_depth += 1
1771
+ i += 1
1772
+ elif tokens[i].type == IDENT or (hasattr(tokens[i], 'literal') and tokens[i].type not in (COMMA, SEMICOLON)):
1773
+ field_name = tokens[i].literal
1774
+ field_val = None
1775
+ ci = i + 1
1776
+ # Expect colon or =
1777
+ if ci < brace_end and tokens[ci].type in (COLON, ASSIGN):
1778
+ ci += 1
1779
+ if ci < brace_end:
1780
+ vt = tokens[ci]
1781
+ if vt.type == STRING:
1782
+ field_val = StringLiteral(vt.literal)
1783
+ elif vt.type == INT:
1784
+ field_val = IntegerLiteral(int(vt.literal))
1785
+ elif vt.type == FLOAT:
1786
+ field_val = FloatLiteral(float(vt.literal))
1787
+ elif vt.type == TRUE:
1788
+ field_val = Boolean(True)
1789
+ elif vt.type == FALSE:
1790
+ field_val = Boolean(False)
1791
+ elif vt.type == IDENT:
1792
+ field_val = Identifier(vt.literal)
1793
+ ci += 1
1794
+ # Skip optional comma
1795
+ if ci < brace_end and tokens[ci].type == COMMA:
1796
+ ci += 1
1797
+ storage_vars.append(AstNodeShim(
1798
+ name=Identifier(field_name),
1799
+ type=Identifier("any"),
1800
+ initial_value=field_val,
1801
+ default_value=field_val
1802
+ ))
1803
+ i = ci
1804
+ else:
1805
+ i += 1
1806
+ continue
1807
+
1723
1808
  # State variable name can be an identifier OR a keyword being used as an identifier
1724
1809
  # (e.g., "state data = {}" where "data" is a keyword but used as a variable name)
1725
1810
  if i < brace_end and (tokens[i].type == IDENT or hasattr(tokens[i], 'literal')):
@@ -2461,8 +2546,13 @@ class ContextStackParser:
2461
2546
  j += 1 # Skip the semicolon
2462
2547
  break
2463
2548
  # Stop at statement keywords when not nested
2549
+ # But NOT if the previous token was DOT (property/method access)
2464
2550
  elif nesting == 0 and t.type in statement_starters and j > i + 1:
2465
- break
2551
+ prev_t = tokens[j - 1] if j > 0 else None
2552
+ if prev_t and prev_t.type == DOT:
2553
+ pass # Don't break - it's a property name
2554
+ else:
2555
+ break
2466
2556
  j += 1
2467
2557
 
2468
2558
  print_tokens = tokens[i:j]
@@ -3489,8 +3579,70 @@ class ContextStackParser:
3489
3579
 
3490
3580
  elif tokens[j].type == ELSE:
3491
3581
  j += 1
3582
+ # Check for "else if" pattern (ELSE followed by IF = elif chain)
3583
+ if j < len(tokens) and tokens[j].type == IF:
3584
+ # Handle "else if" inline as an elif chain entry
3585
+ # Parse the else-if condition (same as elif)
3586
+ j += 1 # Skip the IF token
3587
+ elif_cond_tokens = []
3588
+ elif_paren_depth = 0
3589
+ elif_skipped_outer = False
3590
+ while j < len(tokens) and tokens[j].type not in [LBRACE, COLON]:
3591
+ if tokens[j].type == LPAREN:
3592
+ if len(elif_cond_tokens) == 0 and elif_paren_depth == 0:
3593
+ j += 1
3594
+ elif_paren_depth += 1
3595
+ elif_skipped_outer = True
3596
+ continue
3597
+ else:
3598
+ elif_paren_depth += 1
3599
+ elif tokens[j].type == RPAREN:
3600
+ elif_paren_depth -= 1
3601
+ if elif_paren_depth == 0 and elif_skipped_outer and len(elif_cond_tokens) > 0:
3602
+ j += 1
3603
+ if j < len(tokens) and tokens[j].type in [LBRACE, COLON]:
3604
+ break
3605
+ elif_skipped_outer = False
3606
+ continue
3607
+ elif_cond_tokens.append(tokens[j])
3608
+ j += 1
3609
+
3610
+ elif_cond = self._parse_expression(elif_cond_tokens) if elif_cond_tokens else Identifier("true")
3611
+
3612
+ # Collect else-if block
3613
+ if j < len(tokens) and tokens[j].type == LBRACE:
3614
+ j += 1
3615
+ elif_inner = []
3616
+ depth = 1
3617
+ while j < len(tokens) and depth > 0:
3618
+ if tokens[j].type == LBRACE:
3619
+ depth += 1
3620
+ elif tokens[j].type == RBRACE:
3621
+ depth -= 1
3622
+ if depth == 0:
3623
+ break
3624
+ elif_inner.append(tokens[j])
3625
+ j += 1
3626
+ elif_block = BlockStatement()
3627
+ elif_block.statements = self._parse_block_statements(elif_inner)
3628
+ j += 1
3629
+ elif j < len(tokens) and tokens[j].type == COLON:
3630
+ j += 1
3631
+ elif_inner = []
3632
+ while j < len(tokens):
3633
+ if tokens[j].type in [IF, ELIF, ELSE, WHILE, FOR, ACTION, FUNCTION, LET, CONST, RETURN, CONTINUE, USE, EXPORT]:
3634
+ break
3635
+ elif_inner.append(tokens[j])
3636
+ j += 1
3637
+ elif_block = BlockStatement()
3638
+ elif_block.statements = self._parse_block_statements(elif_inner)
3639
+ else:
3640
+ elif_block = BlockStatement()
3641
+
3642
+ elif_parts.append((elif_cond, elif_block))
3643
+ continue # Continue the while loop to check for more elif/else
3492
3644
  # Collect else block
3493
- if j < len(tokens) and tokens[j].type == LBRACE:
3645
+ elif j < len(tokens) and tokens[j].type == LBRACE:
3494
3646
  # Brace-style
3495
3647
  j += 1
3496
3648
  else_inner = []
@@ -3611,7 +3763,10 @@ class ContextStackParser:
3611
3763
  # Don't break on statement starters that are inside braces
3612
3764
  # Only break if it's truly a new statement (e.g., not FUNCTION inside return expr)
3613
3765
  # ALSO: Don't break on the FIRST token (the return value itself), even if it's a keyword
3614
- if len(value_tokens) > 0 and t.type in statement_starters and t.type not in {FUNCTION, ACTION, RETURN}:
3766
+ # ALSO: Don't break on tokens that follow a DOT (property/method access)
3767
+ prev_val_token = value_tokens[-1] if value_tokens else None
3768
+ is_after_dot = prev_val_token and prev_val_token.type == DOT
3769
+ if len(value_tokens) > 0 and t.type in statement_starters and t.type not in {FUNCTION, ACTION, RETURN} and not is_after_dot:
3615
3770
  break
3616
3771
 
3617
3772
  value_tokens.append(t)
@@ -3764,9 +3919,22 @@ class ContextStackParser:
3764
3919
 
3765
3920
  # Collect iterator variable name
3766
3921
  item_name = None
3922
+ index_name = None
3767
3923
  if j < len(tokens) and tokens[j].type == IDENT:
3768
- item_name = tokens[j].literal
3924
+ first_ident = tokens[j].literal
3769
3925
  j += 1
3926
+
3927
+ # R-006/R-007 fix: Check for two-variable form: for each i, item in ...
3928
+ if j < len(tokens) and tokens[j].type == COMMA:
3929
+ j += 1 # skip comma
3930
+ if j < len(tokens) and tokens[j].type == IDENT:
3931
+ index_name = first_ident
3932
+ item_name = tokens[j].literal
3933
+ j += 1
3934
+ else:
3935
+ item_name = first_ident
3936
+ else:
3937
+ item_name = first_ident
3770
3938
 
3771
3939
  # Expect IN keyword
3772
3940
  if j < len(tokens) and tokens[j].type == IN:
@@ -3807,7 +3975,8 @@ class ContextStackParser:
3807
3975
  stmt = ForEachStatement(
3808
3976
  item=Identifier(item_name if item_name else 'item'),
3809
3977
  iterable=iterable,
3810
- body=body_block
3978
+ body=body_block,
3979
+ index=Identifier(index_name) if index_name else None
3811
3980
  )
3812
3981
  if stmt:
3813
3982
  statements.append(stmt)
@@ -4165,7 +4334,7 @@ class ContextStackParser:
4165
4334
  # Only check when we've completed a previous statement (e.g., after function call)
4166
4335
  # Don't check if the last token was DOT (we're in the middle of property access)
4167
4336
  # Don't check if the last token was ASSIGN (we're in the RHS of an assignment)
4168
- if nesting == 0 and len(run_tokens) > 0 and t.type == IDENT:
4337
+ if nesting == 0 and len(run_tokens) > 0 and t.type in (IDENT, THIS):
4169
4338
  # Only detect new assignment if previous token suggests end of previous statement
4170
4339
  # E.g., after RPAREN (end of function call) or after a complete value
4171
4340
  prev_token = run_tokens[-1] if run_tokens else None
@@ -1466,6 +1466,11 @@ class SmartContract:
1466
1466
  instance.deploy(evaluated_storage_values=initial_storage)
1467
1467
  instance.parent_contract = self
1468
1468
 
1469
+ # R-004 fix: Auto-call init() if it exists in the contract actions
1470
+ if 'init' in instance.actions:
1471
+ init_args = args if args else []
1472
+ instance.call_method('init', init_args)
1473
+
1469
1474
  return instance
1470
1475
 
1471
1476
  def __call__(self, *args):
@@ -1694,7 +1699,10 @@ class SmartContract:
1694
1699
  result = evaluator.eval_node(action.body, action_env, stack_trace=[])
1695
1700
 
1696
1701
  # Save any modified state variables back to storage
1697
- # SKIP variables that were set via this.property (direct storage updates)
1702
+ # For variables updated via this.property (direct storage updates),
1703
+ # also refresh action_env so that subsequent in-place mutations on
1704
+ # OTHER state vars are not overwritten with stale references.
1705
+ # This fixes R-015: push after map[key]=val being silently ignored.
1698
1706
  for var_node in self.storage_vars:
1699
1707
  # Extract variable name from node (same logic as above)
1700
1708
  var_name = None
@@ -1706,11 +1714,27 @@ class SmartContract:
1706
1714
  var_name = var_node
1707
1715
 
1708
1716
  if var_name:
1709
- # Skip if this was updated via this.property = value
1710
1717
  if var_name in self._direct_storage_updates:
1718
+ # Variable was set directly via this.prop = val.
1719
+ # Storage already has the authoritative value.
1720
+ # Refresh action_env to keep references consistent.
1721
+ latest = self.storage.get(var_name)
1722
+ if latest is not None:
1723
+ action_env.set(var_name, latest)
1711
1724
  continue
1712
1725
 
1726
+ # For vars NOT directly updated, sync action_env → storage.
1727
+ # Also check if the storage version was mutated in-place
1728
+ # (e.g. via this.list.push()) and prefer the latest reference.
1713
1729
  current_value = action_env.get(var_name)
1730
+ storage_value = self.storage.get(var_name)
1731
+
1732
+ # Use the storage reference if it's a mutable collection
1733
+ # that may have been mutated in-place via this.X access
1734
+ if storage_value is not None and storage_value is not current_value:
1735
+ # Storage was mutated independently — prefer storage version
1736
+ current_value = storage_value
1737
+
1714
1738
  if var_name == 'chain' and zexus_config.should_log('debug'):
1715
1739
  size = None
1716
1740
  try: