astreum 0.2.41__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. astreum/__init__.py +16 -7
  2. astreum/{_communication → communication}/__init__.py +3 -3
  3. astreum/communication/handlers/handshake.py +83 -0
  4. astreum/communication/handlers/ping.py +48 -0
  5. astreum/communication/handlers/storage_request.py +81 -0
  6. astreum/communication/models/__init__.py +0 -0
  7. astreum/{_communication → communication/models}/message.py +1 -0
  8. astreum/communication/models/peer.py +23 -0
  9. astreum/{_communication → communication/models}/route.py +45 -8
  10. astreum/{_communication → communication}/setup.py +46 -95
  11. astreum/communication/start.py +38 -0
  12. astreum/consensus/__init__.py +20 -0
  13. astreum/consensus/genesis.py +66 -0
  14. astreum/consensus/models/__init__.py +0 -0
  15. astreum/consensus/models/account.py +84 -0
  16. astreum/consensus/models/accounts.py +72 -0
  17. astreum/consensus/models/block.py +364 -0
  18. astreum/{_consensus → consensus/models}/chain.py +7 -7
  19. astreum/{_consensus → consensus/models}/fork.py +8 -8
  20. astreum/consensus/models/receipt.py +98 -0
  21. astreum/consensus/models/transaction.py +213 -0
  22. astreum/{_consensus → consensus}/setup.py +26 -11
  23. astreum/consensus/start.py +68 -0
  24. astreum/consensus/validator.py +95 -0
  25. astreum/{_consensus → consensus}/workers/discovery.py +20 -1
  26. astreum/consensus/workers/validation.py +291 -0
  27. astreum/{_consensus → consensus}/workers/verify.py +32 -3
  28. astreum/machine/__init__.py +20 -0
  29. astreum/machine/evaluations/__init__.py +0 -0
  30. astreum/machine/evaluations/high_evaluation.py +237 -0
  31. astreum/machine/evaluations/low_evaluation.py +281 -0
  32. astreum/machine/evaluations/script_evaluation.py +27 -0
  33. astreum/machine/models/__init__.py +0 -0
  34. astreum/machine/models/environment.py +31 -0
  35. astreum/machine/models/expression.py +218 -0
  36. astreum/{_lispeum → machine}/parser.py +26 -31
  37. astreum/machine/tokenizer.py +90 -0
  38. astreum/node.py +73 -781
  39. astreum/storage/__init__.py +7 -0
  40. astreum/storage/actions/get.py +69 -0
  41. astreum/storage/actions/set.py +132 -0
  42. astreum/storage/models/atom.py +107 -0
  43. astreum/{_storage/patricia.py → storage/models/trie.py} +236 -177
  44. astreum/storage/setup.py +44 -15
  45. astreum/utils/bytes.py +24 -0
  46. astreum/utils/integer.py +25 -0
  47. astreum/utils/logging.py +219 -0
  48. astreum-0.3.1.dist-info/METADATA +160 -0
  49. astreum-0.3.1.dist-info/RECORD +62 -0
  50. astreum/_communication/peer.py +0 -11
  51. astreum/_consensus/__init__.py +0 -20
  52. astreum/_consensus/account.py +0 -170
  53. astreum/_consensus/accounts.py +0 -67
  54. astreum/_consensus/block.py +0 -328
  55. astreum/_consensus/genesis.py +0 -141
  56. astreum/_consensus/receipt.py +0 -177
  57. astreum/_consensus/transaction.py +0 -192
  58. astreum/_consensus/workers/validation.py +0 -122
  59. astreum/_lispeum/__init__.py +0 -16
  60. astreum/_lispeum/environment.py +0 -13
  61. astreum/_lispeum/expression.py +0 -37
  62. astreum/_lispeum/high_evaluation.py +0 -177
  63. astreum/_lispeum/low_evaluation.py +0 -123
  64. astreum/_lispeum/tokenizer.py +0 -22
  65. astreum/_node.py +0 -58
  66. astreum/_storage/__init__.py +0 -5
  67. astreum/_storage/atom.py +0 -117
  68. astreum/format.py +0 -75
  69. astreum/models/block.py +0 -441
  70. astreum/models/merkle.py +0 -205
  71. astreum/models/patricia.py +0 -393
  72. astreum/storage/object.py +0 -68
  73. astreum-0.2.41.dist-info/METADATA +0 -146
  74. astreum-0.2.41.dist-info/RECORD +0 -53
  75. /astreum/{models → communication/handlers}/__init__.py +0 -0
  76. /astreum/{_communication → communication/models}/ping.py +0 -0
  77. /astreum/{_communication → communication}/util.py +0 -0
  78. /astreum/{_consensus → consensus}/workers/__init__.py +0 -0
  79. /astreum/{_lispeum → machine/models}/meter.py +0 -0
  80. {astreum-0.2.41.dist-info → astreum-0.3.1.dist-info}/WHEEL +0 -0
  81. {astreum-0.2.41.dist-info → astreum-0.3.1.dist-info}/licenses/LICENSE +0 -0
  82. {astreum-0.2.41.dist-info → astreum-0.3.1.dist-info}/top_level.txt +0 -0
astreum/consensus/workers/validation.py
@@ -0,0 +1,291 @@
+ from __future__ import annotations
+
+ import time
+ from queue import Empty
+ from typing import Any, Callable
+
+ from ..models.account import Account
+ from ..models.accounts import Accounts
+ from ..models.block import Block
+ from ..models.transaction import apply_transaction
+ from ..validator import current_validator
+ from ...storage.models.atom import bytes_list_to_atoms
+ from ...communication.models.message import Message, MessageTopic
+ from ...communication.models.ping import Ping
+
+
+ def make_validation_worker(
+     node: Any,
+ ) -> Callable[[], None]:
+     """Build the validation worker bound to the given node."""
+
+     def _validation_worker() -> None:
+         node_logger = node.logger
+         node_logger.info("Validation worker started")
+         stop = node._validation_stop_event
+
+         def _award_validator_reward(block: Block, reward_amount: int) -> None:
+             """Credit the validator account with the provided reward."""
+             if reward_amount <= 0:
+                 return
+             accounts = getattr(block, "accounts", None)
+             validator_key = getattr(block, "validator_public_key", None)
+             if accounts is None or not validator_key:
+                 node_logger.debug(
+                     "Skipping validator reward; accounts snapshot or key missing"
+                 )
+                 return
+             try:
+                 validator_account = accounts.get_account(
+                     address=validator_key, node=node
+                 )
+             except Exception:
+                 node_logger.exception("Unable to load validator account for reward")
+                 return
+             if validator_account is None:
+                 validator_account = Account.create()
+             validator_account.balance += reward_amount
+             accounts.set_account(validator_key, validator_account)
+
+         while not stop.is_set():
+             validation_public_key = getattr(node, "validation_public_key", None)
+             if not validation_public_key:
+                 node_logger.debug("Validation public key unavailable; sleeping")
+                 time.sleep(0.5)
+                 continue
+
+             latest_block_hash = getattr(node, "latest_block_hash", None)
+             if not isinstance(latest_block_hash, (bytes, bytearray)):
+                 node_logger.warning("Missing latest_block_hash; retrying")
+                 time.sleep(0.5)
+                 continue
+
+             node_logger.debug(
+                 "Querying current validator for block %s",
+                 latest_block_hash.hex()
+                 if isinstance(latest_block_hash, (bytes, bytearray))
+                 else latest_block_hash,
+             )
+             try:
+                 scheduled_validator, _ = current_validator(node, latest_block_hash)
+             except Exception as exc:
+                 node_logger.exception("Unable to determine current validator: %s", exc)
+                 time.sleep(0.5)
+                 continue
+
+             if scheduled_validator != validation_public_key:
+                 expected_hex = (
+                     scheduled_validator.hex()
+                     if isinstance(scheduled_validator, (bytes, bytearray))
+                     else scheduled_validator
+                 )
+                 node_logger.debug("Current validator mismatch; expected %s", expected_hex)
+                 time.sleep(0.5)
+                 continue
+
+             try:
+                 previous_block = Block.from_atom(node, latest_block_hash)
+             except Exception:
+                 node_logger.exception("Unable to load previous block for validation")
+                 time.sleep(0.5)
+                 continue
+
+             try:
+                 current_hash = node._validation_transaction_queue.get_nowait()
+                 queue_empty = False
+             except Empty:
+                 current_hash = None
+                 queue_empty = True
+                 node_logger.debug(
+                     "No pending validation transactions; generating empty block"
+                 )
+
+             try:
+                 accounts_snapshot = Accounts(root_hash=previous_block.accounts_hash)
+             except Exception:
+                 accounts_snapshot = None
+                 node_logger.warning("Unable to initialise accounts snapshot for block")
+
+             new_block = Block(
+                 chain_id=getattr(node, "chain", 0),
+                 previous_block_hash=latest_block_hash,
+                 previous_block=previous_block,
+                 number=(previous_block.number or 0) + 1,
+                 timestamp=None,
+                 accounts_hash=previous_block.accounts_hash,
+                 transactions_total_fees=0,
+                 transactions_hash=None,
+                 receipts_hash=None,
+                 delay_difficulty=None,
+                 validator_public_key=validation_public_key,
+                 nonce=0,
+                 signature=None,
+                 accounts=accounts_snapshot,
+                 transactions=[],
+                 receipts=[],
+             )
+             node_logger.debug(
+                 "Creating block #%s extending %s",
+                 new_block.number,
+                 (
+                     node.latest_block_hash.hex()
+                     if isinstance(node.latest_block_hash, (bytes, bytearray))
+                     else node.latest_block_hash
+                 ),
+             )
+
+             # we may want to add a timer to process part of the txs only on a slow computer
+             total_fees = 0
+             while current_hash is not None:
+                 try:
+                     total_fees += apply_transaction(node, new_block, current_hash)
+                 except NotImplementedError:
+                     tx_hex = (
+                         current_hash.hex()
+                         if isinstance(current_hash, (bytes, bytearray))
+                         else current_hash
+                     )
+                     node_logger.warning("Transaction %s unsupported; re-queued", tx_hex)
+                     node._validation_transaction_queue.put(current_hash)
+                     time.sleep(0.5)
+                     break
+                 except Exception:
+                     tx_hex = (
+                         current_hash.hex()
+                         if isinstance(current_hash, (bytes, bytearray))
+                         else current_hash
+                     )
+                     node_logger.exception("Failed applying transaction %s", tx_hex)
+
+                 try:
+                     current_hash = node._validation_transaction_queue.get_nowait()
+                 except Empty:
+                     current_hash = None
+
+             new_block.transactions_total_fees = total_fees
+             reward_amount = total_fees if total_fees > 0 else 1
+             if total_fees == 0 and queue_empty:
+                 node_logger.debug("Awarding base validator reward of 1 aster")
+             elif total_fees > 0:
+                 node_logger.debug(
+                     "Collected %d aster in transaction fees for this block", total_fees
+                 )
+             _award_validator_reward(new_block, reward_amount)
+
+             # create an atom list of transactions, save the list head hash as the block's transactions_hash
+             transactions = new_block.transactions or []
+             tx_hashes = [bytes(tx.hash) for tx in transactions if tx.hash]
+             head_hash, _ = bytes_list_to_atoms(tx_hashes)
+             new_block.transactions_hash = head_hash
+             node_logger.debug("Block includes %d transactions", len(transactions))
+
+             receipts = new_block.receipts or []
+             receipt_atoms = []
+             receipt_hashes = []
+             for rcpt in receipts:
+                 receipt_id, atoms = rcpt.to_atom()
+                 receipt_atoms.extend(atoms)
+                 receipt_hashes.append(bytes(receipt_id))
+             receipts_head, _ = bytes_list_to_atoms(receipt_hashes)
+             new_block.receipts_hash = receipts_head
+             node_logger.debug("Block includes %d receipts", len(receipts))
+
+             account_atoms = []
+             if new_block.accounts is not None:
+                 try:
+                     account_atoms = new_block.accounts.update_trie(node)
+                     new_block.accounts_hash = new_block.accounts.root_hash
+                     node_logger.debug(
+                         "Updated trie for %d cached accounts",
+                         len(new_block.accounts._cache),
+                     )
+                 except Exception:
+                     node_logger.exception("Failed to update accounts trie for block")
+
+             now = time.time()
+             min_allowed = new_block.previous_block.timestamp + 1
+             new_block.timestamp = max(int(now), min_allowed)
+
+             new_block.delay_difficulty = Block.calculate_delay_difficulty(
+                 previous_timestamp=previous_block.timestamp,
+                 current_timestamp=new_block.timestamp,
+                 previous_difficulty=previous_block.delay_difficulty,
+             )
+
+             try:
+                 new_block.generate_nonce(difficulty=previous_block.delay_difficulty)
+                 node_logger.debug(
+                     "Found nonce %s for block #%s at difficulty %s",
+                     new_block.nonce,
+                     new_block.number,
+                     new_block.delay_difficulty,
+                 )
+             except Exception:
+                 node_logger.exception("Failed while searching for block nonce")
+                 time.sleep(0.5)
+                 continue
+
+             # atomize block
+             new_block_hash, new_block_atoms = new_block.to_atom()
+             # put as own latest block hash
+             node.latest_block_hash = new_block_hash
+             node.latest_block = new_block
+             node_logger.info(
+                 "Validated block #%s with hash %s (%d atoms)",
+                 new_block.number,
+                 new_block_hash.hex(),
+                 len(new_block_atoms),
+             )
+
+             # ping peers in the validation route to update their records
+             if node.validation_route and node.outgoing_queue and node.addresses:
+                 route_peers = {
+                     peer_key
+                     for bucket in getattr(node.validation_route, "buckets", {}).values()
+                     for peer_key in bucket
+                 }
+                 if route_peers:
+                     ping_payload = Ping(
+                         is_validator=True,
+                         latest_block=new_block_hash,
+                     ).to_bytes()
+
+                     message_bytes = Message(
+                         topic=MessageTopic.PING,
+                         content=ping_payload,
+                     ).to_bytes()
+
+                     for address, peer_key in node.addresses.items():
+                         if peer_key in route_peers:
+                             try:
+                                 node.outgoing_queue.put((message_bytes, address))
+                                 node_logger.debug(
+                                     "Queued validator ping to %s (%s)",
+                                     address,
+                                     peer_key.hex()
+                                     if isinstance(peer_key, (bytes, bytearray))
+                                     else peer_key,
+                                 )
+                             except Exception:
+                                 node_logger.exception(
+                                     "Failed queueing validator ping to %s", address
+                                 )
+
+             # upload block atoms
+             for block_atom in new_block_atoms:
+                 atom_id = block_atom.object_id()
+                 node._hot_storage_set(key=atom_id, value=block_atom)
+
+             # upload receipt atoms
+             for receipt_atom in receipt_atoms:
+                 atom_id = receipt_atom.object_id()
+                 node._hot_storage_set(key=atom_id, value=receipt_atom)
+
+             # upload account atoms
+             for account_atom in account_atoms:
+                 atom_id = account_atom.object_id()
+                 node._hot_storage_set(key=atom_id, value=account_atom)
+
+         node_logger.info("Validation worker stopped")
+
+     return _validation_worker
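Note: `make_validation_worker` returns a zero-argument callable bound to the node, and the loop above exits once `node._validation_stop_event` is set. A minimal sketch of how such a factory-built worker would typically be run on a background thread (the actual wiring presumably lives in `astreum/consensus/start.py`, which is not reproduced in this diff; the `node` object and thread name below are assumptions):

    import threading

    # hypothetical wiring; `node` is whatever object carries the queues, keys and stop event
    worker = make_validation_worker(node)
    thread = threading.Thread(target=worker, name="validation-worker", daemon=True)
    thread.start()
    # ... later, on shutdown:
    node._validation_stop_event.set()  # asks the while-loop above to exit
    thread.join()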
astreum/consensus/workers/verify.py
@@ -4,24 +4,35 @@ import time
  from queue import Empty
  from typing import Any, Set

- from ..fork import Fork
+ from ..models.fork import Fork


  def _process_peers_latest_block(
      node: Any, latest_block_hash: bytes, peer_ids: Set[Any]
  ) -> None:
      """Assign peers to the fork that matches their reported head."""
+     node_logger = node.logger
+     node_logger.debug(
+         "Processing %d peers reporting block %s",
+         len(peer_ids),
+         latest_block_hash.hex() if isinstance(latest_block_hash, (bytes, bytearray)) else latest_block_hash,
+     )
      new_fork = Fork(head=latest_block_hash)

      current_fork_heads = {
          fk.head for fk in node.forks.values() if fk.head != latest_block_hash
      }

-     new_fork.validate(storage_get=node._local_get, stop_heads=current_fork_heads)
+     new_fork.validate(storage_get=node.storage_get, stop_heads=current_fork_heads)

      if new_fork.validated_upto and new_fork.validated_upto in node.forks:
          ref = node.forks[new_fork.validated_upto]
          if getattr(ref, "malicious_block_hash", None):
+             node_logger.warning(
+                 "Skipping fork from block %s referencing malicious fork %s",
+                 latest_block_hash.hex() if isinstance(latest_block_hash, (bytes, bytearray)) else latest_block_hash,
+                 new_fork.validated_upto.hex() if isinstance(new_fork.validated_upto, (bytes, bytearray)) else new_fork.validated_upto,
+             )
              return
          new_fork.root = ref.root
          new_fork.validated_upto = ref.validated_upto
@@ -34,12 +45,20 @@ def _process_peers_latest_block(
          fork.remove_peer(peer_id)

      node.forks[latest_block_hash] = new_fork
+     node_logger.debug(
+         "Fork %s now has %d peers (total forks %d)",
+         latest_block_hash.hex() if isinstance(latest_block_hash, (bytes, bytearray)) else latest_block_hash,
+         len(new_fork.peers),
+         len(node.forks),
+     )


  def make_verify_worker(node: Any):
      """Build the verify worker bound to the given node."""

      def _verify_worker() -> None:
+         node_logger = node.logger
+         node_logger.info("Verify worker started")
          stop = node._validation_stop_event
          while not stop.is_set():
              batch: list[tuple[bytes, Set[Any]]] = []
@@ -51,13 +70,23 @@ def make_verify_worker(node: Any):
                  pass

              if not batch:
+                 node_logger.debug("Verify queue empty; sleeping")
                  time.sleep(0.1)
                  continue

              for latest_b, peers in batch:
                  try:
                      _process_peers_latest_block(node, latest_b, peers)
+                     node_logger.debug(
+                         "Updated forks from block %s for %d peers",
+                         latest_b.hex() if isinstance(latest_b, (bytes, bytearray)) else latest_b,
+                         len(peers),
+                     )
                  except Exception:
-                     pass
+                     latest_hex = (
+                         latest_b.hex() if isinstance(latest_b, (bytes, bytearray)) else latest_b
+                     )
+                     node_logger.exception("Failed processing verification batch for %s", latest_hex)
+         node_logger.info("Verify worker stopped")

      return _verify_worker
astreum/machine/__init__.py
@@ -0,0 +1,20 @@
+ from .models.expression import Expr
+ from .models.environment import Env
+ from .evaluations.low_evaluation import low_eval
+ from .models.meter import Meter
+ from .parser import parse, ParseError
+ from .tokenizer import tokenize
+ from .evaluations.high_evaluation import high_eval
+ from .evaluations.script_evaluation import script_eval
+
+ __all__ = [
+     "Env",
+     "Expr",
+     "low_eval",
+     "Meter",
+     "parse",
+     "tokenize",
+     "high_eval",
+     "ParseError",
+     "script_eval",
+ ]
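For orientation: this release moves the old private `astreum._lispeum` modules under the public `astreum.machine` package, which re-exports the interpreter pieces listed above. A rough, unverified usage sketch based only on the `__all__` list shown here (exact function signatures are not part of this diff):

    # 0.2.x layout (removed in this release)
    # from astreum._lispeum import ...
    # 0.3.x layout, re-exported by astreum/machine/__init__.py
    from astreum.machine import Env, Expr, Meter, tokenize, parse, high_eval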
astreum/machine/evaluations/__init__.py: file without changes
astreum/machine/evaluations/high_evaluation.py
@@ -0,0 +1,237 @@
+ from typing import List, Optional, Union
+ import uuid
+
+ from ..models.environment import Env
+ from ..models.expression import Expr, error_expr, ERROR_SYMBOL
+ from ..models.meter import Meter
+
+
+ def _is_error(expr: Expr) -> bool:
+     return (
+         isinstance(expr, Expr.ListExpr)
+         and bool(expr.elements)
+         and isinstance(expr.elements[0], Expr.Symbol)
+         and expr.elements[0].value == ERROR_SYMBOL
+     )
+
+
+ def _hex_symbol_to_bytes(value: Optional[str]) -> Optional[bytes]:
+     if not value:
+         return None
+     data = value.strip()
+     if data.startswith(("0x", "0X")):
+         data = data[2:]
+     if len(data) % 2:
+         data = "0" + data
+     try:
+         return bytes.fromhex(data)
+     except ValueError:
+         return None
+
+
+ def _expr_to_bytes(expr: Expr) -> Optional[bytes]:
+     if isinstance(expr, Expr.Bytes):
+         return expr.value
+     if isinstance(expr, Expr.Symbol):
+         return _hex_symbol_to_bytes(expr.value)
+     return None
+
+
+ def high_eval(self, expr: Expr, env_id: Optional[uuid.UUID] = None, meter=None) -> Expr:
+     """Evaluate high-level expressions with scoped environments and metering."""
+     if meter is None:
+         meter = Meter()
+
+     call_env_id = uuid.uuid4()
+     self.environments[call_env_id] = Env(parent_id=env_id)
+     env_id = call_env_id
+
+     try:
+         # ---------- atoms ----------
+         if _is_error(expr):
+             return expr
+
+         if isinstance(expr, Expr.Symbol):
+             bound = self.env_get(env_id, expr.value)
+             if bound is None:
+                 return error_expr("eval", f"unbound symbol '{expr.value}'")
+             return bound
+
+         if not isinstance(expr, Expr.ListExpr):
+             return expr  # Expr.Byte or other literals passthrough
+
+         # ---------- empty / single ----------
+         if len(expr.elements) == 0:
+             return expr
+         if len(expr.elements) == 1:
+             return self.high_eval(expr=expr.elements[0], env_id=env_id, meter=meter)
+
+         tail = expr.elements[-1]
+
+         # ---------- (value name def) ----------
+         if isinstance(tail, Expr.Symbol) and tail.value == "def":
+             if len(expr.elements) < 3:
+                 return error_expr("eval", "def expects (value name def)")
+             name_e = expr.elements[-2]
+             if not isinstance(name_e, Expr.Symbol):
+                 return error_expr("eval", "def name must be symbol")
+             value_e = expr.elements[-3]
+             value_res = self.high_eval(expr=value_e, env_id=env_id, meter=meter)
+             if _is_error(value_res):
+                 return value_res
+             self.env_set(call_env_id, name_e.value, value_res)
+             return value_res
+
+         # Reference Call
+         # (atom_id ref)
+         if isinstance(tail, Expr.Symbol) and tail.value == "ref":
+             if len(expr.elements) != 2:
+                 return error_expr("eval", "ref expects (atom_id ref)")
+             key_bytes = _expr_to_bytes(expr.elements[0])
+             if not key_bytes:
+                 return error_expr("eval", "ref expects (atom_id ref)")
+             stored_list = self.get_expr_list_from_storage(key_bytes)
+             if stored_list is None:
+                 return error_expr("eval", "ref target not found")
+             return stored_list
+
+         # Low Level Call
+         # (arg1 arg2 ... ((body) sk))
+         if isinstance(tail, Expr.ListExpr):
+             inner = tail.elements
+             if len(inner) >= 2 and isinstance(inner[-1], Expr.Symbol) and inner[-1].value == "sk":
+                 body_expr = inner[-2]
+                 if not isinstance(body_expr, Expr.ListExpr):
+                     return error_expr("eval", "sk body must be list")
+
+                 # helper: turn an Expr into a contiguous bytes buffer
+                 def to_bytes(v: Expr) -> Union[bytes, Expr]:
+                     if isinstance(v, Expr.Byte):
+                         return bytes([v.value & 0xFF])
+                     if isinstance(v, Expr.ListExpr):
+                         # expect a list of Expr.Byte
+                         out: bytearray = bytearray()
+                         for el in v.elements:
+                             if isinstance(el, Expr.Byte):
+                                 out.append(el.value & 0xFF)
+                             else:
+                                 return error_expr("eval", "byte list must contain only Byte elements")
+                         return bytes(out)
+                     if _is_error(v):
+                         return v
+                     return error_expr("eval", "argument must resolve to Byte or (Byte ...)")
+
+                 # resolve ALL preceding args into bytes (can be Byte or List[Byte])
+                 args_exprs = expr.elements[:-1]
+                 arg_bytes: List[bytes] = []
+                 for a in args_exprs:
+                     v = self.high_eval(expr=a, env_id=env_id, meter=meter)
+                     if _is_error(v):
+                         return v
+                     vb = to_bytes(v)
+                     if not isinstance(vb, bytes):
+                         if _is_error(vb):
+                             return vb
+                         return error_expr("eval", "unexpected expression while coercing to bytes")
+                     arg_bytes.append(vb)
+
+                 # build low-level code with $0-based placeholders ($0 = first arg)
+                 code: List[bytes] = []
+
+                 def emit(tok: Expr) -> Union[None, Expr]:
+                     if isinstance(tok, Expr.Symbol):
+                         name = tok.value
+                         if name.startswith("$"):
+                             idx_s = name[1:]
+                             if not idx_s.isdigit():
+                                 return error_expr("eval", "invalid sk placeholder")
+                             idx = int(idx_s)  # $0 is first
+                             if idx < 0 or idx >= len(arg_bytes):
+                                 return error_expr("eval", "arity mismatch in sk placeholder")
+                             code.append(arg_bytes[idx])
+                             return None
+                         code.append(name.encode())
+                         return None
+
+                     if isinstance(tok, Expr.Byte):
+                         code.append(bytes([tok.value & 0xFF]))
+                         return None
+
+                     if isinstance(tok, Expr.ListExpr):
+                         rv = self.high_eval(expr=tok, env_id=env_id, meter=meter)
+                         if _is_error(rv):
+                             return rv
+                         rb = to_bytes(rv)
+                         if not isinstance(rb, bytes):
+                             if _is_error(rb):
+                                 return rb
+                             return error_expr("eval", "unexpected expression while coercing list token to bytes")
+                         code.append(rb)
+                         return None
+
+                     if _is_error(tok):
+                         return tok
+
+                     return error_expr("eval", "invalid token in sk body")
+
+                 for t in body_expr.elements:
+                     err = emit(t)
+                     if err is not None and _is_error(err):
+                         return err
+
+                 # Execute low-level code built from sk-body using the caller's meter
+                 res = self.low_eval(code, meter=meter)
+                 return res
+
+         # High Level Call
+         # (arg1 arg2 ... ((body) (params) fn))
+         if isinstance(tail, Expr.ListExpr):
+             fn_form = tail
+             if (len(fn_form.elements) >= 3
+                     and isinstance(fn_form.elements[-1], Expr.Symbol)
+                     and fn_form.elements[-1].value == "fn"):
+
+                 body_expr = fn_form.elements[-3]
+                 params_expr = fn_form.elements[-2]
+
+                 if not isinstance(body_expr, Expr.ListExpr):
+                     return error_expr("eval", "fn body must be list")
+                 if not isinstance(params_expr, Expr.ListExpr):
+                     return error_expr("eval", "fn params must be list")
+
+                 params: List[str] = []
+                 for p in params_expr.elements:
+                     if not isinstance(p, Expr.Symbol):
+                         return error_expr("eval", "fn param must be symbol")
+                     params.append(p.value)
+
+                 args_exprs = expr.elements[:-1]
+                 if len(args_exprs) != len(params):
+                     return error_expr("eval", "arity mismatch")
+
+                 arg_bytes: List[bytes] = []
+                 for a in args_exprs:
+                     v = self.high_eval(expr=a, env_id=env_id, meter=meter)
+                     if _is_error(v):
+                         return v
+                     if not isinstance(v, Expr.Byte):
+                         return error_expr("eval", "argument must resolve to Byte")
+                     arg_bytes.append(bytes([v.value & 0xFF]))
+
+                 # child env, bind params -> Expr.Byte
+                 child_env = uuid.uuid4()
+                 self.environments[child_env] = Env(parent_id=env_id)
+                 try:
+                     for name_b, val_b in zip(params, arg_bytes):
+                         self.env_set(child_env, name_b, Expr.Byte(val_b[0]))
+
+                     # evaluate HL body, metered from the top
+                     return self.high_eval(expr=body_expr, env_id=child_env, meter=meter)
+                 finally:
+                     self.environments.pop(child_env, None)
+
+         # ---------- default: resolve each element and return list ----------
+         resolved: List[Expr] = [self.high_eval(expr=e, env_id=env_id, meter=meter) for e in expr.elements]
+         return Expr.ListExpr(resolved)
+     finally:
+         self.environments.pop(call_env_id, None)