astreum 0.2.61__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- astreum/__init__.py +16 -7
- astreum/{_communication → communication}/__init__.py +3 -3
- astreum/communication/handlers/handshake.py +83 -0
- astreum/communication/handlers/ping.py +48 -0
- astreum/communication/handlers/storage_request.py +81 -0
- astreum/communication/models/__init__.py +0 -0
- astreum/{_communication → communication/models}/route.py +5 -5
- astreum/communication/setup.py +205 -0
- astreum/communication/start.py +38 -0
- astreum/consensus/__init__.py +20 -0
- astreum/consensus/genesis.py +66 -0
- astreum/consensus/models/__init__.py +0 -0
- astreum/consensus/models/account.py +84 -0
- astreum/consensus/models/accounts.py +72 -0
- astreum/consensus/models/block.py +364 -0
- astreum/{_consensus → consensus/models}/chain.py +7 -7
- astreum/{_consensus → consensus/models}/fork.py +8 -8
- astreum/consensus/models/receipt.py +98 -0
- astreum/{_consensus → consensus/models}/transaction.py +76 -78
- astreum/{_consensus → consensus}/setup.py +18 -50
- astreum/consensus/start.py +68 -0
- astreum/consensus/validator.py +95 -0
- astreum/{_consensus → consensus}/workers/discovery.py +20 -1
- astreum/consensus/workers/validation.py +291 -0
- astreum/{_consensus → consensus}/workers/verify.py +31 -2
- astreum/machine/__init__.py +20 -0
- astreum/machine/evaluations/__init__.py +0 -0
- astreum/{_lispeum → machine/evaluations}/high_evaluation.py +16 -15
- astreum/machine/evaluations/low_evaluation.py +281 -0
- astreum/machine/evaluations/script_evaluation.py +27 -0
- astreum/machine/models/__init__.py +0 -0
- astreum/machine/models/environment.py +31 -0
- astreum/{_lispeum → machine/models}/expression.py +36 -8
- astreum/machine/tokenizer.py +90 -0
- astreum/node.py +73 -781
- astreum/storage/__init__.py +7 -0
- astreum/storage/actions/get.py +69 -0
- astreum/storage/actions/set.py +132 -0
- astreum/{_storage → storage/models}/atom.py +55 -57
- astreum/{_storage/patricia.py → storage/models/trie.py} +227 -203
- astreum/storage/setup.py +44 -15
- {astreum-0.2.61.dist-info → astreum-0.3.1.dist-info}/METADATA +25 -24
- astreum-0.3.1.dist-info/RECORD +62 -0
- astreum/_communication/setup.py +0 -322
- astreum/_consensus/__init__.py +0 -20
- astreum/_consensus/account.py +0 -95
- astreum/_consensus/accounts.py +0 -38
- astreum/_consensus/block.py +0 -311
- astreum/_consensus/genesis.py +0 -72
- astreum/_consensus/receipt.py +0 -136
- astreum/_consensus/workers/validation.py +0 -125
- astreum/_lispeum/__init__.py +0 -16
- astreum/_lispeum/environment.py +0 -13
- astreum/_lispeum/low_evaluation.py +0 -123
- astreum/_lispeum/tokenizer.py +0 -22
- astreum/_node.py +0 -198
- astreum/_storage/__init__.py +0 -7
- astreum/_storage/setup.py +0 -35
- astreum/format.py +0 -75
- astreum/models/block.py +0 -441
- astreum/models/merkle.py +0 -205
- astreum/models/patricia.py +0 -393
- astreum/storage/object.py +0 -68
- astreum-0.2.61.dist-info/RECORD +0 -57
- /astreum/{models → communication/handlers}/__init__.py +0 -0
- /astreum/{_communication → communication/models}/message.py +0 -0
- /astreum/{_communication → communication/models}/peer.py +0 -0
- /astreum/{_communication → communication/models}/ping.py +0 -0
- /astreum/{_communication → communication}/util.py +0 -0
- /astreum/{_consensus → consensus}/workers/__init__.py +0 -0
- /astreum/{_lispeum → machine/models}/meter.py +0 -0
- /astreum/{_lispeum → machine}/parser.py +0 -0
- {astreum-0.2.61.dist-info → astreum-0.3.1.dist-info}/WHEEL +0 -0
- {astreum-0.2.61.dist-info → astreum-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {astreum-0.2.61.dist-info → astreum-0.3.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,291 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import time
|
|
4
|
+
from queue import Empty
|
|
5
|
+
from typing import Any, Callable
|
|
6
|
+
|
|
7
|
+
from ..models.account import Account
|
|
8
|
+
from ..models.accounts import Accounts
|
|
9
|
+
from ..models.block import Block
|
|
10
|
+
from ..models.transaction import apply_transaction
|
|
11
|
+
from ..validator import current_validator
|
|
12
|
+
from ...storage.models.atom import bytes_list_to_atoms
|
|
13
|
+
from ...communication.models.message import Message, MessageTopic
|
|
14
|
+
from ...communication.models.ping import Ping
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def make_validation_worker(
    node: Any,
) -> Callable[[], None]:
    """Build the validation worker bound to the given node.

    Returns a zero-argument callable intended to run on the node's
    validation thread. The worker loops until ``node._validation_stop_event``
    is set; each iteration attempts to produce, seal, announce and store
    one new block on top of ``node.latest_block_hash``.
    """

    def _validation_worker() -> None:
        node_logger = node.logger
        node_logger.info("Validation worker started")
        # Shared stop flag; checked once per block-production attempt.
        stop = node._validation_stop_event

        def _award_validator_reward(block: Block, reward_amount: int) -> None:
            """Credit the validator account with the provided reward."""
            if reward_amount <= 0:
                return
            accounts = getattr(block, "accounts", None)
            validator_key = getattr(block, "validator_public_key", None)
            if accounts is None or not validator_key:
                # Best effort: without an accounts snapshot or a key there is
                # nothing to credit, so the reward is silently skipped.
                node_logger.debug(
                    "Skipping validator reward; accounts snapshot or key missing"
                )
                return
            try:
                validator_account = accounts.get_account(
                    address=validator_key, node=node
                )
            except Exception:
                node_logger.exception("Unable to load validator account for reward")
                return
            if validator_account is None:
                # First reward for this key: start from a fresh account.
                validator_account = Account.create()
            validator_account.balance += reward_amount
            accounts.set_account(validator_key, validator_account)

        while not stop.is_set():
            # --- preconditions: our key must be the scheduled validator ---
            validation_public_key = getattr(node, "validation_public_key", None)
            if not validation_public_key:
                node_logger.debug("Validation public key unavailable; sleeping")
                time.sleep(0.5)
                continue

            latest_block_hash = getattr(node, "latest_block_hash", None)
            if not isinstance(latest_block_hash, (bytes, bytearray)):
                node_logger.warning("Missing latest_block_hash; retrying")
                time.sleep(0.5)
                continue

            node_logger.debug(
                "Querying current validator for block %s",
                latest_block_hash.hex()
                if isinstance(latest_block_hash, (bytes, bytearray))
                else latest_block_hash,
            )
            try:
                scheduled_validator, _ = current_validator(node, latest_block_hash)
            except Exception as exc:
                node_logger.exception("Unable to determine current validator: %s", exc)
                time.sleep(0.5)
                continue

            if scheduled_validator != validation_public_key:
                # Not our slot; back off briefly and re-check.
                expected_hex = (
                    scheduled_validator.hex()
                    if isinstance(scheduled_validator, (bytes, bytearray))
                    else scheduled_validator
                )
                node_logger.debug("Current validator mismatch; expected %s", expected_hex)
                time.sleep(0.5)
                continue

            try:
                previous_block = Block.from_atom(node, latest_block_hash)
            except Exception:
                node_logger.exception("Unable to load previous block for validation")
                time.sleep(0.5)
                continue

            # --- pull the first pending transaction (if any) ---
            try:
                current_hash = node._validation_transaction_queue.get_nowait()
                queue_empty = False
            except Empty:
                current_hash = None
                queue_empty = True
                node_logger.debug(
                    "No pending validation transactions; generating empty block"
                )

            try:
                accounts_snapshot = Accounts(root_hash=previous_block.accounts_hash)
            except Exception:
                # Block production continues without a snapshot; the reward
                # helper will then skip crediting the validator.
                accounts_snapshot = None
                node_logger.warning("Unable to initialise accounts snapshot for block")

            # Draft block: hashes/timestamp/nonce are filled in further below.
            new_block = Block(
                chain_id=getattr(node, "chain", 0),
                previous_block_hash=latest_block_hash,
                previous_block=previous_block,
                number=(previous_block.number or 0) + 1,
                timestamp=None,
                accounts_hash=previous_block.accounts_hash,
                transactions_total_fees=0,
                transactions_hash=None,
                receipts_hash=None,
                delay_difficulty=None,
                validator_public_key=validation_public_key,
                nonce=0,
                signature=None,
                accounts=accounts_snapshot,
                transactions=[],
                receipts=[],
            )
            node_logger.debug(
                "Creating block #%s extending %s",
                new_block.number,
                (
                    node.latest_block_hash.hex()
                    if isinstance(node.latest_block_hash, (bytes, bytearray))
                    else node.latest_block_hash
                ),
            )

            # we may want to add a timer to process part of the txs only on a slow computer
            total_fees = 0
            while current_hash is not None:
                try:
                    total_fees += apply_transaction(node, new_block, current_hash)
                except NotImplementedError:
                    # Unsupported transaction type: put it back for a later
                    # attempt and stop draining the queue for this block.
                    tx_hex = (
                        current_hash.hex()
                        if isinstance(current_hash, (bytes, bytearray))
                        else current_hash
                    )
                    node_logger.warning("Transaction %s unsupported; re-queued", tx_hex)
                    node._validation_transaction_queue.put(current_hash)
                    time.sleep(0.5)
                    break
                except Exception:
                    # Failed transactions are dropped (not re-queued); only
                    # the failure is logged.
                    tx_hex = (
                        current_hash.hex()
                        if isinstance(current_hash, (bytes, bytearray))
                        else current_hash
                    )
                    node_logger.exception("Failed applying transaction %s", tx_hex)

                try:
                    current_hash = node._validation_transaction_queue.get_nowait()
                except Empty:
                    current_hash = None

            # --- validator reward: collected fees, or 1 for an empty block ---
            new_block.transactions_total_fees = total_fees
            reward_amount = total_fees if total_fees > 0 else 1
            if total_fees == 0 and queue_empty:
                node_logger.debug("Awarding base validator reward of 1 aster")
            elif total_fees > 0:
                node_logger.debug(
                    "Collected %d aster in transaction fees for this block", total_fees
                )
            _award_validator_reward(new_block, reward_amount)

            # create an atom list of transactions, save the list head hash as the block's transactions_hash
            transactions = new_block.transactions or []
            tx_hashes = [bytes(tx.hash) for tx in transactions if tx.hash]
            head_hash, _ = bytes_list_to_atoms(tx_hashes)
            new_block.transactions_hash = head_hash
            node_logger.debug("Block includes %d transactions", len(transactions))

            # Atomize receipts the same way; keep the atoms for upload below.
            receipts = new_block.receipts or []
            receipt_atoms = []
            receipt_hashes = []
            for rcpt in receipts:
                receipt_id, atoms = rcpt.to_atom()
                receipt_atoms.extend(atoms)
                receipt_hashes.append(bytes(receipt_id))
            receipts_head, _ = bytes_list_to_atoms(receipt_hashes)
            new_block.receipts_hash = receipts_head
            node_logger.debug("Block includes %d receipts", len(receipts))

            # Flush cached account mutations into the trie and record its root.
            account_atoms = []
            if new_block.accounts is not None:
                try:
                    account_atoms = new_block.accounts.update_trie(node)
                    new_block.accounts_hash = new_block.accounts.root_hash
                    node_logger.debug(
                        "Updated trie for %d cached accounts",
                        len(new_block.accounts._cache),
                    )
                except Exception:
                    node_logger.exception("Failed to update accounts trie for block")

            # Timestamp must be strictly after the parent's.
            now = time.time()
            min_allowed = new_block.previous_block.timestamp + 1
            new_block.timestamp = max(int(now), min_allowed)

            new_block.delay_difficulty = Block.calculate_delay_difficulty(
                previous_timestamp=previous_block.timestamp,
                current_timestamp=new_block.timestamp,
                previous_difficulty=previous_block.delay_difficulty,
            )

            # NOTE(review): the nonce search uses the PREVIOUS block's
            # difficulty, while delay_difficulty above is the new block's —
            # presumably intentional (nonce proves work at the parent's
            # difficulty); confirm against the verification path.
            try:
                new_block.generate_nonce(difficulty=previous_block.delay_difficulty)
                node_logger.debug(
                    "Found nonce %s for block #%s at difficulty %s",
                    new_block.nonce,
                    new_block.number,
                    new_block.delay_difficulty,
                )
            except Exception:
                node_logger.exception("Failed while searching for block nonce")
                time.sleep(0.5)
                continue

            # atomize block
            new_block_hash, new_block_atoms = new_block.to_atom()
            # put as own latest block hash
            node.latest_block_hash = new_block_hash
            node.latest_block = new_block
            node_logger.info(
                "Validated block #%s with hash %s (%d atoms)",
                new_block.number,
                new_block_hash.hex(),
                len(new_block_atoms),
            )

            # ping peers in the validation route to update their records
            if node.validation_route and node.outgoing_queue and node.addresses:
                route_peers = {
                    peer_key
                    for bucket in getattr(node.validation_route, "buckets", {}).values()
                    for peer_key in bucket
                }
                if route_peers:
                    ping_payload = Ping(
                        is_validator=True,
                        latest_block=new_block_hash,
                    ).to_bytes()

                    message_bytes = Message(
                        topic=MessageTopic.PING,
                        content=ping_payload,
                    ).to_bytes()

                    for address, peer_key in node.addresses.items():
                        if peer_key in route_peers:
                            try:
                                node.outgoing_queue.put((message_bytes, address))
                                node_logger.debug(
                                    "Queued validator ping to %s (%s)",
                                    address,
                                    peer_key.hex()
                                    if isinstance(peer_key, (bytes, bytearray))
                                    else peer_key,
                                )
                            except Exception:
                                node_logger.exception(
                                    "Failed queueing validator ping to %s", address
                                )

            # upload block atoms
            for block_atom in new_block_atoms:
                atom_id = block_atom.object_id()
                node._hot_storage_set(key=atom_id, value=block_atom)

            # upload receipt atoms
            for receipt_atom in receipt_atoms:
                atom_id = receipt_atom.object_id()
                node._hot_storage_set(key=atom_id, value=receipt_atom)

            # upload account atoms
            for account_atom in account_atoms:
                atom_id = account_atom.object_id()
                node._hot_storage_set(key=atom_id, value=account_atom)

        node_logger.info("Validation worker stopped")

    return _validation_worker
|
|
@@ -4,13 +4,19 @@ import time
|
|
|
4
4
|
from queue import Empty
|
|
5
5
|
from typing import Any, Set
|
|
6
6
|
|
|
7
|
-
from ..fork import Fork
|
|
7
|
+
from ..models.fork import Fork
|
|
8
8
|
|
|
9
9
|
|
|
10
10
|
def _process_peers_latest_block(
|
|
11
11
|
node: Any, latest_block_hash: bytes, peer_ids: Set[Any]
|
|
12
12
|
) -> None:
|
|
13
13
|
"""Assign peers to the fork that matches their reported head."""
|
|
14
|
+
node_logger = node.logger
|
|
15
|
+
node_logger.debug(
|
|
16
|
+
"Processing %d peers reporting block %s",
|
|
17
|
+
len(peer_ids),
|
|
18
|
+
latest_block_hash.hex() if isinstance(latest_block_hash, (bytes, bytearray)) else latest_block_hash,
|
|
19
|
+
)
|
|
14
20
|
new_fork = Fork(head=latest_block_hash)
|
|
15
21
|
|
|
16
22
|
current_fork_heads = {
|
|
@@ -22,6 +28,11 @@ def _process_peers_latest_block(
|
|
|
22
28
|
if new_fork.validated_upto and new_fork.validated_upto in node.forks:
|
|
23
29
|
ref = node.forks[new_fork.validated_upto]
|
|
24
30
|
if getattr(ref, "malicious_block_hash", None):
|
|
31
|
+
node_logger.warning(
|
|
32
|
+
"Skipping fork from block %s referencing malicious fork %s",
|
|
33
|
+
latest_block_hash.hex() if isinstance(latest_block_hash, (bytes, bytearray)) else latest_block_hash,
|
|
34
|
+
new_fork.validated_upto.hex() if isinstance(new_fork.validated_upto, (bytes, bytearray)) else new_fork.validated_upto,
|
|
35
|
+
)
|
|
25
36
|
return
|
|
26
37
|
new_fork.root = ref.root
|
|
27
38
|
new_fork.validated_upto = ref.validated_upto
|
|
@@ -34,12 +45,20 @@ def _process_peers_latest_block(
|
|
|
34
45
|
fork.remove_peer(peer_id)
|
|
35
46
|
|
|
36
47
|
node.forks[latest_block_hash] = new_fork
|
|
48
|
+
node_logger.debug(
|
|
49
|
+
"Fork %s now has %d peers (total forks %d)",
|
|
50
|
+
latest_block_hash.hex() if isinstance(latest_block_hash, (bytes, bytearray)) else latest_block_hash,
|
|
51
|
+
len(new_fork.peers),
|
|
52
|
+
len(node.forks),
|
|
53
|
+
)
|
|
37
54
|
|
|
38
55
|
|
|
39
56
|
def make_verify_worker(node: Any):
|
|
40
57
|
"""Build the verify worker bound to the given node."""
|
|
41
58
|
|
|
42
59
|
def _verify_worker() -> None:
|
|
60
|
+
node_logger = node.logger
|
|
61
|
+
node_logger.info("Verify worker started")
|
|
43
62
|
stop = node._validation_stop_event
|
|
44
63
|
while not stop.is_set():
|
|
45
64
|
batch: list[tuple[bytes, Set[Any]]] = []
|
|
@@ -51,13 +70,23 @@ def make_verify_worker(node: Any):
|
|
|
51
70
|
pass
|
|
52
71
|
|
|
53
72
|
if not batch:
|
|
73
|
+
node_logger.debug("Verify queue empty; sleeping")
|
|
54
74
|
time.sleep(0.1)
|
|
55
75
|
continue
|
|
56
76
|
|
|
57
77
|
for latest_b, peers in batch:
|
|
58
78
|
try:
|
|
59
79
|
_process_peers_latest_block(node, latest_b, peers)
|
|
80
|
+
node_logger.debug(
|
|
81
|
+
"Updated forks from block %s for %d peers",
|
|
82
|
+
latest_b.hex() if isinstance(latest_b, (bytes, bytearray)) else latest_b,
|
|
83
|
+
len(peers),
|
|
84
|
+
)
|
|
60
85
|
except Exception:
|
|
61
|
-
|
|
86
|
+
latest_hex = (
|
|
87
|
+
latest_b.hex() if isinstance(latest_b, (bytes, bytearray)) else latest_b
|
|
88
|
+
)
|
|
89
|
+
node_logger.exception("Failed processing verification batch for %s", latest_hex)
|
|
90
|
+
node_logger.info("Verify worker stopped")
|
|
62
91
|
|
|
63
92
|
return _verify_worker
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
from .models.expression import Expr
|
|
2
|
+
from .models.environment import Env
|
|
3
|
+
from .evaluations.low_evaluation import low_eval
|
|
4
|
+
from .models.meter import Meter
|
|
5
|
+
from .parser import parse, ParseError
|
|
6
|
+
from .tokenizer import tokenize
|
|
7
|
+
from .evaluations.high_evaluation import high_eval
|
|
8
|
+
from .evaluations.script_evaluation import script_eval
|
|
9
|
+
|
|
10
|
+
__all__ = [
|
|
11
|
+
"Env",
|
|
12
|
+
"Expr",
|
|
13
|
+
"low_eval",
|
|
14
|
+
"Meter",
|
|
15
|
+
"parse",
|
|
16
|
+
"tokenize",
|
|
17
|
+
"high_eval",
|
|
18
|
+
"ParseError",
|
|
19
|
+
"script_eval",
|
|
20
|
+
]
|
|
File without changes
|
|
@@ -1,9 +1,9 @@
|
|
|
1
1
|
from typing import List, Optional, Union
|
|
2
2
|
import uuid
|
|
3
3
|
|
|
4
|
-
from .environment import Env
|
|
5
|
-
from .expression import Expr, error_expr, ERROR_SYMBOL
|
|
6
|
-
from .meter import Meter
|
|
4
|
+
from ..models.environment import Env
|
|
5
|
+
from ..models.expression import Expr, error_expr, ERROR_SYMBOL
|
|
6
|
+
from ..models.meter import Meter
|
|
7
7
|
|
|
8
8
|
|
|
9
9
|
def _is_error(expr: Expr) -> bool:
|
|
@@ -37,7 +37,8 @@ def _expr_to_bytes(expr: Expr) -> Optional[bytes]:
|
|
|
37
37
|
return None
|
|
38
38
|
|
|
39
39
|
|
|
40
|
-
def high_eval(self, env_id: uuid.UUID
|
|
40
|
+
def high_eval(self, expr: Expr, env_id: Optional[uuid.UUID] = None, meter = None) -> Expr:
|
|
41
|
+
"""Evaluate high-level expressions with scoped environments and metering."""
|
|
41
42
|
if meter is None:
|
|
42
43
|
meter = Meter()
|
|
43
44
|
|
|
@@ -51,7 +52,7 @@ def high_eval(self, env_id: uuid.UUID, expr: Expr, meter = None) -> Expr:
|
|
|
51
52
|
return expr
|
|
52
53
|
|
|
53
54
|
if isinstance(expr, Expr.Symbol):
|
|
54
|
-
bound = self.env_get(env_id, expr.value
|
|
55
|
+
bound = self.env_get(env_id, expr.value)
|
|
55
56
|
if bound is None:
|
|
56
57
|
return error_expr("eval", f"unbound symbol '{expr.value}'")
|
|
57
58
|
return bound
|
|
@@ -63,7 +64,7 @@ def high_eval(self, env_id: uuid.UUID, expr: Expr, meter = None) -> Expr:
|
|
|
63
64
|
if len(expr.elements) == 0:
|
|
64
65
|
return expr
|
|
65
66
|
if len(expr.elements) == 1:
|
|
66
|
-
return self.high_eval(
|
|
67
|
+
return self.high_eval(expr=expr.elements[0], env_id=env_id, meter=meter)
|
|
67
68
|
|
|
68
69
|
tail = expr.elements[-1]
|
|
69
70
|
|
|
@@ -75,10 +76,10 @@ def high_eval(self, env_id: uuid.UUID, expr: Expr, meter = None) -> Expr:
|
|
|
75
76
|
if not isinstance(name_e, Expr.Symbol):
|
|
76
77
|
return error_expr("eval", "def name must be symbol")
|
|
77
78
|
value_e = expr.elements[-3]
|
|
78
|
-
value_res = self.high_eval(
|
|
79
|
+
value_res = self.high_eval(expr=value_e, env_id=env_id, meter=meter)
|
|
79
80
|
if _is_error(value_res):
|
|
80
81
|
return value_res
|
|
81
|
-
self.env_set(
|
|
82
|
+
self.env_set(call_env_id, name_e.value, value_res)
|
|
82
83
|
return value_res
|
|
83
84
|
|
|
84
85
|
# Reference Call
|
|
@@ -124,7 +125,7 @@ def high_eval(self, env_id: uuid.UUID, expr: Expr, meter = None) -> Expr:
|
|
|
124
125
|
args_exprs = expr.elements[:-1]
|
|
125
126
|
arg_bytes: List[bytes] = []
|
|
126
127
|
for a in args_exprs:
|
|
127
|
-
v = self.high_eval(
|
|
128
|
+
v = self.high_eval(expr=a, env_id=env_id, meter=meter)
|
|
128
129
|
if _is_error(v):
|
|
129
130
|
return v
|
|
130
131
|
vb = to_bytes(v)
|
|
@@ -157,7 +158,7 @@ def high_eval(self, env_id: uuid.UUID, expr: Expr, meter = None) -> Expr:
|
|
|
157
158
|
return None
|
|
158
159
|
|
|
159
160
|
if isinstance(tok, Expr.ListExpr):
|
|
160
|
-
rv = self.high_eval(
|
|
161
|
+
rv = self.high_eval(expr=tok, env_id=env_id, meter=meter)
|
|
161
162
|
if _is_error(rv):
|
|
162
163
|
return rv
|
|
163
164
|
rb = to_bytes(rv)
|
|
@@ -198,11 +199,11 @@ def high_eval(self, env_id: uuid.UUID, expr: Expr, meter = None) -> Expr:
|
|
|
198
199
|
if not isinstance(params_expr, Expr.ListExpr):
|
|
199
200
|
return error_expr("eval", "fn params must be list")
|
|
200
201
|
|
|
201
|
-
params: List[
|
|
202
|
+
params: List[str] = []
|
|
202
203
|
for p in params_expr.elements:
|
|
203
204
|
if not isinstance(p, Expr.Symbol):
|
|
204
205
|
return error_expr("eval", "fn param must be symbol")
|
|
205
|
-
params.append(p.value
|
|
206
|
+
params.append(p.value)
|
|
206
207
|
|
|
207
208
|
args_exprs = expr.elements[:-1]
|
|
208
209
|
if len(args_exprs) != len(params):
|
|
@@ -210,7 +211,7 @@ def high_eval(self, env_id: uuid.UUID, expr: Expr, meter = None) -> Expr:
|
|
|
210
211
|
|
|
211
212
|
arg_bytes: List[bytes] = []
|
|
212
213
|
for a in args_exprs:
|
|
213
|
-
v = self.high_eval(
|
|
214
|
+
v = self.high_eval(expr=a, env_id=env_id, meter=meter)
|
|
214
215
|
if _is_error(v):
|
|
215
216
|
return v
|
|
216
217
|
if not isinstance(v, Expr.Byte):
|
|
@@ -225,12 +226,12 @@ def high_eval(self, env_id: uuid.UUID, expr: Expr, meter = None) -> Expr:
|
|
|
225
226
|
self.env_set(child_env, name_b, Expr.Byte(val_b[0]))
|
|
226
227
|
|
|
227
228
|
# evaluate HL body, metered from the top
|
|
228
|
-
return self.high_eval(
|
|
229
|
+
return self.high_eval(expr=body_expr, env_id=child_env, meter=meter)
|
|
229
230
|
finally:
|
|
230
231
|
self.environments.pop(child_env, None)
|
|
231
232
|
|
|
232
233
|
# ---------- default: resolve each element and return list ----------
|
|
233
|
-
resolved: List[Expr] = [self.high_eval(
|
|
234
|
+
resolved: List[Expr] = [self.high_eval(expr=e, env_id=env_id, meter=meter) for e in expr.elements]
|
|
234
235
|
return Expr.ListExpr(resolved)
|
|
235
236
|
finally:
|
|
236
237
|
self.environments.pop(call_env_id, None)
|