astreum 0.2.40__py3-none-any.whl → 0.2.42__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of astreum might be problematic.
- astreum/_consensus/__init__.py +4 -0
- astreum/_consensus/account.py +95 -0
- astreum/_consensus/accounts.py +38 -0
- astreum/_consensus/block.py +9 -2
- astreum/_consensus/chain.py +65 -65
- astreum/_consensus/fork.py +99 -99
- astreum/_consensus/genesis.py +141 -0
- astreum/_consensus/receipt.py +11 -1
- astreum/_consensus/setup.py +15 -152
- astreum/_consensus/transaction.py +71 -23
- astreum/_consensus/workers/__init__.py +9 -0
- astreum/_consensus/workers/discovery.py +48 -0
- astreum/_consensus/workers/validation.py +122 -0
- astreum/_consensus/workers/verify.py +63 -0
- astreum/_storage/atom.py +24 -7
- astreum/models/block.py +22 -22
- astreum/node.py +755 -753
- astreum/utils/integer.py +25 -0
- {astreum-0.2.40.dist-info → astreum-0.2.42.dist-info}/METADATA +1 -1
- {astreum-0.2.40.dist-info → astreum-0.2.42.dist-info}/RECORD +23 -28
- astreum/lispeum/__init__.py +0 -0
- astreum/lispeum/environment.py +0 -40
- astreum/lispeum/expression.py +0 -86
- astreum/lispeum/parser.py +0 -41
- astreum/lispeum/tokenizer.py +0 -52
- astreum/models/account.py +0 -91
- astreum/models/accounts.py +0 -34
- astreum/models/transaction.py +0 -106
- astreum/relay/__init__.py +0 -0
- astreum/relay/peer.py +0 -9
- astreum/relay/route.py +0 -25
- astreum/relay/setup.py +0 -58
- {astreum-0.2.40.dist-info → astreum-0.2.42.dist-info}/WHEEL +0 -0
- {astreum-0.2.40.dist-info → astreum-0.2.42.dist-info}/licenses/LICENSE +0 -0
- {astreum-0.2.40.dist-info → astreum-0.2.42.dist-info}/top_level.txt +0 -0
astreum/_consensus/setup.py
CHANGED
@@ -1,15 +1,14 @@
 from __future__ import annotations
 
 import threading
-import time
-from queue import Empty, Queue
-from typing import Any, Dict, Optional, Tuple
+from queue import Queue
+from typing import Any
 
-from .…
-…
-…
-…
-…
+from .workers import (
+    make_discovery_worker,
+    make_validation_worker,
+    make_verify_worker,
+)
 
 
 def current_validator(node: Any) -> bytes:
@@ -47,159 +46,23 @@ def consensus_setup(node: Any) -> None:
 
     node.enqueue_transaction_hash = enqueue_transaction_hash
 
-
-    def _process_peers_latest_block(…):
-        """…
-        …
-        - Create a new Fork for `latest_block_hash` and validate it, using
-          stop_heads composed of current fork heads to short-circuit when
-          ancestry meets an existing fork head.
-        - If a matching fork head is found and is not malicious, copy its
-          structural fields (root, validated_upto, chain_fork_position) onto
-          the new fork.
-        - Add all peers in `peer_ids` to the new fork and remove each from any
-          previous fork they followed.
-        - Persist the new fork under `node.forks[latest_block_hash]`.
-        """
-        new_fork = Fork(head=latest_block_hash)
-
-        current_fork_heads = {fk.head for fk in node.forks.values() if fk.head != latest_block_hash}
-
-        new_fork.validate(storage_get=node._local_get, stop_heads=current_fork_heads)
-
-        # update new_fork with details of the fork with head of validated_upto
-        if new_fork.validated_upto and new_fork.validated_upto in node.forks:
-            ref = node.forks[new_fork.validated_upto]
-            # if the matched fork is malicious, disregard this new fork entirely
-            if getattr(ref, "malicious_block_hash", None):
-                return
-            # copy structural fields exactly
-            new_fork.root = ref.root
-            new_fork.validated_upto = ref.validated_upto
-            new_fork.chain_fork_position = ref.chain_fork_position
-
-        # add peers to new fork and remove them from any old forks
-        for peer_id in peer_ids:
-            new_fork.add_peer(peer_id)
-            # Remove this peer from all other forks
-            for h, fk in list(node.forks.items()):
-                if h != latest_block_hash:
-                    fk.remove_peer(peer_id)
-
-        # persist the fork
-        node.forks[latest_block_hash] = new_fork
-
-
-    # Discovery worker: watches peers and enqueues head changes
-    def _discovery_worker():
-        stop = node._validation_stop_event
-        while not stop.is_set():
-            try:
-                peers = getattr(node, "peers", None)
-                if isinstance(peers, dict):
-                    # Snapshot as (peer_id, latest_block_hash) pairs
-                    pairs = [
-                        (peer_id, bytes(latest))
-                        for peer_id, peer in list(peers.items())
-                        if isinstance((latest := getattr(peer, "latest_block", None)), (bytes, bytearray)) and latest
-                    ]
-                    # Group peers by latest block hash
-                    latest_keys = {hb for _, hb in pairs}
-                    grouped: Dict[bytes, set[Any]] = {
-                        hb: {pid for pid, phb in pairs if phb == hb}
-                        for hb in latest_keys
-                    }
-
-                    # Replace queue contents with current groups
-                    try:
-                        while True:
-                            node._validation_verify_queue.get_nowait()
-                    except Empty:
-                        pass
-                    for latest_b, peer_set in grouped.items():
-                        node._validation_verify_queue.put((latest_b, peer_set))
-            except Exception:
-                pass
-            finally:
-                time.sleep(0.5)
-
-    # Verification worker: computes root/height and applies peer→fork assignment
-    def _verify_worker():
-        stop = node._validation_stop_event
-        while not stop.is_set():
-            # Take a snapshot of all currently queued groups
-            batch: list[tuple[bytes, set[Any]]] = []
-            try:
-                while True:
-                    item = node._validation_verify_queue.get_nowait()
-                    batch.append(item)
-            except Empty:
-                pass
-
-            if not batch:
-                time.sleep(0.1)
-                continue
-
-            # Process the snapshot; new items enqueued during processing
-            # will be handled in the next iteration
-            for latest_b, peers in batch:
-                try:
-                    _process_peers_latest_block(latest_b, peers)
-                except Exception:
-                    pass
-
-    def _validation_worker() -> None:
-        """Consume pending transactions when scheduled to validate."""
-        stop = node._validation_stop_event
-        while not stop.is_set():
-            validation_public_key = getattr(node, "validation_public_key", None)
-            if not validation_public_key:
-                time.sleep(0.5)
-                continue
-
-            scheduled_validator = current_validator(node)
-
-            if scheduled_validator != validation_public_key:
-                time.sleep(0.5)
-                continue
-
-            try:
-                current_hash = node._validation_transaction_queue.get_nowait()
-            except Empty:
-                time.sleep(0.1)
-                continue
-
-            new_block = Block()
-            new_block.validator_public_key = getattr(node, "validation_public_key", None)
-
-            while True:
-                try:
-                    apply_transaction(node, new_block, current_hash)
-                except NotImplementedError:
-                    node._validation_transaction_queue.put(current_hash)
-                    time.sleep(0.5)
-                    break
-                except Exception:
-                    # Skip problematic transaction; leave block as-is.
-                    pass
-
-                try:
-                    current_hash = node._validation_transaction_queue.get_nowait()
-                except Empty:
-                    break
+    verify_worker = make_verify_worker(node)
+    validation_worker = make_validation_worker(
+        node, current_validator=current_validator
+    )
 
     # Start workers as daemons
+    discovery_worker = make_discovery_worker(node)
     node.consensus_discovery_thread = threading.Thread(
-        target=…
+        target=discovery_worker, daemon=True, name="consensus-discovery"
     )
     node.consensus_verify_thread = threading.Thread(
-        target=…
+        target=verify_worker, daemon=True, name="consensus-verify"
     )
     node.consensus_validation_thread = threading.Thread(
-        target=…
+        target=validation_worker, daemon=True, name="consensus-validation"
     )
     node.consensus_discovery_thread.start()
     node.consensus_verify_thread.start()
     if getattr(node, "validation_secret_key", None):
         node.consensus_validation_thread.start()
-
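The net change: `consensus_setup` no longer defines its worker loops inline; each loop is produced by a factory in `astreum._consensus.workers` that closes over the node and returns a zero-argument callable suitable for `threading.Thread`. A minimal sketch of that factory pattern, using an illustrative `StubNode` and `make_noop_worker` rather than the package's real node class or workers:

import threading
import time


class StubNode:
    """Illustrative stand-in for the real node object."""

    def __init__(self) -> None:
        self._validation_stop_event = threading.Event()


def make_noop_worker(node: StubNode):
    """Factory: bind the node once, return the thread body (mirrors make_*_worker)."""

    def _worker() -> None:
        while not node._validation_stop_event.is_set():
            time.sleep(0.1)  # the real workers drain their queues here

    return _worker


node = StubNode()
thread = threading.Thread(target=make_noop_worker(node), daemon=True, name="consensus-demo")
thread.start()
node._validation_stop_event.set()  # cooperative shutdown, as in the package
thread.join()

Binding the node inside the factory keeps the loop body free of globals and lets each worker live in its own module, which is what this release does.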
astreum/_consensus/transaction.py
CHANGED

@@ -1,25 +1,13 @@
 from __future__ import annotations
 
 from dataclasses import dataclass
-from typing import Any, List, Optional, Tuple
+from typing import Any, List, Tuple
 
 from .._storage.atom import Atom, ZERO32
-
-
-def _int_to_be_bytes(value: Optional[int]) -> bytes:
-    if value is None:
-        return b""
-    value = int(value)
-    if value == 0:
-        return b"\x00"
-    size = (value.bit_length() + 7) // 8
-    return value.to_bytes(size, "big")
-
-
-def _be_bytes_to_int(data: Optional[bytes]) -> int:
-    if not data:
-        return 0
-    return int.from_bytes(data, "big")
+from ..utils.integer import bytes_to_int, int_to_bytes
+from .account import Account
+from .genesis import TREASURY_ADDRESS
+from .receipt import STATUS_FAILED, Receipt, STATUS_SUCCESS
 
 
 def _make_list(child_ids: List[bytes]) -> Tuple[bytes, List[Atom]]:
@@ -48,6 +36,7 @@ class Transaction:
     recipient: bytes = b""
     sender: bytes = b""
     signature: bytes = b""
+    hash: bytes = ZERO32
 
     def to_atom(self) -> Tuple[bytes, List[Atom]]:
         """Serialise the transaction, returning (object_id, atoms)."""
@@ -59,8 +48,8 @@ class Transaction:
             body_child_ids.append(atom.object_id())
             acc.append(atom)
 
-        emit(_int_to_be_bytes(self.amount))
-        emit(_int_to_be_bytes(self.counter))
+        emit(int_to_bytes(self.amount))
+        emit(int_to_bytes(self.counter))
         emit(bytes(self.data))
         emit(bytes(self.recipient))
         emit(bytes(self.sender))
@@ -84,9 +73,13 @@ class Transaction:
     @classmethod
     def from_atom(
         cls,
-        storage_get: …,
+        node: Any,
         transaction_id: bytes,
     ) -> Transaction:
+        storage_get = node._local_get
+        if not callable(storage_get):
+            raise NotImplementedError("node does not expose a storage getter")
+
         top_type_atom = storage_get(transaction_id)
         if top_type_atom is None or top_type_atom.data != b"list":
             raise ValueError("not a transaction (outer list missing)")
@@ -154,15 +147,70 @@ class Transaction:
         signature_bytes = signature_atom.data if signature_atom is not None else b""
 
         return cls(
-            amount=_be_bytes_to_int(amount_bytes),
-            counter=_be_bytes_to_int(counter_bytes),
+            amount=bytes_to_int(amount_bytes),
+            counter=bytes_to_int(counter_bytes),
             data=data_bytes,
             recipient=recipient_bytes,
             sender=sender_bytes,
             signature=signature_bytes,
+            hash=bytes(transaction_id),
         )
 
 
 def apply_transaction(node: Any, block: object, transaction_hash: bytes) -> None:
     """Apply transaction to the candidate block. Override downstream."""
-    raise NotImplementedError
+    transaction = Transaction.from_atom(node, transaction_hash)
+
+    accounts = block.accounts
+
+    sender_account = accounts.get_account(address=transaction.sender, node=node)
+
+    if sender_account is None:
+        return
+
+    tx_cost = 1 + transaction.amount
+
+    if sender_account.balance < tx_cost:
+        low_sender_balance_receipt = Receipt(
+            transaction_hash=transaction_hash,
+            cost=0,
+            logs=b"low sender balance",
+            status=STATUS_FAILED
+        )
+        low_sender_balance_receipt.atomize()
+        block.receipts.append(receipt)
+        block.transactions.append(transaction)
+        return
+
+    recipient_account = accounts.get_account(address=transaction.recipient, node=node)
+
+    if recipient_account is None:
+        recipient_account = Account.create()
+
+    if transaction.recipient == TREASURY_ADDRESS:
+        stake_trie = recipient_account.data
+        existing_stake = stake_trie.get(node, transaction.sender)
+        current_stake = bytes_to_int(existing_stake)
+        new_stake = current_stake + transaction.amount
+        stake_trie.put(node, transaction.sender, int_to_bytes(new_stake))
+        recipient_account.data_hash = stake_trie.root_hash or ZERO32
+        recipient_account.balance += transaction.amount
+    else:
+        recipient_account.balance += transaction.amount
+
+    sender_account.balance -= tx_cost
+
+    block.accounts.set_account(address=sender_account)
+
+    block.accounts.set_account(address=recipient_account)
+
+    block.transactions.append(transaction)
+
+    receipt = Receipt(
+        transaction_hash=bytes(transaction_hash),
+        cost=0,
+        logs=b"",
+        status=STATUS_SUCCESS,
+    )
+    receipt.atomize()
+    block.receipts.append(receipt)
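As the new `apply_transaction` shows, a transfer charges the sender `1 + amount` (a flat unit fee plus the transferred amount) and credits the recipient `amount`; when the recipient is `TREASURY_ADDRESS`, the amount is additionally recorded as the sender's stake in the treasury account's trie. Note that the low-balance path appends `receipt`, a name not yet bound at that point, rather than `low_sender_balance_receipt`. A toy restatement of the balance arithmetic, with hypothetical values and plain ints standing in for `Account` balances:

# Hypothetical values; plain ints stand in for Account balances.
amount = 10
tx_cost = 1 + amount               # flat fee of 1 plus the transfer amount

sender_balance, recipient_balance = 25, 0

if sender_balance < tx_cost:       # the real code emits a STATUS_FAILED receipt here
    raise SystemExit("low sender balance")

sender_balance -= tx_cost          # sender pays fee + amount
recipient_balance += amount        # recipient receives only the amount; the unit fee is not credited here

assert (sender_balance, recipient_balance) == (14, 10)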
astreum/_consensus/workers/__init__.py
ADDED

@@ -0,0 +1,9 @@
+"""
+Worker thread factories for the consensus subsystem.
+"""
+
+from .discovery import make_discovery_worker
+from .validation import make_validation_worker
+from .verify import make_verify_worker
+
+__all__ = ["make_discovery_worker", "make_verify_worker", "make_validation_worker"]
astreum/_consensus/workers/discovery.py
ADDED

@@ -0,0 +1,48 @@
+from __future__ import annotations
+
+import time
+from queue import Empty
+from typing import Any, Dict, Set, Tuple
+
+
+def make_discovery_worker(node: Any):
+    """
+    Build the discovery worker bound to the given node.
+
+    The returned callable mirrors the previous inline worker in ``setup.py``.
+    """
+
+    def _discovery_worker() -> None:
+        stop = node._validation_stop_event
+        while not stop.is_set():
+            try:
+                peers = getattr(node, "peers", None)
+                if isinstance(peers, dict):
+                    pairs: list[Tuple[Any, bytes]] = [
+                        (peer_id, bytes(latest))
+                        for peer_id, peer in list(peers.items())
+                        if isinstance(
+                            (latest := getattr(peer, "latest_block", None)),
+                            (bytes, bytearray),
+                        )
+                        and latest
+                    ]
+                    latest_keys: Set[bytes] = {hb for _, hb in pairs}
+                    grouped: Dict[bytes, set[Any]] = {
+                        hb: {pid for pid, phb in pairs if phb == hb}
+                        for hb in latest_keys
+                    }
+
+                    try:
+                        while True:
+                            node._validation_verify_queue.get_nowait()
+                    except Empty:
+                        pass
+                    for latest_b, peer_set in grouped.items():
+                        node._validation_verify_queue.put((latest_b, peer_set))
+            except Exception:
+                pass
+            finally:
+                time.sleep(0.5)
+
+    return _discovery_worker
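`_discovery_worker` relies on a drain-and-replace idiom: empty the queue with `get_nowait()` until `Empty`, then enqueue one `(latest_block_hash, peer_set)` tuple per distinct head, so the verify worker only ever sees the freshest grouping. A self-contained sketch of the idiom (the helper name is illustrative; the pattern is safe here because the discovery thread is the queue's only producer):

from queue import Empty, Queue


def replace_queue_contents(q: Queue, items) -> None:
    """Drain q without blocking, then enqueue the fresh items."""
    try:
        while True:
            q.get_nowait()
    except Empty:
        pass
    for item in items:
        q.put(item)


q: Queue = Queue()
q.put((b"old-head", {"peer-1"}))
replace_queue_contents(q, [(b"new-head", {"peer-1", "peer-2"})])
assert q.get_nowait() == (b"new-head", {"peer-1", "peer-2"})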
astreum/_consensus/workers/validation.py
ADDED

@@ -0,0 +1,122 @@
+from __future__ import annotations
+
+import time
+from queue import Empty
+from typing import Any, Callable
+
+from ..block import Block
+from ..transaction import apply_transaction
+from ..._storage.atom import bytes_list_to_atoms
+from ..._storage.patricia import PatriciaTrie
+from ..._communication.message import Message, MessageTopic
+from ..._communication.ping import Ping
+
+
+def make_validation_worker(
+    node: Any,
+    *,
+    current_validator: Callable[[Any], bytes],
+) -> Callable[[], None]:
+    """Build the validation worker bound to the given node."""
+
+    def _validation_worker() -> None:
+        stop = node._validation_stop_event
+        while not stop.is_set():
+            validation_public_key = getattr(node, "validation_public_key", None)
+            if not validation_public_key:
+                time.sleep(0.5)
+                continue
+
+            scheduled_validator = current_validator(node)
+
+            if scheduled_validator != validation_public_key:
+                time.sleep(0.5)
+                continue
+
+            try:
+                current_hash = node._validation_transaction_queue.get_nowait()
+            except Empty:
+                time.sleep(0.1)
+                continue
+
+            # create thread to perform vdf
+
+            new_block = Block()
+            new_block.validator_public_key = validation_public_key
+            new_block.previous_block_hash = node.latest_block_hash
+            try:
+                new_block.previous_block = Block.from_atom(node, new_block.previous_block_hash)
+            except Exception:
+                continue
+            new_block.accounts = PatriciaTrie(root_hash=new_block.previous_block.accounts_hash)
+
+            # we may want to add a timer to process part of the txs only on a slow computer
+            while True:
+                try:
+                    apply_transaction(node, new_block, current_hash)
+                except NotImplementedError:
+                    node._validation_transaction_queue.put(current_hash)
+                    time.sleep(0.5)
+                    break
+                except Exception:
+                    pass
+
+                try:
+                    current_hash = node._validation_transaction_queue.get_nowait()
+                except Empty:
+                    break
+
+            # create an atom list of transactions, save the list head hash as the block's transactions_hash
+            transactions = new_block.transactions or []
+            tx_hashes = [bytes(tx.hash) for tx in transactions if tx.hash]
+            head_hash, _ = bytes_list_to_atoms(tx_hashes)
+            new_block.transactions_hash = head_hash
+
+            receipts = new_block.receipts or []
+            receipt_hashes = [bytes(rcpt.hash) for rcpt in receipts if rcpt.hash]
+            receipts_head, _ = bytes_list_to_atoms(receipt_hashes)
+            new_block.receipts_hash = receipts_head
+
+            # get vdf result, default to 0 for now
+
+            # get timestamp, or wait for the next second after the previous block; rule: the next block must be at least 1 second after the previous
+            now = time.time()
+            min_allowed = new_block.previous_block.timestamp + 1
+            if now < min_allowed:
+                time.sleep(max(0.0, min_allowed - now))
+                now = time.time()
+            new_block.timestamp = max(int(now), min_allowed)
+
+            # atomize block
+            new_block_hash, _ = new_block.to_atom()
+            # put as own latest block hash
+            node.latest_block_hash = new_block_hash
+
+            # ping peers in the validation route to update their records
+            if node.validation_route and node.outgoing_queue and node.addresses:
+                route_peers = {
+                    peer_key
+                    for bucket in getattr(node.validation_route, "buckets", {}).values()
+                    for peer_key in bucket
+                }
+                if route_peers:
+                    ping_payload = Ping(
+                        is_validator=True,
+                        latest_block=new_block_hash,
+                    ).to_bytes()
+
+                    message_bytes = Message(
+                        topic=MessageTopic.PING,
+                        content=ping_payload,
+                    ).to_bytes()
+
+                    for address, peer_key in node.addresses.items():
+                        if peer_key in route_peers:
+                            try:
+                                node.outgoing_queue.put((message_bytes, address))
+                            except Exception:
+                                pass
+
+            # store the new block and receipts
+
+    return _validation_worker
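One rule buried in the loop above is the block-spacing constraint: a block's timestamp must be at least one second after its predecessor's, so the worker sleeps until `previous.timestamp + 1` before stamping. The same logic restated as a standalone helper (the name `next_block_timestamp` is illustrative, not part of the package):

import time


def next_block_timestamp(previous_timestamp: int) -> int:
    """Return an int timestamp at least 1s after the previous block, sleeping if needed."""
    min_allowed = previous_timestamp + 1
    now = time.time()
    if now < min_allowed:
        time.sleep(max(0.0, min_allowed - now))
        now = time.time()
    return max(int(now), min_allowed)


print(next_block_timestamp(0))  # an old predecessor needs no wait; prints the current Unix time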
astreum/_consensus/workers/verify.py
ADDED

@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+import time
+from queue import Empty
+from typing import Any, Set
+
+from ..fork import Fork
+
+
+def _process_peers_latest_block(
+    node: Any, latest_block_hash: bytes, peer_ids: Set[Any]
+) -> None:
+    """Assign peers to the fork that matches their reported head."""
+    new_fork = Fork(head=latest_block_hash)
+
+    current_fork_heads = {
+        fk.head for fk in node.forks.values() if fk.head != latest_block_hash
+    }
+
+    new_fork.validate(storage_get=node._local_get, stop_heads=current_fork_heads)
+
+    if new_fork.validated_upto and new_fork.validated_upto in node.forks:
+        ref = node.forks[new_fork.validated_upto]
+        if getattr(ref, "malicious_block_hash", None):
+            return
+        new_fork.root = ref.root
+        new_fork.validated_upto = ref.validated_upto
+        new_fork.chain_fork_position = ref.chain_fork_position
+
+    for peer_id in peer_ids:
+        new_fork.add_peer(peer_id)
+        for head, fork in list(node.forks.items()):
+            if head != latest_block_hash:
+                fork.remove_peer(peer_id)
+
+    node.forks[latest_block_hash] = new_fork
+
+
+def make_verify_worker(node: Any):
+    """Build the verify worker bound to the given node."""
+
+    def _verify_worker() -> None:
+        stop = node._validation_stop_event
+        while not stop.is_set():
+            batch: list[tuple[bytes, Set[Any]]] = []
+            try:
+                while True:
+                    latest_b, peers = node._validation_verify_queue.get_nowait()
+                    batch.append((latest_b, peers))
+            except Empty:
+                pass
+
+            if not batch:
+                time.sleep(0.1)
+                continue
+
+            for latest_b, peers in batch:
+                try:
+                    _process_peers_latest_block(node, latest_b, peers)
+                except Exception:
+                    pass
+
+    return _verify_worker
astreum/_storage/atom.py
CHANGED
@@ -52,11 +52,11 @@ class Atom:
         data = buf[len(ZERO32):]
         return Atom(data=data, next=next_hash, size=len(data))
 
-def expr_to_atoms(e: Expr) -> Tuple[bytes, List[Atom]]:
-    def symbol(value: str) -> Tuple[bytes, List[Atom]]:
-        val = value.encode("utf-8")
-        val_atom = Atom.from_data(data=val)
-        typ_atom = Atom.from_data(b"symbol", val_atom.object_id())
+def expr_to_atoms(e: Expr) -> Tuple[bytes, List[Atom]]:
+    def symbol(value: str) -> Tuple[bytes, List[Atom]]:
+        val = value.encode("utf-8")
+        val_atom = Atom.from_data(data=val)
+        typ_atom = Atom.from_data(b"symbol", val_atom.object_id())
         return typ_atom.object_id(), [val_atom, typ_atom]
 
     def bytes(data: bytes) -> Tuple[bytes, List[Atom]]:
@@ -96,5 +96,22 @@ def expr_to_atoms(e: Expr) -> Tuple[bytes, List[Atom]]:
     if isinstance(e, Expr.Error):
         return err(e.topic, e.origin)
     if isinstance(e, Expr.ListExpr):
-        return lst(e.elements)
-    raise TypeError("unknown Expr variant")
+        return lst(e.elements)
+    raise TypeError("unknown Expr variant")
+
+
+def bytes_list_to_atoms(values: List[bytes]) -> Tuple[bytes, List[Atom]]:
+    """Build a forward-ordered linked list of atoms from byte payloads.
+
+    Returns the head object's hash (ZERO32 if no values) and the atoms created.
+    """
+    next_hash = ZERO32
+    atoms: List[Atom] = []
+
+    for value in reversed(values):
+        atom = Atom.from_data(data=bytes(value), next_hash=next_hash)
+        atoms.append(atom)
+        next_hash = atom.object_id()
+
+    atoms.reverse()
+    return (next_hash if values else ZERO32), atoms
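`bytes_list_to_atoms` links the list back-to-front so that each atom's `next` field holds the hash of its successor, then returns the head hash (`ZERO32` for an empty input). A traversal sketch, assuming the `Atom` attributes shown in this diff (`object_id()`, `.next`, `.data`) and that `ZERO32` terminates the chain:

# Walk the linked list produced by bytes_list_to_atoms.
from astreum._storage.atom import ZERO32, bytes_list_to_atoms

head, atoms = bytes_list_to_atoms([b"tx-1", b"tx-2", b"tx-3"])
by_id = {atom.object_id(): atom for atom in atoms}  # index atoms by their hash

payloads = []
cursor = head
while cursor != ZERO32:        # ZERO32 marks the end of the chain
    atom = by_id[cursor]
    payloads.append(atom.data)
    cursor = atom.next

assert payloads == [b"tx-1", b"tx-2", b"tx-3"]  # forward order is preserved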