astreum 0.2.39__py3-none-any.whl → 0.2.41__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- astreum/_communication/__init__.py +2 -0
- astreum/{models → _communication}/message.py +100 -64
- astreum/_communication/ping.py +33 -0
- astreum/_communication/route.py +53 -20
- astreum/_communication/setup.py +240 -99
- astreum/_communication/util.py +42 -0
- astreum/_consensus/__init__.py +6 -0
- astreum/_consensus/account.py +170 -0
- astreum/_consensus/accounts.py +67 -0
- astreum/_consensus/block.py +84 -52
- astreum/_consensus/chain.py +65 -62
- astreum/_consensus/fork.py +99 -97
- astreum/_consensus/genesis.py +141 -0
- astreum/_consensus/receipt.py +177 -0
- astreum/_consensus/setup.py +21 -162
- astreum/_consensus/transaction.py +43 -23
- astreum/_consensus/workers/__init__.py +9 -0
- astreum/_consensus/workers/discovery.py +48 -0
- astreum/_consensus/workers/validation.py +122 -0
- astreum/_consensus/workers/verify.py +63 -0
- astreum/_storage/atom.py +24 -7
- astreum/_storage/patricia.py +443 -0
- astreum/models/block.py +10 -10
- astreum/node.py +755 -753
- {astreum-0.2.39.dist-info → astreum-0.2.41.dist-info}/METADATA +1 -1
- astreum-0.2.41.dist-info/RECORD +53 -0
- astreum/lispeum/__init__.py +0 -0
- astreum/lispeum/environment.py +0 -40
- astreum/lispeum/expression.py +0 -86
- astreum/lispeum/parser.py +0 -41
- astreum/lispeum/tokenizer.py +0 -52
- astreum/models/account.py +0 -91
- astreum/models/accounts.py +0 -34
- astreum/models/transaction.py +0 -106
- astreum/relay/__init__.py +0 -0
- astreum/relay/peer.py +0 -9
- astreum/relay/route.py +0 -25
- astreum/relay/setup.py +0 -58
- astreum-0.2.39.dist-info/RECORD +0 -55
- {astreum-0.2.39.dist-info → astreum-0.2.41.dist-info}/WHEEL +0 -0
- {astreum-0.2.39.dist-info → astreum-0.2.41.dist-info}/licenses/LICENSE +0 -0
- {astreum-0.2.39.dist-info → astreum-0.2.41.dist-info}/top_level.txt +0 -0
astreum/_consensus/setup.py
CHANGED
@@ -1,15 +1,14 @@
 from __future__ import annotations
 
 import threading
-import
-from
-from typing import Any, Dict, Optional, Tuple
+from queue import Queue
+from typing import Any
 
-from .
-
-
-
-
+from .workers import (
+    make_discovery_worker,
+    make_validation_worker,
+    make_verify_worker,
+)
 
 
 def current_validator(node: Any) -> bytes:
@@ -17,11 +16,6 @@ def current_validator(node: Any) -> bytes:
     raise NotImplementedError("current_validator must be implemented by the host node")
 
 
-def apply_transaction(node: Any, block: object, transaction_hash: bytes) -> None:
-    """Apply transaction to the candidate block. Override downstream."""
-    pass
-
-
 def consensus_setup(node: Any) -> None:
     # Shared state
     node.validation_lock = getattr(node, "validation_lock", threading.RLock())
@@ -52,158 +46,23 @@ def consensus_setup(node: Any) -> None:
 
     node.enqueue_transaction_hash = enqueue_transaction_hash
 
-
-
-
-
-        - Create a new Fork for `latest_block_hash` and validate it, using
-          stop_heads composed of current fork heads to short-circuit when
-          ancestry meets an existing fork head.
-        - If a matching fork head is found and is not malicious, copy its
-          structural fields (root, validated_upto, chain_fork_position) onto
-          the new fork.
-        - Add all peers in `peer_ids` to the new fork and remove each from any
-          previous fork they followed.
-        - Persist the new fork under `node.forks[latest_block_hash]`.
-        """
-        new_fork = Fork(head=latest_block_hash)
-
-        current_fork_heads = {fk.head for fk in node.forks.values() if fk.head != latest_block_hash}
-
-        new_fork.validate(storage_get=node._local_get, stop_heads=current_fork_heads)
-
-        # update new_fork with details of the fork with head of validated_upto
-        if new_fork.validated_upto and new_fork.validated_upto in node.forks:
-            ref = node.forks[new_fork.validated_upto]
-            # if the matched fork is malicious, disregard this new fork entirely
-            if getattr(ref, "malicious_block_hash", None):
-                return
-            # copy structural fields exactly
-            new_fork.root = ref.root
-            new_fork.validated_upto = ref.validated_upto
-            new_fork.chain_fork_position = ref.chain_fork_position
-
-        # add peers to new fork and remove them from any old forks
-        for peer_id in peer_ids:
-            new_fork.add_peer(peer_id)
-            # Remove this peer from all other forks
-            for h, fk in list(node.forks.items()):
-                if h != latest_block_hash:
-                    fk.remove_peer(peer_id)
-
-        # persist the fork
-        node.forks[latest_block_hash] = new_fork
-
-
-    # Discovery worker: watches peers and enqueues head changes
-    def _discovery_worker():
-        stop = node._validation_stop_event
-        while not stop.is_set():
-            try:
-                peers = getattr(node, "peers", None)
-                if isinstance(peers, dict):
-                    # Snapshot as (peer_id, latest_block_hash) pairs
-                    pairs = [
-                        (peer_id, bytes(latest))
-                        for peer_id, peer in list(peers.items())
-                        if isinstance((latest := getattr(peer, "latest_block", None)), (bytes, bytearray)) and latest
-                    ]
-                    # Group peers by latest block hash
-                    latest_keys = {hb for _, hb in pairs}
-                    grouped: Dict[bytes, set[Any]] = {
-                        hb: {pid for pid, phb in pairs if phb == hb}
-                        for hb in latest_keys
-                    }
-
-                    # Replace queue contents with current groups
-                    try:
-                        while True:
-                            node._validation_verify_queue.get_nowait()
-                    except Empty:
-                        pass
-                    for latest_b, peer_set in grouped.items():
-                        node._validation_verify_queue.put((latest_b, peer_set))
-            except Exception:
-                pass
-            finally:
-                time.sleep(0.5)
-
-    # Verification worker: computes root/height and applies peer→fork assignment
-    def _verify_worker():
-        stop = node._validation_stop_event
-        while not stop.is_set():
-            # Take a snapshot of all currently queued groups
-            batch: list[tuple[bytes, set[Any]]] = []
-            try:
-                while True:
-                    item = node._validation_verify_queue.get_nowait()
-                    batch.append(item)
-            except Empty:
-                pass
-
-            if not batch:
-                time.sleep(0.1)
-                continue
-
-            # Process the snapshot; new items enqueued during processing
-            # will be handled in the next iteration
-            for latest_b, peers in batch:
-                try:
-                    _process_peers_latest_block(latest_b, peers)
-                except Exception:
-                    pass
-
-    def _validation_worker() -> None:
-        """Consume pending transactions when scheduled to validate."""
-        stop = node._validation_stop_event
-        while not stop.is_set():
-            validation_public_key = getattr(node, "validation_public_key", None)
-            if not validation_public_key:
-                time.sleep(0.5)
-                continue
-
-            scheduled_validator = current_validator(node)
-
-            if scheduled_validator != validation_public_key:
-                time.sleep(0.5)
-                continue
-
-            try:
-                current_hash = node._validation_transaction_queue.get_nowait()
-            except Empty:
-                time.sleep(0.1)
-                continue
-
-            new_block = Block()
-            new_block.validator_public_key = getattr(node, "validation_public_key", None)
-
-            while True:
-                try:
-                    apply_transaction(node, new_block, current_hash)
-                except NotImplementedError:
-                    node._validation_transaction_queue.put(current_hash)
-                    time.sleep(0.5)
-                    break
-                except Exception:
-                    # Skip problematic transaction; leave block as-is.
-                    pass
-
-                try:
-                    current_hash = node._validation_transaction_queue.get_nowait()
-                except Empty:
-                    break
+    verify_worker = make_verify_worker(node)
+    validation_worker = make_validation_worker(
+        node, current_validator=current_validator
+    )
 
     # Start workers as daemons
-
-
+    discovery_worker = make_discovery_worker(node)
+    node.consensus_discovery_thread = threading.Thread(
+        target=discovery_worker, daemon=True, name="consensus-discovery"
     )
-    node.
-        target=
+    node.consensus_verify_thread = threading.Thread(
+        target=verify_worker, daemon=True, name="consensus-verify"
     )
-    node.
-        target=
+    node.consensus_validation_thread = threading.Thread(
+        target=validation_worker, daemon=True, name="consensus-validation"
    )
-    node.
-    node.
+    node.consensus_discovery_thread.start()
+    node.consensus_verify_thread.start()
     if getattr(node, "validation_secret_key", None):
-        node.
+        node.consensus_validation_thread.start()
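The inline `_discovery_worker`, `_verify_worker`, and `_validation_worker` bodies move out of `consensus_setup` into factory functions under `_consensus/workers`, each returning a closure bound to the node and started on a daemon thread. A minimal standalone sketch of that factory pattern follows; `FakeNode` and `make_tick_worker` are hypothetical stand-ins, not part of the package:

import threading
import time


class FakeNode:
    """Hypothetical stand-in for the astreum node object."""
    def __init__(self) -> None:
        self.stop_event = threading.Event()
        self.ticks = 0


def make_tick_worker(node: FakeNode):
    """Return a zero-argument callable bound to node, like the make_*_worker factories."""
    def _tick_worker() -> None:
        while not node.stop_event.is_set():
            node.ticks += 1
            time.sleep(0.1)
    return _tick_worker


node = FakeNode()
thread = threading.Thread(target=make_tick_worker(node), daemon=True, name="tick")
thread.start()
time.sleep(0.35)
node.stop_event.set()
thread.join()
print(node.ticks)  # typically 3 or 4 ticks before the stop event is set

Binding the node through a factory keeps the worker loops importable and testable without wiring up a full running node.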
astreum/_consensus/transaction.py
CHANGED
@@ -1,9 +1,10 @@
 from __future__ import annotations
 
 from dataclasses import dataclass
-from typing import
+from typing import Any, List, Optional, Tuple
 
 from .._storage.atom import Atom, ZERO32
+from .receipt import Receipt, STATUS_SUCCESS
 
 
 def _int_to_be_bytes(value: Optional[int]) -> bytes:
@@ -22,12 +23,6 @@ def _be_bytes_to_int(data: Optional[bytes]) -> int:
     return int.from_bytes(data, "big")
 
 
-def _make_typed_bytes(payload: bytes) -> Tuple[bytes, List[Atom]]:
-    value_atom = Atom.from_data(data=payload)
-    type_atom = Atom.from_data(data=b"byte", next_hash=value_atom.object_id())
-    return type_atom.object_id(), [value_atom, type_atom]
-
-
 def _make_list(child_ids: List[bytes]) -> Tuple[bytes, List[Atom]]:
     atoms: List[Atom] = []
     next_hash = ZERO32
@@ -54,6 +49,7 @@ class Transaction:
     recipient: bytes = b""
     sender: bytes = b""
     signature: bytes = b""
+    hash: bytes = ZERO32
 
     def to_atom(self) -> Tuple[bytes, List[Atom]]:
         """Serialise the transaction, returning (object_id, atoms)."""
@@ -61,9 +57,9 @@ class Transaction:
         acc: List[Atom] = []
 
         def emit(payload: bytes) -> None:
-
-            body_child_ids.append(
-            acc.
+            atom = Atom.from_data(data=payload)
+            body_child_ids.append(atom.object_id())
+            acc.append(atom)
 
         emit(_int_to_be_bytes(self.amount))
         emit(_int_to_be_bytes(self.counter))
@@ -90,9 +86,13 @@ class Transaction:
     @classmethod
     def from_atom(
         cls,
-
+        node: Any,
         transaction_id: bytes,
     ) -> Transaction:
+        storage_get = node._local_get
+        if not callable(storage_get):
+            raise NotImplementedError("node does not expose a storage getter")
+
         top_type_atom = storage_get(transaction_id)
         if top_type_atom is None or top_type_atom.data != b"list":
             raise ValueError("not a transaction (outer list missing)")
@@ -141,23 +141,20 @@ class Transaction:
         if len(body_entries) < 5:
             body_entries.extend([ZERO32] * (5 - len(body_entries)))
 
-        def
-            if
+        def read_detail_bytes(entry_id: bytes) -> bytes:
+            if entry_id == ZERO32:
                 return b""
             elem = storage_get(entry_id)
             if elem is None:
                 return b""
-
-            if
-                return b""
-            value_atom = storage_get(type_atom.next)
-            return value_atom.data if value_atom is not None else b""
+            detail_atom = storage_get(elem.data)
+            return detail_atom.data if detail_atom is not None else b""
 
-        amount_bytes =
-        counter_bytes =
-        data_bytes =
-        recipient_bytes =
-        sender_bytes =
+        amount_bytes = read_detail_bytes(body_entries[0])
+        counter_bytes = read_detail_bytes(body_entries[1])
+        data_bytes = read_detail_bytes(body_entries[2])
+        recipient_bytes = read_detail_bytes(body_entries[3])
+        sender_bytes = read_detail_bytes(body_entries[4])
 
         signature_atom = storage_get(signature_atom_id)
         signature_bytes = signature_atom.data if signature_atom is not None else b""
@@ -169,4 +166,27 @@ class Transaction:
             recipient=recipient_bytes,
             sender=sender_bytes,
             signature=signature_bytes,
+            hash=bytes(transaction_id),
         )
+
+
+def apply_transaction(node: Any, block: object, transaction_hash: bytes) -> None:
+    """Apply transaction to the candidate block. Override downstream."""
+    transaction = Transaction.from_atom(node, transaction_hash)
+
+    if block.transactions is None:
+        block.transactions = []
+    block.transactions.append(transaction)
+
+    receipt = Receipt(
+        transaction_hash=bytes(transaction_hash),
+        cost=0,
+        logs=b"",
+        status=STATUS_SUCCESS,
+    )
+    receipt.atomize()
+    if block.receipts is None:
+        block.receipts = []
+    block.receipts.append(receipt)
+
+    # Downstream implementations can extend this to apply state changes.
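`apply_transaction` now lives next to `Transaction` and records a success `Receipt` for every transaction it appends to the candidate block. The standalone sketch below mirrors only that bookkeeping, using hypothetical stand-ins (`FakeBlock`, `FakeTransaction`, `FakeReceipt`); `STATUS_SUCCESS = 0` is an assumed value, the real constant is defined in `_consensus/receipt.py`:

from dataclasses import dataclass
from typing import List, Optional

STATUS_SUCCESS = 0  # assumed value; the real constant comes from _consensus/receipt.py


@dataclass
class FakeTransaction:
    hash: bytes


@dataclass
class FakeReceipt:
    transaction_hash: bytes
    cost: int
    logs: bytes
    status: int


@dataclass
class FakeBlock:
    transactions: Optional[List[FakeTransaction]] = None
    receipts: Optional[List[FakeReceipt]] = None


def apply_transaction_sketch(block: FakeBlock, tx: FakeTransaction) -> None:
    # Append the transaction, then record a matching success receipt.
    if block.transactions is None:
        block.transactions = []
    block.transactions.append(tx)
    if block.receipts is None:
        block.receipts = []
    block.receipts.append(
        FakeReceipt(transaction_hash=tx.hash, cost=0, logs=b"", status=STATUS_SUCCESS)
    )


block = FakeBlock()
apply_transaction_sketch(block, FakeTransaction(hash=b"\x01" * 32))
assert len(block.transactions) == len(block.receipts) == 1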
astreum/_consensus/workers/__init__.py
ADDED
@@ -0,0 +1,9 @@
+"""
+Worker thread factories for the consensus subsystem.
+"""
+
+from .discovery import make_discovery_worker
+from .validation import make_validation_worker
+from .verify import make_verify_worker
+
+__all__ = ["make_discovery_worker", "make_verify_worker", "make_validation_worker"]
astreum/_consensus/workers/discovery.py
ADDED
@@ -0,0 +1,48 @@
+from __future__ import annotations
+
+import time
+from queue import Empty
+from typing import Any, Dict, Set, Tuple
+
+
+def make_discovery_worker(node: Any):
+    """
+    Build the discovery worker bound to the given node.
+
+    The returned callable mirrors the previous inline worker in ``setup.py``.
+    """
+
+    def _discovery_worker() -> None:
+        stop = node._validation_stop_event
+        while not stop.is_set():
+            try:
+                peers = getattr(node, "peers", None)
+                if isinstance(peers, dict):
+                    pairs: list[Tuple[Any, bytes]] = [
+                        (peer_id, bytes(latest))
+                        for peer_id, peer in list(peers.items())
+                        if isinstance(
+                            (latest := getattr(peer, "latest_block", None)),
+                            (bytes, bytearray),
+                        )
+                        and latest
+                    ]
+                    latest_keys: Set[bytes] = {hb for _, hb in pairs}
+                    grouped: Dict[bytes, set[Any]] = {
+                        hb: {pid for pid, phb in pairs if phb == hb}
+                        for hb in latest_keys
+                    }
+
+                    try:
+                        while True:
+                            node._validation_verify_queue.get_nowait()
+                    except Empty:
+                        pass
+                    for latest_b, peer_set in grouped.items():
+                        node._validation_verify_queue.put((latest_b, peer_set))
+            except Exception:
+                pass
+            finally:
+                time.sleep(0.5)
+
+    return _discovery_worker
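The discovery worker snapshots peer heads, groups peers by their reported latest block, then drains `_validation_verify_queue` before publishing the fresh groups, so the verify worker always consumes the newest view rather than stale entries. A standalone sketch of that drain-then-refill pattern using a plain `queue.Queue`:

from queue import Empty, Queue

verify_queue: Queue = Queue()
verify_queue.put((b"old-head", {"peer-a"}))  # stale entry from an earlier pass

grouped = {b"head-1": {"peer-a", "peer-b"}, b"head-2": {"peer-c"}}

# Drop whatever is still queued, then publish the fresh grouping.
try:
    while True:
        verify_queue.get_nowait()
except Empty:
    pass
for head, peer_set in grouped.items():
    verify_queue.put((head, peer_set))

print(verify_queue.qsize())  # 2: only the current groups remain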
astreum/_consensus/workers/validation.py
ADDED
@@ -0,0 +1,122 @@
+from __future__ import annotations
+
+import time
+from queue import Empty
+from typing import Any, Callable
+
+from ..block import Block
+from ..transaction import apply_transaction
+from ..._storage.atom import bytes_list_to_atoms
+from ..._storage.patricia import PatriciaTrie
+from ..._communication.message import Message, MessageTopic
+from ..._communication.ping import Ping
+
+
+def make_validation_worker(
+    node: Any,
+    *,
+    current_validator: Callable[[Any], bytes],
+) -> Callable[[], None]:
+    """Build the validation worker bound to the given node."""
+
+    def _validation_worker() -> None:
+        stop = node._validation_stop_event
+        while not stop.is_set():
+            validation_public_key = getattr(node, "validation_public_key", None)
+            if not validation_public_key:
+                time.sleep(0.5)
+                continue
+
+            scheduled_validator = current_validator(node)
+
+            if scheduled_validator != validation_public_key:
+                time.sleep(0.5)
+                continue
+
+            try:
+                current_hash = node._validation_transaction_queue.get_nowait()
+            except Empty:
+                time.sleep(0.1)
+                continue
+
+            # create thread to perform vdf
+
+            new_block = Block()
+            new_block.validator_public_key = validation_public_key
+            new_block.previous_block_hash = node.latest_block_hash
+            try:
+                new_block.previous_block = Block.from_atom(node, new_block.previous_block_hash)
+            except Exception:
+                continue
+            new_block.accounts = PatriciaTrie(root_hash=new_block.previous_block.accounts_hash)
+
+            # we may want to add a timer to process part of the txs only on a slow computer
+            while True:
+                try:
+                    apply_transaction(node, new_block, current_hash)
+                except NotImplementedError:
+                    node._validation_transaction_queue.put(current_hash)
+                    time.sleep(0.5)
+                    break
+                except Exception:
+                    pass
+
+                try:
+                    current_hash = node._validation_transaction_queue.get_nowait()
+                except Empty:
+                    break
+
+            # create an atom list of transactions, save the list head hash as the block's transactions_hash
+            transactions = new_block.transactions or []
+            tx_hashes = [bytes(tx.hash) for tx in transactions if tx.hash]
+            head_hash, _ = bytes_list_to_atoms(tx_hashes)
+            new_block.transactions_hash = head_hash
+
+            receipts = new_block.receipts or []
+            receipt_hashes = [bytes(rcpt.hash) for rcpt in receipts if rcpt.hash]
+            receipts_head, _ = bytes_list_to_atoms(receipt_hashes)
+            new_block.receipts_hash = receipts_head
+
+            # get vdf result, default to 0 for now
+
+            # get timestamp or wait for a the next second from the previous block, rule is the next block must be atleast 1 second after the previous
+            now = time.time()
+            min_allowed = new_block.previous_block.timestamp + 1
+            if now < min_allowed:
+                time.sleep(max(0.0, min_allowed - now))
+                now = time.time()
+            new_block.timestamp = max(int(now), min_allowed)
+
+            # atomize block
+            new_block_hash, _ = new_block.to_atom()
+            # put as own latest block hash
+            node.latest_block_hash = new_block_hash
+
+            # ping peers in the validation route to update there records
+            if node.validation_route and node.outgoing_queue and node.addresses:
+                route_peers = {
+                    peer_key
+                    for bucket in getattr(node.validation_route, "buckets", {}).values()
+                    for peer_key in bucket
+                }
+                if route_peers:
+                    ping_payload = Ping(
+                        is_validator=True,
+                        latest_block=new_block_hash,
+                    ).to_bytes()
+
+                    message_bytes = Message(
+                        topic=MessageTopic.PING,
+                        content=ping_payload,
+                    ).to_bytes()
+
+                    for address, peer_key in node.addresses.items():
+                        if peer_key in route_peers:
+                            try:
+                                node.outgoing_queue.put((message_bytes, address))
+                            except Exception:
+                                pass
+
+            # store the new block and receipts
+
+    return _validation_worker
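The validation worker enforces that a sealed block's timestamp is at least one second after its parent's, sleeping if necessary before stamping the block. A small standalone sketch of that rule; `next_block_timestamp` is a hypothetical helper name used only for illustration:

import time


def next_block_timestamp(previous_timestamp: int) -> int:
    # The candidate block may not carry a timestamp earlier than parent + 1 second.
    min_allowed = previous_timestamp + 1
    now = time.time()
    if now < min_allowed:
        time.sleep(max(0.0, min_allowed - now))
        now = time.time()
    return max(int(now), min_allowed)


# A parent far in the past does not delay sealing; the current time is used directly.
assert next_block_timestamp(0) >= 1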
astreum/_consensus/workers/verify.py
ADDED
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+import time
+from queue import Empty
+from typing import Any, Set
+
+from ..fork import Fork
+
+
+def _process_peers_latest_block(
+    node: Any, latest_block_hash: bytes, peer_ids: Set[Any]
+) -> None:
+    """Assign peers to the fork that matches their reported head."""
+    new_fork = Fork(head=latest_block_hash)
+
+    current_fork_heads = {
+        fk.head for fk in node.forks.values() if fk.head != latest_block_hash
+    }
+
+    new_fork.validate(storage_get=node._local_get, stop_heads=current_fork_heads)
+
+    if new_fork.validated_upto and new_fork.validated_upto in node.forks:
+        ref = node.forks[new_fork.validated_upto]
+        if getattr(ref, "malicious_block_hash", None):
+            return
+        new_fork.root = ref.root
+        new_fork.validated_upto = ref.validated_upto
+        new_fork.chain_fork_position = ref.chain_fork_position
+
+    for peer_id in peer_ids:
+        new_fork.add_peer(peer_id)
+        for head, fork in list(node.forks.items()):
+            if head != latest_block_hash:
+                fork.remove_peer(peer_id)
+
+    node.forks[latest_block_hash] = new_fork
+
+
+def make_verify_worker(node: Any):
+    """Build the verify worker bound to the given node."""
+
+    def _verify_worker() -> None:
+        stop = node._validation_stop_event
+        while not stop.is_set():
+            batch: list[tuple[bytes, Set[Any]]] = []
+            try:
+                while True:
+                    latest_b, peers = node._validation_verify_queue.get_nowait()
+                    batch.append((latest_b, peers))
+            except Empty:
+                pass
+
+            if not batch:
+                time.sleep(0.1)
+                continue
+
+            for latest_b, peers in batch:
+                try:
+                    _process_peers_latest_block(node, latest_b, peers)
+                except Exception:
+                    pass
+
+    return _verify_worker
astreum/_storage/atom.py
CHANGED
@@ -52,11 +52,11 @@ class Atom:
         data = buf[len(ZERO32):]
         return Atom(data=data, next=next_hash, size=len(data))
 
-def expr_to_atoms(e: Expr) -> Tuple[bytes, List[Atom]]:
-    def symbol(value: str) -> Tuple[bytes, List[Atom]]:
-        val = value.encode("utf-8")
-        val_atom = Atom.from_data(data=val)
-        typ_atom = Atom.from_data(b"symbol", val_atom.object_id())
+def expr_to_atoms(e: Expr) -> Tuple[bytes, List[Atom]]:
+    def symbol(value: str) -> Tuple[bytes, List[Atom]]:
+        val = value.encode("utf-8")
+        val_atom = Atom.from_data(data=val)
+        typ_atom = Atom.from_data(b"symbol", val_atom.object_id())
         return typ_atom.object_id(), [val_atom, typ_atom]
 
     def bytes(data: bytes) -> Tuple[bytes, List[Atom]]:
@@ -96,5 +96,22 @@ def expr_to_atoms(e: Expr) -> Tuple[bytes, List[Atom]]:
     if isinstance(e, Expr.Error):
         return err(e.topic, e.origin)
     if isinstance(e, Expr.ListExpr):
-        return lst(e.elements)
-    raise TypeError("unknown Expr variant")
+        return lst(e.elements)
+    raise TypeError("unknown Expr variant")
+
+
+def bytes_list_to_atoms(values: List[bytes]) -> Tuple[bytes, List[Atom]]:
+    """Build a forward-ordered linked list of atoms from byte payloads.
+
+    Returns the head object's hash (ZERO32 if no values) and the atoms created.
+    """
+    next_hash = ZERO32
+    atoms: List[Atom] = []
+
+    for value in reversed(values):
+        atom = Atom.from_data(data=bytes(value), next_hash=next_hash)
+        atoms.append(atom)
+        next_hash = atom.object_id()
+
+    atoms.reverse()
+    return (next_hash if values else ZERO32), atoms