astreum 0.2.40__py3-none-any.whl → 0.2.41__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,141 @@
+
+ from __future__ import annotations
+
+ from typing import Any, Iterable, List, Optional, Tuple
+
+ from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
+
+ from .account import Account
+ from .block import Block
+ from .._storage.atom import Atom, ZERO32
+ from .._storage.patricia import PatriciaTrie, PatriciaNode
+
+ TREASURY_ADDRESS = b"\x01" * 32
+ BURN_ADDRESS = b"\x00" * 32
+
+
+ def _int_to_be_bytes(value: int) -> bytes:
+     if value < 0:
+         raise ValueError("integer fields in genesis must be non-negative")
+     if value == 0:
+         return b"\x00"
+     length = (value.bit_length() + 7) // 8
+     return value.to_bytes(length, "big")
+
+
+ def _make_list(child_ids: List[bytes]) -> Tuple[bytes, List[Atom]]:
+     next_hash = ZERO32
+     chain: List[Atom] = []
+     for child_id in reversed(child_ids):
+         elem = Atom.from_data(data=child_id, next_hash=next_hash)
+         next_hash = elem.object_id()
+         chain.append(elem)
+     chain.reverse()
+
+     value_atom = Atom.from_data(
+         data=len(child_ids).to_bytes(8, "little"),
+         next_hash=next_hash,
+     )
+     type_atom = Atom.from_data(data=b"list", next_hash=value_atom.object_id())
+     atoms = chain + [value_atom, type_atom]
+     return type_atom.object_id(), atoms
+
+
+ def _store_atoms(node: Any, atoms: Iterable[Atom]) -> None:
+     setter = getattr(node, "_local_set", None)
+     if not callable(setter):
+         raise TypeError("node must expose '_local_set(object_id, atom)'")
+     for atom in atoms:
+         setter(atom.object_id(), atom)
+
+
+ def _persist_trie(trie: PatriciaTrie, node: Any) -> None:
+     for patricia_node in trie.nodes.values():
+         _, atoms = patricia_node.to_atoms()
+         _store_atoms(node, atoms)
+
+
+ if not hasattr(PatriciaNode, "to_bytes"):
+     def _patricia_node_to_bytes(self: PatriciaNode) -> bytes:  # type: ignore[no-redef]
+         fields = [
+             bytes([self.key_len]) + self.key,
+             self.child_0 or ZERO32,
+             self.child_1 or ZERO32,
+             self.value or b"",
+         ]
+         encoded: List[bytes] = []
+         for field in fields:
+             encoded.append(len(field).to_bytes(4, "big"))
+             encoded.append(field)
+         return b"".join(encoded)
+
+     PatriciaNode.to_bytes = _patricia_node_to_bytes  # type: ignore[attr-defined]
+
+
+ def create_genesis_block(node: Any, validator_public_key: bytes, validator_secret_key: bytes) -> Block:
+     validator_pk = bytes(validator_public_key)
+
+     if len(validator_pk) != 32:
+         raise ValueError("validator_public_key must be 32 bytes")
+
+     # 1. Stake trie with single validator stake of 1 (encoded on 32 bytes).
+     stake_trie = PatriciaTrie()
+     stake_amount = (1).to_bytes(32, "big")
+     stake_trie.put(node, validator_pk, stake_amount)
+     _persist_trie(stake_trie, node)
+     stake_root = stake_trie.root_hash or ZERO32
+
+     # 2. Account trie with treasury, burn, and validator accounts.
+     accounts_trie = PatriciaTrie()
+
+     treasury_account = Account.create(balance=1, data=stake_root, nonce=0)
+     treasury_account_id, treasury_atoms = treasury_account.to_atom()
+     _store_atoms(node, treasury_atoms)
+     accounts_trie.put(node, TREASURY_ADDRESS, treasury_account_id)
+
+     burn_account = Account.create(balance=0, data=b"", nonce=0)
+     burn_account_id, burn_atoms = burn_account.to_atom()
+     _store_atoms(node, burn_atoms)
+     accounts_trie.put(node, BURN_ADDRESS, burn_account_id)
+
+     validator_account = Account.create(balance=0, data=b"", nonce=0)
+     validator_account_id, validator_atoms = validator_account.to_atom()
+     _store_atoms(node, validator_atoms)
+     accounts_trie.put(node, validator_pk, validator_account_id)
+
+     _persist_trie(accounts_trie, node)
+
+     accounts_root = accounts_trie.root_hash
+     if accounts_root is None:
+         raise ValueError("genesis accounts trie is empty")
+
+     # 3. Assemble block metadata.
+     block = Block()
+     block.previous_block_hash = ZERO32
+     block.number = 0
+     block.timestamp = 0
+     block.accounts_hash = accounts_root
+     block.accounts = accounts_trie
+     block.transactions_total_fees = 0
+     block.transactions_hash = ZERO32
+     block.receipts_hash = ZERO32
+     block.delay_difficulty = 0
+     block.delay_output = b""
+     block.validator_public_key = validator_pk
+     block.transactions = []
+     block.receipts = []
+
+     # 4. Sign the block body with the validator secret key.
+     block.signature = b""
+     block.to_atom()
+
+     if block.body_hash is None:
+         raise ValueError("failed to materialise genesis block body")
+
+     secret = Ed25519PrivateKey.from_private_bytes(validator_secret_key)
+     block.signature = secret.sign(block.body_hash)
+     block_hash, block_atoms = block.to_atom()
+     _store_atoms(node, block_atoms)
+
+     block.hash = block_hash
+     return block
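
For orientation, the new genesis builder could be exercised as in the sketch below. The node stub and the commented module path are assumptions for illustration; only create_genesis_block's signature and the _local_set/_local_get storage contract come from the diff itself.

from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey

# from astreum._consensus.genesis import create_genesis_block  # module path assumed

class InMemoryNode:
    """Hypothetical stub satisfying the storage contract checked by _store_atoms."""

    def __init__(self) -> None:
        self.store: dict[bytes, object] = {}

    def _local_set(self, object_id: bytes, atom: object) -> None:
        self.store[object_id] = atom

    def _local_get(self, object_id: bytes):
        return self.store.get(object_id)

# Generate a raw Ed25519 keypair with the same library the module imports.
secret = Ed25519PrivateKey.generate()
secret_bytes = secret.private_bytes(
    serialization.Encoding.Raw,
    serialization.PrivateFormat.Raw,
    serialization.NoEncryption(),
)
public_bytes = secret.public_key().public_bytes(
    serialization.Encoding.Raw,
    serialization.PublicFormat.Raw,
)

node = InMemoryNode()
# block = create_genesis_block(node, public_bytes, secret_bytes)
# expected: block.number == 0 and block.previous_block_hash == b"\x00" * 32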
@@ -1,6 +1,6 @@
  from __future__ import annotations
 
- from dataclasses import dataclass
+ from dataclasses import dataclass, field
  from typing import Callable, List, Optional, Tuple
 
  from .._storage.atom import Atom, ZERO32
@@ -77,6 +77,8 @@ class Receipt:
      cost: int = 0
      logs: bytes = b""
      status: int = 0
+     hash: bytes = ZERO32
+     atoms: List[Atom] = field(default_factory=list)
 
      def to_atom(self) -> Tuple[bytes, List[Atom]]:
          """Serialise the receipt into Atom storage."""
@@ -109,6 +111,13 @@ class Receipt:
 
          return top_list_id, atoms
 
+     def atomize(self) -> Tuple[bytes, List[Atom]]:
+         """Generate atoms for this receipt and cache them."""
+         receipt_id, atoms = self.to_atom()
+         self.hash = receipt_id
+         self.atoms = atoms
+         return receipt_id, atoms
+
      @classmethod
      def from_atom(
          cls,
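
The caching contract of the new atomize() can be read off directly; a small sketch, assuming a Receipt constructed with the fields used elsewhere in this diff:

receipt = Receipt(
    transaction_hash=b"\x11" * 32,
    cost=0,
    logs=b"",
    status=STATUS_SUCCESS,
)

receipt_id, atoms = receipt.atomize()
assert receipt.hash == receipt_id  # object id cached on the instance
assert receipt.atoms == atoms      # atoms cached for a later storage write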
@@ -164,4 +173,5 @@ class Receipt:
              cost=_be_bytes_to_int(cost_bytes),
              logs=logs_bytes,
              status=status_value,
+             hash=bytes(receipt_id),
          )
@@ -1,15 +1,14 @@
  from __future__ import annotations
 
  import threading
- import time
- from queue import Empty, Queue
- from typing import Any, Dict, Optional, Tuple
+ from queue import Queue
+ from typing import Any
 
- from .block import Block
- from .chain import Chain
- from .fork import Fork
- from .transaction import Transaction, apply_transaction
- from .._storage.atom import ZERO32, Atom
+ from .workers import (
+     make_discovery_worker,
+     make_validation_worker,
+     make_verify_worker,
+ )
 
 
  def current_validator(node: Any) -> bytes:
@@ -47,159 +46,23 @@ def consensus_setup(node: Any) -> None:
 
      node.enqueue_transaction_hash = enqueue_transaction_hash
 
-     def _process_peers_latest_block(latest_block_hash: bytes, peer_ids: set[Any]) -> None:
-         """Assign a peer to a fork for its latest block without merging forks.
-
-         Flow:
-         - Create a new Fork for `latest_block_hash` and validate it, using
-           stop_heads composed of current fork heads to short-circuit when
-           ancestry meets an existing fork head.
-         - If a matching fork head is found and is not malicious, copy its
-           structural fields (root, validated_upto, chain_fork_position) onto
-           the new fork.
-         - Add all peers in `peer_ids` to the new fork and remove each from any
-           previous fork they followed.
-         - Persist the new fork under `node.forks[latest_block_hash]`.
-         """
-         new_fork = Fork(head=latest_block_hash)
-
-         current_fork_heads = {fk.head for fk in node.forks.values() if fk.head != latest_block_hash}
-
-         new_fork.validate(storage_get=node._local_get, stop_heads=current_fork_heads)
-
-         # update new_fork with details of the fork with head of validated_upto
-         if new_fork.validated_upto and new_fork.validated_upto in node.forks:
-             ref = node.forks[new_fork.validated_upto]
-             # if the matched fork is malicious, disregard this new fork entirely
-             if getattr(ref, "malicious_block_hash", None):
-                 return
-             # copy structural fields exactly
-             new_fork.root = ref.root
-             new_fork.validated_upto = ref.validated_upto
-             new_fork.chain_fork_position = ref.chain_fork_position
-
-         # add peers to new fork and remove them from any old forks
-         for peer_id in peer_ids:
-             new_fork.add_peer(peer_id)
-             # Remove this peer from all other forks
-             for h, fk in list(node.forks.items()):
-                 if h != latest_block_hash:
-                     fk.remove_peer(peer_id)
-
-         # persist the fork
-         node.forks[latest_block_hash] = new_fork
-
-
-     # Discovery worker: watches peers and enqueues head changes
-     def _discovery_worker():
-         stop = node._validation_stop_event
-         while not stop.is_set():
-             try:
-                 peers = getattr(node, "peers", None)
-                 if isinstance(peers, dict):
-                     # Snapshot as (peer_id, latest_block_hash) pairs
-                     pairs = [
-                         (peer_id, bytes(latest))
-                         for peer_id, peer in list(peers.items())
-                         if isinstance((latest := getattr(peer, "latest_block", None)), (bytes, bytearray)) and latest
-                     ]
-                     # Group peers by latest block hash
-                     latest_keys = {hb for _, hb in pairs}
-                     grouped: Dict[bytes, set[Any]] = {
-                         hb: {pid for pid, phb in pairs if phb == hb}
-                         for hb in latest_keys
-                     }
-
-                     # Replace queue contents with current groups
-                     try:
-                         while True:
-                             node._validation_verify_queue.get_nowait()
-                     except Empty:
-                         pass
-                     for latest_b, peer_set in grouped.items():
-                         node._validation_verify_queue.put((latest_b, peer_set))
-             except Exception:
-                 pass
-             finally:
-                 time.sleep(0.5)
-
-     # Verification worker: computes root/height and applies peer→fork assignment
-     def _verify_worker():
-         stop = node._validation_stop_event
-         while not stop.is_set():
-             # Take a snapshot of all currently queued groups
-             batch: list[tuple[bytes, set[Any]]] = []
-             try:
-                 while True:
-                     item = node._validation_verify_queue.get_nowait()
-                     batch.append(item)
-             except Empty:
-                 pass
-
-             if not batch:
-                 time.sleep(0.1)
-                 continue
-
-             # Process the snapshot; new items enqueued during processing
-             # will be handled in the next iteration
-             for latest_b, peers in batch:
-                 try:
-                     _process_peers_latest_block(latest_b, peers)
-                 except Exception:
-                     pass
-
-     def _validation_worker() -> None:
-         """Consume pending transactions when scheduled to validate."""
-         stop = node._validation_stop_event
-         while not stop.is_set():
-             validation_public_key = getattr(node, "validation_public_key", None)
-             if not validation_public_key:
-                 time.sleep(0.5)
-                 continue
-
-             scheduled_validator = current_validator(node)
-
-             if scheduled_validator != validation_public_key:
-                 time.sleep(0.5)
-                 continue
-
-             try:
-                 current_hash = node._validation_transaction_queue.get_nowait()
-             except Empty:
-                 time.sleep(0.1)
-                 continue
-
-             new_block = Block()
-             new_block.validator_public_key = getattr(node, "validation_public_key", None)
-
-             while True:
-                 try:
-                     apply_transaction(node, new_block, current_hash)
-                 except NotImplementedError:
-                     node._validation_transaction_queue.put(current_hash)
-                     time.sleep(0.5)
-                     break
-                 except Exception:
-                     # Skip problematic transaction; leave block as-is.
-                     pass
-
-                 try:
-                     current_hash = node._validation_transaction_queue.get_nowait()
-                 except Empty:
-                     break
+     verify_worker = make_verify_worker(node)
+     validation_worker = make_validation_worker(
+         node, current_validator=current_validator
+     )
 
      # Start workers as daemons
+     discovery_worker = make_discovery_worker(node)
      node.consensus_discovery_thread = threading.Thread(
-         target=_discovery_worker, daemon=True, name="consensus-discovery"
+         target=discovery_worker, daemon=True, name="consensus-discovery"
      )
      node.consensus_verify_thread = threading.Thread(
-         target=_verify_worker, daemon=True, name="consensus-verify"
+         target=verify_worker, daemon=True, name="consensus-verify"
      )
      node.consensus_validation_thread = threading.Thread(
-         target=_validation_worker, daemon=True, name="consensus-validation"
+         target=validation_worker, daemon=True, name="consensus-validation"
      )
      node.consensus_discovery_thread.start()
      node.consensus_verify_thread.start()
      if getattr(node, "validation_secret_key", None):
          node.consensus_validation_thread.start()
-
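
The refactor replaces the inline worker functions with factories that close over the node: each make_*_worker returns a zero-argument callable, so threading.Thread needs no extra arguments. A minimal sketch of the pattern in isolation (the names here are illustrative, not from the package):

import threading

def make_worker(node):
    def _worker() -> None:
        # the closure binds `node`; nothing flows through Thread(target=...)
        while not node._validation_stop_event.is_set():
            ...
    return _worker

# thread = threading.Thread(target=make_worker(node), daemon=True, name="example")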
@@ -1,9 +1,10 @@
  from __future__ import annotations
 
  from dataclasses import dataclass
- from typing import Any, Callable, List, Optional, Tuple
+ from typing import Any, List, Optional, Tuple
 
  from .._storage.atom import Atom, ZERO32
+ from .receipt import Receipt, STATUS_SUCCESS
 
 
  def _int_to_be_bytes(value: Optional[int]) -> bytes:
@@ -48,6 +49,7 @@ class Transaction:
      recipient: bytes = b""
      sender: bytes = b""
      signature: bytes = b""
+     hash: bytes = ZERO32
 
      def to_atom(self) -> Tuple[bytes, List[Atom]]:
          """Serialise the transaction, returning (object_id, atoms)."""
@@ -84,9 +86,13 @@ class Transaction:
      @classmethod
      def from_atom(
          cls,
-         storage_get: Callable[[bytes], Optional[Atom]],
+         node: Any,
          transaction_id: bytes,
      ) -> Transaction:
+         storage_get = getattr(node, "_local_get", None)
+         if not callable(storage_get):
+             raise NotImplementedError("node does not expose a storage getter")
+
          top_type_atom = storage_get(transaction_id)
          if top_type_atom is None or top_type_atom.data != b"list":
              raise ValueError("not a transaction (outer list missing)")
@@ -160,9 +166,27 @@ class Transaction:
              recipient=recipient_bytes,
              sender=sender_bytes,
              signature=signature_bytes,
+             hash=bytes(transaction_id),
          )
 
 
  def apply_transaction(node: Any, block: object, transaction_hash: bytes) -> None:
      """Apply transaction to the candidate block. Override downstream."""
-     pass
+     transaction = Transaction.from_atom(node, transaction_hash)
+
+     if block.transactions is None:
+         block.transactions = []
+     block.transactions.append(transaction)
+
+     receipt = Receipt(
+         transaction_hash=bytes(transaction_hash),
+         cost=0,
+         logs=b"",
+         status=STATUS_SUCCESS,
+     )
+     receipt.atomize()
+     if block.receipts is None:
+         block.receipts = []
+     block.receipts.append(receipt)
+
+     # Downstream implementations can extend this to apply state changes.
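
The observable effect of the new default apply_transaction can be checked directly; a sketch assuming `node` already stores a serialised transaction under `tx_hash`:

block = Block()  # Block as used elsewhere in this diff
block.transactions = []
block.receipts = []

apply_transaction(node, block, tx_hash)

assert block.transactions[-1].hash == tx_hash          # transaction recorded
assert block.receipts[-1].transaction_hash == tx_hash  # paired receipt
assert block.receipts[-1].status == STATUS_SUCCESS     # marked successful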
@@ -0,0 +1,9 @@
+ """
+ Worker thread factories for the consensus subsystem.
+ """
+
+ from .discovery import make_discovery_worker
+ from .validation import make_validation_worker
+ from .verify import make_verify_worker
+
+ __all__ = ["make_discovery_worker", "make_verify_worker", "make_validation_worker"]
@@ -0,0 +1,48 @@
+ from __future__ import annotations
+
+ import time
+ from queue import Empty
+ from typing import Any, Dict, Set, Tuple
+
+
+ def make_discovery_worker(node: Any):
+     """
+     Build the discovery worker bound to the given node.
+
+     The returned callable mirrors the previous inline worker in ``setup.py``.
+     """
+
+     def _discovery_worker() -> None:
+         stop = node._validation_stop_event
+         while not stop.is_set():
+             try:
+                 peers = getattr(node, "peers", None)
+                 if isinstance(peers, dict):
+                     pairs: list[Tuple[Any, bytes]] = [
+                         (peer_id, bytes(latest))
+                         for peer_id, peer in list(peers.items())
+                         if isinstance(
+                             (latest := getattr(peer, "latest_block", None)),
+                             (bytes, bytearray),
+                         )
+                         and latest
+                     ]
+                     latest_keys: Set[bytes] = {hb for _, hb in pairs}
+                     grouped: Dict[bytes, set[Any]] = {
+                         hb: {pid for pid, phb in pairs if phb == hb}
+                         for hb in latest_keys
+                     }
+
+                     try:
+                         while True:
+                             node._validation_verify_queue.get_nowait()
+                     except Empty:
+                         pass
+                     for latest_b, peer_set in grouped.items():
+                         node._validation_verify_queue.put((latest_b, peer_set))
+             except Exception:
+                 pass
+             finally:
+                 time.sleep(0.5)
+
+     return _discovery_worker
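
The grouping step at the heart of the worker is ordinary dict and set comprehension work; the same logic, self-contained:

pairs = [
    ("peer-a", b"\xaa" * 32),
    ("peer-b", b"\xaa" * 32),
    ("peer-c", b"\xbb" * 32),
]

# group peer ids by the block head they report
grouped = {
    head: {pid for pid, phead in pairs if phead == head}
    for head in {head for _, head in pairs}
}

assert grouped[b"\xaa" * 32] == {"peer-a", "peer-b"}
assert grouped[b"\xbb" * 32] == {"peer-c"}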
@@ -0,0 +1,122 @@
+ from __future__ import annotations
+
+ import time
+ from queue import Empty
+ from typing import Any, Callable
+
+ from ..block import Block
+ from ..transaction import apply_transaction
+ from ..._storage.atom import bytes_list_to_atoms
+ from ..._storage.patricia import PatriciaTrie
+ from ..._communication.message import Message, MessageTopic
+ from ..._communication.ping import Ping
+
+
+ def make_validation_worker(
+     node: Any,
+     *,
+     current_validator: Callable[[Any], bytes],
+ ) -> Callable[[], None]:
+     """Build the validation worker bound to the given node."""
+
+     def _validation_worker() -> None:
+         stop = node._validation_stop_event
+         while not stop.is_set():
+             validation_public_key = getattr(node, "validation_public_key", None)
+             if not validation_public_key:
+                 time.sleep(0.5)
+                 continue
+
+             scheduled_validator = current_validator(node)
+
+             if scheduled_validator != validation_public_key:
+                 time.sleep(0.5)
+                 continue
+
+             try:
+                 current_hash = node._validation_transaction_queue.get_nowait()
+             except Empty:
+                 time.sleep(0.1)
+                 continue
+
+             # create a thread to perform the VDF
+
+             new_block = Block()
+             new_block.validator_public_key = validation_public_key
+             new_block.previous_block_hash = node.latest_block_hash
+             try:
+                 new_block.previous_block = Block.from_atom(node, new_block.previous_block_hash)
+             except Exception:
+                 continue
+             new_block.accounts = PatriciaTrie(root_hash=new_block.previous_block.accounts_hash)
+
+             # we may want to add a timer so a slow machine processes only part of the transactions
+             while True:
+                 try:
+                     apply_transaction(node, new_block, current_hash)
+                 except NotImplementedError:
+                     node._validation_transaction_queue.put(current_hash)
+                     time.sleep(0.5)
+                     break
+                 except Exception:
+                     pass
+
+                 try:
+                     current_hash = node._validation_transaction_queue.get_nowait()
+                 except Empty:
+                     break
+
+             # create an atom list of transaction hashes and save the list head hash as the block's transactions_hash
+             transactions = new_block.transactions or []
+             tx_hashes = [bytes(tx.hash) for tx in transactions if tx.hash]
+             head_hash, _ = bytes_list_to_atoms(tx_hashes)
+             new_block.transactions_hash = head_hash
+
+             receipts = new_block.receipts or []
+             receipt_hashes = [bytes(rcpt.hash) for rcpt in receipts if rcpt.hash]
+             receipts_head, _ = bytes_list_to_atoms(receipt_hashes)
+             new_block.receipts_hash = receipts_head
+
+             # get the VDF result; default to 0 for now
+
+             # get the timestamp, or wait for the next second after the previous block;
+             # the rule is that the next block must be at least 1 second after the previous one
+             now = time.time()
+             min_allowed = new_block.previous_block.timestamp + 1
+             if now < min_allowed:
+                 time.sleep(max(0.0, min_allowed - now))
+                 now = time.time()
+             new_block.timestamp = max(int(now), min_allowed)
+
+             # atomize the block
+             new_block_hash, _ = new_block.to_atom()
+             # record it as our own latest block hash
+             node.latest_block_hash = new_block_hash
+
+             # ping peers in the validation route to update their records
+             if node.validation_route and node.outgoing_queue and node.addresses:
+                 route_peers = {
+                     peer_key
+                     for bucket in getattr(node.validation_route, "buckets", {}).values()
+                     for peer_key in bucket
+                 }
+                 if route_peers:
+                     ping_payload = Ping(
+                         is_validator=True,
+                         latest_block=new_block_hash,
+                     ).to_bytes()
+
+                     message_bytes = Message(
+                         topic=MessageTopic.PING,
+                         content=ping_payload,
+                     ).to_bytes()
+
+                     for address, peer_key in node.addresses.items():
+                         if peer_key in route_peers:
+                             try:
+                                 node.outgoing_queue.put((message_bytes, address))
+                             except Exception:
+                                 pass
+
+             # store the new block and receipts
+
+     return _validation_worker
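
The block-spacing rule (a block's timestamp must be at least one second after its predecessor's) is easiest to see outside the loop; a worked sketch:

import time

previous_timestamp = 1_700_000_000
min_allowed = previous_timestamp + 1  # earliest legal timestamp for the next block

now = time.time()
if now < min_allowed:
    time.sleep(max(0.0, min_allowed - now))  # wait out the remaining gap
    now = time.time()

timestamp = max(int(now), min_allowed)
assert timestamp >= previous_timestamp + 1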
@@ -0,0 +1,63 @@
+ from __future__ import annotations
+
+ import time
+ from queue import Empty
+ from typing import Any, Set
+
+ from ..fork import Fork
+
+
+ def _process_peers_latest_block(
+     node: Any, latest_block_hash: bytes, peer_ids: Set[Any]
+ ) -> None:
+     """Assign peers to the fork that matches their reported head."""
+     new_fork = Fork(head=latest_block_hash)
+
+     current_fork_heads = {
+         fk.head for fk in node.forks.values() if fk.head != latest_block_hash
+     }
+
+     new_fork.validate(storage_get=node._local_get, stop_heads=current_fork_heads)
+
+     if new_fork.validated_upto and new_fork.validated_upto in node.forks:
+         ref = node.forks[new_fork.validated_upto]
+         if getattr(ref, "malicious_block_hash", None):
+             return
+         new_fork.root = ref.root
+         new_fork.validated_upto = ref.validated_upto
+         new_fork.chain_fork_position = ref.chain_fork_position
+
+     for peer_id in peer_ids:
+         new_fork.add_peer(peer_id)
+         for head, fork in list(node.forks.items()):
+             if head != latest_block_hash:
+                 fork.remove_peer(peer_id)
+
+     node.forks[latest_block_hash] = new_fork
+
+
+ def make_verify_worker(node: Any):
+     """Build the verify worker bound to the given node."""
+
+     def _verify_worker() -> None:
+         stop = node._validation_stop_event
+         while not stop.is_set():
+             batch: list[tuple[bytes, Set[Any]]] = []
+             try:
+                 while True:
+                     latest_b, peers = node._validation_verify_queue.get_nowait()
+                     batch.append((latest_b, peers))
+             except Empty:
+                 pass
+
+             if not batch:
+                 time.sleep(0.1)
+                 continue
+
+             for latest_b, peers in batch:
+                 try:
+                     _process_peers_latest_block(node, latest_b, peers)
+                 except Exception:
+                     pass
+
+     return _verify_worker
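
The verify worker's drain-then-process idiom (snapshot everything queued now, leave later arrivals for the next iteration) in a self-contained form:

from queue import Empty, Queue

q: Queue = Queue()
q.put((b"\x01" * 32, {"peer-a"}))
q.put((b"\x02" * 32, {"peer-b"}))

batch = []
try:
    while True:  # drain whatever is queued right now
        batch.append(q.get_nowait())
except Empty:
    pass  # queue exhausted; `batch` is the snapshot to process

assert len(batch) == 2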