astreum 0.2.36__py3-none-any.whl → 0.2.38__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of astreum might be problematic. Click here for more details.

@@ -0,0 +1,296 @@
1
+
2
+ from typing import Callable, List, Optional, Tuple
3
+
4
+ from .._storage.atom import Atom, ZERO32
5
+ from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PublicKey
6
+ from cryptography.exceptions import InvalidSignature
7
+
8
+
9
+ def _int_to_be_bytes(n: Optional[int]) -> bytes:
10
+ if n is None:
11
+ return b""
12
+ n = int(n)
13
+ if n == 0:
14
+ return b"\x00"
15
+ size = (n.bit_length() + 7) // 8
16
+ return n.to_bytes(size, "big")
17
+
18
+
19
+ def _be_bytes_to_int(b: Optional[bytes]) -> int:
20
+ if not b:
21
+ return 0
22
+ return int.from_bytes(b, "big")
23
+
24
+
25
def _make_typed_bytes(data: bytes) -> Tuple[bytes, List[Atom]]:
    """Create a typed 'byte' atom for the given payload.

    Returns (object_id, atoms_in_dependency_order): the value atom first,
    followed by the 'byte' type atom that points at it.
    """
    value_atom = Atom.from_data(data=data)
    type_atom = Atom.from_data(data=b"byte", next_hash=value_atom.object_id())
    return type_atom.object_id(), [value_atom, type_atom]
33
+
34
+
35
def _make_list(child_ids: List[bytes]) -> Tuple[bytes, List[Atom]]:
    """Create a typed 'list' atom for child object ids.

    Encodes elements as a linked chain of element-atoms with data=child_id and
    next pointing to the next element's object id. The list value atom contains
    the element count (little-endian u64) and points to the head of the element
    chain. The type atom identifies the structure as a list.

    Returns (object_id, atoms_in_dependency_order).
    """
    # Fix: dropped the dead `acc` accumulator that was always empty but still
    # concatenated into the returned atom list.
    elem_atoms: List[Atom] = []
    next_hash = ZERO32
    # Build element chain in reverse, then flip to maintain forward order
    for h in reversed(child_ids):
        a = Atom.from_data(data=h, next_hash=next_hash)
        next_hash = a.object_id()
        elem_atoms.append(a)
    elem_atoms.reverse()
    head = next_hash
    val = Atom.from_data(data=len(child_ids).to_bytes(8, "little"), next_hash=head)
    typ = Atom.from_data(data=b"list", next_hash=val.object_id())
    return typ.object_id(), elem_atoms + [val, typ]
56
+
57
+
58
class Block:
    """Validation Block representation using Atom storage.

    Top-level encoding:
        block_id = list([ type_atom, body_list, signature_atom ])
        where: type_atom = Atom(data=b"block", next=body_list_id)
            body_list = list([...details...])
            signature_atom = Atom(data=<signature-bytes>)

    Details order in body_list:
        0: previous_block (bytes)
        1: number (int → big-endian bytes)
        2: timestamp (int → big-endian bytes)
        3: accounts_hash (bytes)
        4: transactions_total_fees (int → big-endian bytes)
        5: transactions_root_hash (bytes)
        6: delay_difficulty (int → big-endian bytes)
        7: delay_output (bytes)
        8: validator_public_key (bytes)

    Notes:
    - "body tree" is represented here by the body_list id (self.body_hash), not
      embedded again as a field to avoid circular references.
    - "signature" is a field on the class but is not required for validation
      navigation; include it in the instance but it is not encoded in atoms
      unless explicitly provided via details extension in the future.
    """

    # essential identifiers
    hash: bytes
    previous_block: bytes

    # block details
    number: Optional[int]
    timestamp: Optional[int]
    accounts_hash: Optional[bytes]
    transactions_total_fees: Optional[int]
    transactions_root_hash: Optional[bytes]
    delay_difficulty: Optional[int]
    delay_output: Optional[bytes]
    validator_public_key: Optional[bytes]

    # additional
    body_hash: Optional[bytes]
    signature: Optional[bytes]

    def __init__(self) -> None:
        # defaults for safety
        self.hash = b""
        self.previous_block = ZERO32
        self.number = None
        self.timestamp = None
        self.accounts_hash = None
        self.transactions_total_fees = None
        self.transactions_root_hash = None
        self.delay_difficulty = None
        self.delay_output = None
        self.validator_public_key = None
        self.body_hash = None
        self.signature = None

    def to_atom(self) -> Tuple[bytes, List[Atom]]:
        """Encode this block into atoms.

        Returns (block_id, atoms) where atoms are in dependency order so they
        can be persisted sequentially. Side effects: updates self.body_hash
        and self.hash with the freshly computed ids. Missing optional fields
        encode as empty payloads (or ZERO32 for previous_block).
        """
        # Build body details as typed bytes, in defined order
        details_ids: List[bytes] = []
        atoms_acc: List[Atom] = []

        def _emit(detail_bytes: bytes) -> None:
            # Append one typed 'byte' detail and accumulate its atoms.
            oid, ats = _make_typed_bytes(detail_bytes)
            details_ids.append(oid)
            atoms_acc.extend(ats)

        # 0: previous_block
        _emit(self.previous_block or ZERO32)
        # 1: number
        _emit(_int_to_be_bytes(self.number))
        # 2: timestamp
        _emit(_int_to_be_bytes(self.timestamp))
        # 3: accounts_hash
        _emit(self.accounts_hash or b"")
        # 4: transactions_total_fees
        _emit(_int_to_be_bytes(self.transactions_total_fees))
        # 5: transactions_root_hash
        _emit(self.transactions_root_hash or b"")
        # 6: delay_difficulty
        _emit(_int_to_be_bytes(self.delay_difficulty))
        # 7: delay_output
        _emit(self.delay_output or b"")
        # 8: validator_public_key
        _emit(self.validator_public_key or b"")

        # Build body list
        body_id, body_atoms = _make_list(details_ids)
        atoms_acc.extend(body_atoms)
        self.body_hash = body_id

        # Type atom points to body list
        type_atom = Atom.from_data(data=b"block", next_hash=body_id)

        # Signature atom (raw byte payload)
        sig_atom = Atom.from_data(data=self.signature or b"", next_hash=ZERO32)

        # Main block list: [type_atom, body_list, signature]
        main_id, main_atoms = _make_list([type_atom.object_id(), body_id, sig_atom.object_id()])
        atoms_acc.append(type_atom)
        atoms_acc.append(sig_atom)
        atoms_acc.extend(main_atoms)

        self.hash = main_id
        return self.hash, atoms_acc

    @classmethod
    def from_atom(cls, storage_get: Callable[[bytes], Optional[Atom]], block_id: bytes) -> "Block":
        """Decode a Block from storage by its id.

        Raises ValueError on structural problems (missing list/type atoms or
        broken linkage). Missing detail atoms are tolerated and decode as
        empty bytes, so optional fields fall back to None/0.
        """
        # 1) Expect main list
        main_typ = storage_get(block_id)
        if main_typ is None or main_typ.data != b"list":
            raise ValueError("not a block (main list missing)")
        main_val = storage_get(main_typ.next)
        if main_val is None:
            raise ValueError("malformed block list (missing value)")
        # length is little-endian u64 per storage format
        if len(main_val.data) < 1:
            raise ValueError("malformed block list (length)")
        head = main_val.next

        # read first 2 elements: [type_atom_id, body_list_id]
        first_elem = storage_get(head)
        if first_elem is None:
            raise ValueError("malformed block list (head element)")
        type_atom_id = first_elem.data
        second_elem = storage_get(first_elem.next)
        if second_elem is None:
            raise ValueError("malformed block list (second element)")
        body_list_id = second_elem.data
        # optional 3rd element: signature atom id
        third_elem = storage_get(second_elem.next) if second_elem.next else None
        sig_atom_id: Optional[bytes] = third_elem.data if third_elem is not None else None

        # 2) Validate type atom and linkage to body
        type_atom = storage_get(type_atom_id)
        if type_atom is None or type_atom.data != b"block" or type_atom.next != body_list_id:
            raise ValueError("not a block (type atom)")

        # 3) Parse body list of details
        body_typ = storage_get(body_list_id)
        if body_typ is None or body_typ.data != b"list":
            raise ValueError("malformed body (type)")
        body_val = storage_get(body_typ.next)
        if body_val is None:
            raise ValueError("malformed body (value)")
        cur_elem_id = body_val.next

        def _read_typed_bytes(elem_id: bytes) -> bytes:
            # Resolve one element's typed 'byte' payload; missing or
            # mistyped atoms decode as empty bytes rather than raising.
            elem = storage_get(elem_id)
            if elem is None:
                return b""
            child_id = elem.data
            typ = storage_get(child_id)
            if typ is None or typ.data != b"byte":
                return b""
            val = storage_get(typ.next)
            return val.data if val is not None else b""

        details: List[bytes] = []
        # We read up to 9 fields if present
        for _ in range(9):
            if not cur_elem_id:
                break
            b = _read_typed_bytes(cur_elem_id)
            details.append(b)
            nxt = storage_get(cur_elem_id)
            cur_elem_id = nxt.next if nxt is not None else b""

        b = cls()
        b.hash = block_id
        b.body_hash = body_list_id

        # Map details back per the defined order
        get = lambda i: details[i] if i < len(details) else b""
        b.previous_block = get(0) or ZERO32
        # NOTE(review): int fields decode to 0 (not None) when absent, so
        # "missing" and "zero" are indistinguishable after a round trip.
        b.number = _be_bytes_to_int(get(1))
        b.timestamp = _be_bytes_to_int(get(2))
        b.accounts_hash = get(3) or None
        b.transactions_total_fees = _be_bytes_to_int(get(4))
        b.transactions_root_hash = get(5) or None
        b.delay_difficulty = _be_bytes_to_int(get(6))
        b.delay_output = get(7) or None
        b.validator_public_key = get(8) or None

        # 4) Parse signature if present (supports raw or typed 'byte' atom)
        # NOTE(review): a raw signature whose bytes happen to equal b"byte"
        # would be misread here as a typed atom — confirm this cannot occur.
        if sig_atom_id is not None:
            sa = storage_get(sig_atom_id)
            if sa is not None:
                if sa.data == b"byte":
                    sval = storage_get(sa.next)
                    b.signature = sval.data if sval is not None else b""
                else:
                    b.signature = sa.data

        return b

    def validate(self, storage_get: Callable[[bytes], Optional[Atom]]) -> bool:
        """Validate this block against storage.

        Checks:
        - Signature: signature must verify over the body list id using the
          validator's public key.
        - Timestamp monotonicity: if previous block exists (not ZERO32), this
          block's timestamp must be >= previous.timestamp + 1.

        Returns False when the block is unverifiable (missing fields or
        unloadable previous block); raises ValueError when the block is
        positively invalid (bad signature, non-monotonic timestamp).
        """
        # Unverifiable if critical fields are missing
        if not self.body_hash:
            return False
        if not self.signature:
            return False
        if not self.validator_public_key:
            return False
        if self.timestamp is None:
            return False

        # 1) Signature check over body hash
        try:
            pub = Ed25519PublicKey.from_public_bytes(bytes(self.validator_public_key))
            pub.verify(self.signature, self.body_hash)
        except InvalidSignature as e:
            raise ValueError("invalid signature") from e

        # 2) Timestamp monotonicity against previous block
        if self.previous_block and self.previous_block != ZERO32:
            # If previous block cannot be loaded, treat as unverifiable, not malicious
            try:
                prev = Block.from_atom(storage_get, self.previous_block)
            except Exception:
                return False
            prev_ts = int(prev.timestamp or 0)
            cur_ts = int(self.timestamp or 0)
            if cur_ts < prev_ts + 1:
                raise ValueError("timestamp must be at least prev+1")

        return True
@@ -0,0 +1,63 @@
1
+ # chain.py
2
+ from typing import Callable, Dict, Optional
3
+ from .block import Block
4
+ from .._storage.atom import ZERO32, Atom
5
+
6
class Chain:
    """Tracks a chain of blocks from a head block back toward genesis."""

    def __init__(self, head_block: Block):
        self.head_block = head_block
        self.validated_upto_block = None
        # Root (genesis) hash for this chain; set by validation setup when known
        self.root: Optional[bytes] = None
        # Fork position: the head hash of the default/current fork for this chain
        self.fork_position: Optional[bytes] = getattr(head_block, "hash", None)
        # First malicious block encountered during validation; None means not found
        self.malicious_block_hash: Optional[bytes] = None

    def validate(self, storage_get: Callable[[bytes], Atom]) -> Block:
        """Validate the chain from head to genesis and return the root block.

        Runs each block's own validation (signature over the body plus
        timestamp monotonicity) while walking backwards. Both atoms and
        decoded blocks are cached for the duration of the pass so nothing
        is fetched or parsed twice.
        """
        fetched_atoms: Dict[bytes, Optional[Atom]] = {}
        decoded_blocks: Dict[bytes, Block] = {}

        def fetch(key: bytes) -> Optional[Atom]:
            # Cache-aware atom lookup.
            if key not in fetched_atoms:
                fetched_atoms[key] = storage_get(key)
            return fetched_atoms[key]

        def decode(bid: bytes) -> Block:
            # Cache-aware block decoding on top of the atom cache.
            cached = decoded_blocks.get(bid)
            if cached is not None:
                return cached
            decoded = Block.from_atom(fetch, bid)  # type: ignore[arg-type]
            decoded_blocks[bid] = decoded
            return decoded

        current = self.head_block
        # Seed the cache with the head when it already carries its hash
        if getattr(current, "hash", None):
            decoded_blocks[current.hash] = current  # type: ignore[attr-defined]

        # Walk back, validating each block in turn
        while True:
            try:
                current.validate(fetch)  # may decode previous but uses cached atoms
            except Exception:
                # Record the first failure point, then let the error propagate
                self.malicious_block_hash = getattr(current, "hash", None)
                raise

            if current.previous_block == ZERO32:
                break
            current = decode(current.previous_block)

        self.validated_upto_block = current
        return current
@@ -0,0 +1,98 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Optional, Set, Any, Callable, Dict
4
+ from .block import Block
5
+ from .._storage.atom import ZERO32, Atom
6
+
7
+
8
class Fork:
    """A branch head within a Chain (same root).

    - head: current tip block id (bytes)
    - peers: identifiers (e.g., peer pubkey objects) following this head
    - root: genesis block id for this chain (optional)
    - validated_upto: earliest verified ancestor (optional)
    - chain_fork_position: the chain's fork anchor relevant to this fork
    """

    def __init__(
        self,
        head: bytes,
    ) -> None:
        self.head: bytes = head
        self.peers: Set[Any] = set()
        self.root: Optional[bytes] = None
        self.validated_upto: Optional[bytes] = None
        self.chain_fork_position: Optional[bytes] = None
        # Mark the first block found malicious during validation; None means not found
        self.malicious_block_hash: Optional[bytes] = None

    def add_peer(self, peer_id: Any) -> None:
        # Register a peer as following this fork head.
        self.peers.add(peer_id)

    def remove_peer(self, peer_id: Any) -> None:
        # Forget a peer; no-op if it was not following this fork.
        self.peers.discard(peer_id)

    def validate(
        self,
        storage_get: Callable[[bytes], Optional[object]],
        stop_heads: Optional[Set[bytes]] = None,
    ) -> bool:
        """Validate only up to the chain fork position, not genesis.

        Returns True if self.head descends from self.chain_fork_position (or if
        chain_fork_position is None/equals head), and updates validated_upto to
        that anchor. If stop_heads is provided, returns True early if ancestry
        reaches any of those heads, setting validated_upto to the matched head.
        Returns False if ancestry cannot be confirmed.

        On a positively invalid block, malicious_block_hash records the first
        failing block id and the walk stops with False.
        """
        # Trivial cases: no anchor to reach, or the head is the anchor itself
        if self.chain_fork_position is None or self.chain_fork_position == self.head:
            self.validated_upto = self.head
            return True
        # Caches to avoid double fetching/decoding
        atom_cache: Dict[bytes, Optional[Atom]] = {}
        block_cache: Dict[bytes, Block] = {}

        def get_cached(k: bytes) -> Optional[Atom]:
            # Cache-aware atom lookup for this validation pass.
            if k in atom_cache:
                return atom_cache[k]
            a = storage_get(k)  # type: ignore[call-arg]
            atom_cache[k] = a  # may be None if missing
            return a

        def load_block(bid: bytes) -> Optional[Block]:
            # Decode a block via the atom cache; None on missing/undecodable data.
            if bid in block_cache:
                return block_cache[bid]
            try:
                b = Block.from_atom(get_cached, bid)  # type: ignore[arg-type]
            except Exception:
                return None
            block_cache[bid] = b
            return b

        blk = load_block(self.head)
        if blk is None:
            # Missing head data: unverifiable, not malicious
            return False
        # Walk up to fork anchor, validating each block signature + timestamp
        while True:
            try:
                blk.validate(get_cached)  # type: ignore[arg-type]
            except Exception:
                # mark the first failure point
                self.malicious_block_hash = blk.hash
                return False

            # Early-exit if we met another known fork head
            # (checked before the anchor, so a stop head shadows the anchor)
            if stop_heads and blk.hash in stop_heads:
                self.validated_upto = blk.hash
                return True

            if blk.hash == self.chain_fork_position:
                self.validated_upto = blk.hash
                return True

            nxt = load_block(blk.previous_block)
            if nxt is None:
                # Broken or incomplete ancestry: unverifiable
                return False
            blk = nxt
File without changes
@@ -0,0 +1,221 @@
1
+ from __future__ import annotations
2
+
3
+ import threading
4
+ import time
5
+ from dataclasses import dataclass
6
+ from queue import Empty, Queue
7
+ from typing import Any, Dict, Optional, Tuple
8
+
9
+ from cryptography.hazmat.primitives import serialization
10
+
11
+ from .block import Block
12
+ from .chain import Chain
13
+ from .fork import Fork
14
+ from .._storage.atom import ZERO32, Atom
15
+
16
+
17
@dataclass
class Transaction:
    """Lightweight transaction view for validation processing.

    Field semantics follow the field names; the exact byte encodings are
    defined by the host node — NOTE(review): confirm against the
    transaction codec used by callers.
    """

    recipient: bytes
    sender: bytes
    amount: int
    counter: int
25
+
26
+
27
def current_validator(node: Any) -> bytes:
    """Return the current validator identifier.

    Placeholder hook: the host node is expected to override this.
    """
    message = "current_validator must be implemented by the host node"
    raise NotImplementedError(message)
30
+
31
+
32
def apply_transaction(node: Any, block: object, transaction_hash: bytes) -> None:
    """Apply transaction to the candidate block.

    Default implementation is a no-op; the host node overrides this.
    """
    return None
35
+
36
+
37
def validation_setup(node: Any) -> None:
    """Attach validation state, helpers and worker threads to *node*.

    Initializes (idempotently, via getattr defaults) the lock, chain/fork
    maps, work queues and stop event; installs the public
    ``enqueue_transaction_hash`` hook; then starts the discovery and verify
    worker threads as daemons. The block-production worker is started only
    when ``node.validation_secret_key`` is set.
    """
    # Shared state
    node.validation_lock = getattr(node, "validation_lock", threading.RLock())

    # Public maps per your spec
    # - chains: Dict[root, Chain]
    # - forks: Dict[head, Fork]
    node.chains = getattr(node, "chains", {})
    node.forks = getattr(node, "forks", {})

    # Pending transactions queue (hash-only entries)
    node._validation_transaction_queue = getattr(
        node, "_validation_transaction_queue", Queue()
    )
    # Single work queue of grouped items: (latest_block_hash, set(peer_ids))
    node._validation_verify_queue = getattr(
        node, "_validation_verify_queue", Queue()
    )
    node._validation_stop_event = getattr(
        node, "_validation_stop_event", threading.Event()
    )

    def enqueue_transaction_hash(tx_hash: bytes) -> None:
        """Schedule a transaction hash for validation processing."""
        if not isinstance(tx_hash, (bytes, bytearray)):
            raise TypeError("transaction hash must be bytes-like")
        node._validation_transaction_queue.put(bytes(tx_hash))

    node.enqueue_transaction_hash = enqueue_transaction_hash

    def _process_peers_latest_block(latest_block_hash: bytes, peer_ids: set[Any]) -> None:
        """Assign a peer to a fork for its latest block without merging forks.

        Flow:
        - Create a new Fork for `latest_block_hash` and validate it, using
          stop_heads composed of current fork heads to short-circuit when
          ancestry meets an existing fork head.
        - If a matching fork head is found and is not malicious, copy its
          structural fields (root, validated_upto, chain_fork_position) onto
          the new fork.
        - Add all peers in `peer_ids` to the new fork and remove each from any
          previous fork they followed.
        - Persist the new fork under `node.forks[latest_block_hash]`.
        """
        new_fork = Fork(head=latest_block_hash)

        # Every other known fork head can short-circuit the ancestry walk
        current_fork_heads = {fk.head for fk in node.forks.values() if fk.head != latest_block_hash}

        # NOTE(review): node._local_get is assumed to be the node's local
        # atom lookup — confirm against the node implementation.
        new_fork.validate(storage_get=node._local_get, stop_heads=current_fork_heads)

        # update new_fork with details of the fork with head of validated_upto
        if new_fork.validated_upto and new_fork.validated_upto in node.forks:
            ref = node.forks[new_fork.validated_upto]
            # if the matched fork is malicious, disregard this new fork entirely
            if getattr(ref, "malicious_block_hash", None):
                return
            # copy structural fields exactly
            new_fork.root = ref.root
            new_fork.validated_upto = ref.validated_upto
            new_fork.chain_fork_position = ref.chain_fork_position

        # add peers to new fork and remove them from any old forks
        for peer_id in peer_ids:
            new_fork.add_peer(peer_id)
            # Remove this peer from all other forks
            for h, fk in list(node.forks.items()):
                if h != latest_block_hash:
                    fk.remove_peer(peer_id)

        # persist the fork
        node.forks[latest_block_hash] = new_fork


    # Discovery worker: watches peers and enqueues head changes
    def _discovery_worker():
        stop = node._validation_stop_event
        while not stop.is_set():
            try:
                peers = getattr(node, "peers", None)
                if isinstance(peers, dict):
                    # Snapshot as (peer_id, latest_block_hash) pairs
                    pairs = [
                        (peer_id, bytes(latest))
                        for peer_id, peer in list(peers.items())
                        if isinstance((latest := getattr(peer, "latest_block", None)), (bytes, bytearray)) and latest
                    ]
                    # Group peers by latest block hash
                    latest_keys = {hb for _, hb in pairs}
                    grouped: Dict[bytes, set[Any]] = {
                        hb: {pid for pid, phb in pairs if phb == hb}
                        for hb in latest_keys
                    }

                    # Replace queue contents with current groups
                    # NOTE(review): drain-then-refill races with _verify_worker;
                    # appears intentional (latest snapshot wins) — confirm.
                    try:
                        while True:
                            node._validation_verify_queue.get_nowait()
                    except Empty:
                        pass
                    for latest_b, peer_set in grouped.items():
                        node._validation_verify_queue.put((latest_b, peer_set))
            except Exception:
                # Best-effort loop: discovery errors are ignored and retried
                pass
            finally:
                time.sleep(0.5)

    # Verification worker: computes root/height and applies peer→fork assignment
    def _verify_worker():
        stop = node._validation_stop_event
        while not stop.is_set():
            # Take a snapshot of all currently queued groups
            batch: list[tuple[bytes, set[Any]]] = []
            try:
                while True:
                    item = node._validation_verify_queue.get_nowait()
                    batch.append(item)
            except Empty:
                pass

            if not batch:
                time.sleep(0.1)
                continue

            # Process the snapshot; new items enqueued during processing
            # will be handled in the next iteration
            for latest_b, peers in batch:
                try:
                    _process_peers_latest_block(latest_b, peers)
                except Exception:
                    # Best-effort: a failing group never blocks the others
                    pass

    def _validation_worker() -> None:
        """Consume pending transactions when scheduled to validate."""
        stop = node._validation_stop_event
        while not stop.is_set():
            validation_public_key = getattr(node, "validation_public_key", None)
            if not validation_public_key:
                time.sleep(0.5)
                continue

            scheduled_validator = current_validator(node)

            # Only produce when this node is the scheduled validator
            if scheduled_validator != validation_public_key:
                time.sleep(0.5)
                continue

            try:
                current_hash = node._validation_transaction_queue.get_nowait()
            except Empty:
                time.sleep(0.1)
                continue

            # Start a fresh candidate block attributed to this validator
            new_block = Block()
            new_block.validator_public_key = getattr(node, "validation_public_key", None)

            # Drain the pending-transaction queue into the candidate block
            while True:
                try:
                    apply_transaction(node, new_block, current_hash)
                except NotImplementedError:
                    # Host has not wired apply_transaction yet: requeue and back off
                    node._validation_transaction_queue.put(current_hash)
                    time.sleep(0.5)
                    break
                except Exception:
                    # Skip problematic transaction; leave block as-is.
                    pass

                try:
                    current_hash = node._validation_transaction_queue.get_nowait()
                except Empty:
                    break

    # Start workers as daemons
    node.validation_discovery_thread = threading.Thread(
        target=_discovery_worker, daemon=True, name="validation-discovery"
    )
    node.validation_verify_thread = threading.Thread(
        target=_verify_worker, daemon=True, name="validation-verify"
    )
    node.validation_worker_thread = threading.Thread(
        target=_validation_worker, daemon=True, name="validation-worker"
    )
    node.validation_discovery_thread.start()
    node.validation_verify_thread.start()
    # Block production only runs when a signing key is configured
    if getattr(node, "validation_secret_key", None):
        node.validation_worker_thread.start()