astreum 0.2.36__py3-none-any.whl → 0.2.37__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of astreum might be problematic. Click here for more details.
- astreum/__init__.py +9 -1
- astreum/_communication/__init__.py +9 -0
- astreum/_communication/peer.py +11 -0
- astreum/_communication/route.py +25 -0
- astreum/_communication/setup.py +104 -0
- astreum/_lispeum/__init__.py +3 -3
- astreum/_lispeum/environment.py +5 -2
- astreum/_lispeum/expression.py +7 -7
- astreum/_lispeum/high_evaluation.py +8 -8
- astreum/_lispeum/low_evaluation.py +10 -8
- astreum/_lispeum/parser.py +25 -9
- astreum/_node.py +24 -113
- astreum/_storage/__init__.py +5 -0
- astreum/_storage/atom.py +100 -0
- astreum/_validation/__init__.py +12 -0
- astreum/_validation/block.py +296 -0
- astreum/_validation/chain.py +63 -0
- astreum/_validation/fork.py +98 -0
- astreum/_validation/genesis.py +0 -0
- astreum/_validation/setup.py +141 -0
- astreum/models/block.py +18 -9
- {astreum-0.2.36.dist-info → astreum-0.2.37.dist-info}/METADATA +4 -2
- {astreum-0.2.36.dist-info → astreum-0.2.37.dist-info}/RECORD +26 -14
- {astreum-0.2.36.dist-info → astreum-0.2.37.dist-info}/WHEEL +0 -0
- {astreum-0.2.36.dist-info → astreum-0.2.37.dist-info}/licenses/LICENSE +0 -0
- {astreum-0.2.36.dist-info → astreum-0.2.37.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,296 @@
|
|
|
1
|
+
|
|
2
|
+
from typing import Callable, List, Optional, Tuple
|
|
3
|
+
|
|
4
|
+
from .._storage.atom import Atom, ZERO32
|
|
5
|
+
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PublicKey
|
|
6
|
+
from cryptography.exceptions import InvalidSignature
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def _int_to_be_bytes(n: Optional[int]) -> bytes:
|
|
10
|
+
if n is None:
|
|
11
|
+
return b""
|
|
12
|
+
n = int(n)
|
|
13
|
+
if n == 0:
|
|
14
|
+
return b"\x00"
|
|
15
|
+
size = (n.bit_length() + 7) // 8
|
|
16
|
+
return n.to_bytes(size, "big")
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def _be_bytes_to_int(b: Optional[bytes]) -> int:
|
|
20
|
+
if not b:
|
|
21
|
+
return 0
|
|
22
|
+
return int.from_bytes(b, "big")
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def _make_typed_bytes(data: bytes) -> Tuple[bytes, List[Atom]]:
    """Create a typed 'byte' atom for the given payload.

    Returns (object_id, atoms_in_dependency_order): the payload atom first,
    then the b"byte" tag atom that points at it.
    """
    payload = Atom.from_data(data=data)
    tag = Atom.from_data(data=b"byte", next_hash=payload.object_id())
    return tag.object_id(), [payload, tag]
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def _make_list(child_ids: List[bytes]) -> Tuple[bytes, List[Atom]]:
    """Create a typed 'list' atom for child object ids.

    Encodes elements as a linked chain of element-atoms with data=child_id and
    next pointing to the next element's object id. The list value atom contains
    the element count and points to the head of the element chain. The type atom
    identifies the structure as a list.

    Returns (object_id, atoms_in_dependency_order).
    """
    # Fix: dropped the dead `acc` local (it was always empty and merely
    # concatenated into the return value); behavior is unchanged.
    next_hash = ZERO32
    elem_atoms: List[Atom] = []
    # Build element chain in reverse so each atom can reference its successor,
    # then flip to maintain forward (dependency) order.
    for h in reversed(child_ids):
        a = Atom.from_data(data=h, next_hash=next_hash)
        next_hash = a.object_id()
        elem_atoms.append(a)
    elem_atoms.reverse()
    head = next_hash
    # List value atom: element count as little-endian u64, pointing at the chain head.
    val = Atom.from_data(data=len(child_ids).to_bytes(8, "little"), next_hash=head)
    typ = Atom.from_data(data=b"list", next_hash=val.object_id())
    return typ.object_id(), elem_atoms + [val, typ]
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
class Block:
    """Validation Block representation using Atom storage.

    Top-level encoding:
        block_id = list([ type_atom, body_list, signature_atom ])
        where: type_atom = Atom(data=b"block", next=body_list_id)
               body_list = list([...details...])
               signature_atom = Atom(data=<signature-bytes>)

    Details order in body_list:
        0: previous_block (bytes)
        1: number (int -> big-endian bytes)
        2: timestamp (int -> big-endian bytes)
        3: accounts_hash (bytes)
        4: transactions_total_fees (int -> big-endian bytes)
        5: transactions_root_hash (bytes)
        6: delay_difficulty (int -> big-endian bytes)
        7: delay_output (bytes)
        8: validator_public_key (bytes)

    Notes:
    - "body tree" is represented here by the body_list id (self.body_hash), not
      embedded again as a field to avoid circular references.
    - "signature" is a field on the class but is not required for validation
      navigation; include it in the instance but it is not encoded in atoms
      unless explicitly provided via details extension in the future.
    """

    # essential identifiers
    hash: bytes
    previous_block: bytes

    # block details (None = not yet decoded / not provided)
    number: Optional[int]
    timestamp: Optional[int]
    accounts_hash: Optional[bytes]
    transactions_total_fees: Optional[int]
    transactions_root_hash: Optional[bytes]
    delay_difficulty: Optional[int]
    delay_output: Optional[bytes]
    validator_public_key: Optional[bytes]

    # additional
    body_hash: Optional[bytes]
    signature: Optional[bytes]

    def __init__(self) -> None:
        # Defaults for safety: empty hash, zero previous-block pointer, and
        # None for every optional detail until decoded or assigned.
        self.hash = b""
        self.previous_block = ZERO32
        self.number = None
        self.timestamp = None
        self.accounts_hash = None
        self.transactions_total_fees = None
        self.transactions_root_hash = None
        self.delay_difficulty = None
        self.delay_output = None
        self.validator_public_key = None
        self.body_hash = None
        self.signature = None

    def to_atom(self) -> Tuple[bytes, List[Atom]]:
        """Encode this block into atoms.

        Returns (block_id, atoms): the top-level list id and every atom that
        must be stored for the block to be decodable. Also updates
        ``self.body_hash`` and ``self.hash`` as a side effect.
        """
        # Build body details as typed bytes, in defined order
        details_ids: List[bytes] = []
        atoms_acc: List[Atom] = []

        def _emit(detail_bytes: bytes) -> None:
            # Wrap one detail as a typed 'byte' atom and record its id + atoms.
            oid, ats = _make_typed_bytes(detail_bytes)
            details_ids.append(oid)
            atoms_acc.extend(ats)

        # 0: previous_block
        _emit(self.previous_block or ZERO32)
        # 1: number
        _emit(_int_to_be_bytes(self.number))
        # 2: timestamp
        _emit(_int_to_be_bytes(self.timestamp))
        # 3: accounts_hash
        _emit(self.accounts_hash or b"")
        # 4: transactions_total_fees
        _emit(_int_to_be_bytes(self.transactions_total_fees))
        # 5: transactions_root_hash
        _emit(self.transactions_root_hash or b"")
        # 6: delay_difficulty
        _emit(_int_to_be_bytes(self.delay_difficulty))
        # 7: delay_output
        _emit(self.delay_output or b"")
        # 8: validator_public_key
        _emit(self.validator_public_key or b"")

        # Build body list
        body_id, body_atoms = _make_list(details_ids)
        atoms_acc.extend(body_atoms)
        self.body_hash = body_id

        # Type atom points to body list
        type_atom = Atom.from_data(data=b"block", next_hash=body_id)

        # Signature atom (raw byte payload)
        sig_atom = Atom.from_data(data=self.signature or b"", next_hash=ZERO32)

        # Main block list: [type_atom, body_list, signature]
        main_id, main_atoms = _make_list([type_atom.object_id(), body_id, sig_atom.object_id()])
        atoms_acc.append(type_atom)
        atoms_acc.append(sig_atom)
        atoms_acc.extend(main_atoms)

        self.hash = main_id
        return self.hash, atoms_acc

    @classmethod
    def from_atom(cls, storage_get: Callable[[bytes], Optional[Atom]], block_id: bytes) -> "Block":
        """Decode a Block from storage by its top-level list id.

        Raises ValueError when the top-level structure is malformed; missing
        *detail* atoms degrade to empty bytes rather than raising.
        """
        # 1) Expect main list
        main_typ = storage_get(block_id)
        if main_typ is None or main_typ.data != b"list":
            raise ValueError("not a block (main list missing)")
        main_val = storage_get(main_typ.next)
        if main_val is None:
            raise ValueError("malformed block list (missing value)")
        # length is little-endian u64 per storage format
        if len(main_val.data) < 1:
            raise ValueError("malformed block list (length)")
        head = main_val.next

        # read first 2 elements: [type_atom_id, body_list_id]
        first_elem = storage_get(head)
        if first_elem is None:
            raise ValueError("malformed block list (head element)")
        type_atom_id = first_elem.data
        second_elem = storage_get(first_elem.next)
        if second_elem is None:
            raise ValueError("malformed block list (second element)")
        body_list_id = second_elem.data
        # optional 3rd element: signature atom id
        # NOTE(review): ZERO32 is non-empty bytes and therefore truthy, so a
        # ZERO32 `next` still triggers a lookup here; this relies on
        # storage_get returning None for the all-zero id — confirm.
        third_elem = storage_get(second_elem.next) if second_elem.next else None
        sig_atom_id: Optional[bytes] = third_elem.data if third_elem is not None else None

        # 2) Validate type atom and linkage to body
        type_atom = storage_get(type_atom_id)
        if type_atom is None or type_atom.data != b"block" or type_atom.next != body_list_id:
            raise ValueError("not a block (type atom)")

        # 3) Parse body list of details
        body_typ = storage_get(body_list_id)
        if body_typ is None or body_typ.data != b"list":
            raise ValueError("malformed body (type)")
        body_val = storage_get(body_typ.next)
        if body_val is None:
            raise ValueError("malformed body (value)")
        cur_elem_id = body_val.next

        def _read_typed_bytes(elem_id: bytes) -> bytes:
            # Follow element -> 'byte' tag -> payload; any missing link
            # degrades to b"" instead of raising.
            elem = storage_get(elem_id)
            if elem is None:
                return b""
            child_id = elem.data
            typ = storage_get(child_id)
            if typ is None or typ.data != b"byte":
                return b""
            val = storage_get(typ.next)
            return val.data if val is not None else b""

        details: List[bytes] = []
        # We read up to 9 fields if present
        for _ in range(9):
            if not cur_elem_id:
                break
            b = _read_typed_bytes(cur_elem_id)
            details.append(b)
            nxt = storage_get(cur_elem_id)
            cur_elem_id = nxt.next if nxt is not None else b""

        # NOTE: `b` is rebound here from the last detail's bytes to the Block
        # instance being built; intentional but easy to misread.
        b = cls()
        b.hash = block_id
        b.body_hash = body_list_id

        # Map details back per the defined order; absent fields read as b"".
        get = lambda i: details[i] if i < len(details) else b""
        b.previous_block = get(0) or ZERO32
        b.number = _be_bytes_to_int(get(1))
        b.timestamp = _be_bytes_to_int(get(2))
        b.accounts_hash = get(3) or None
        b.transactions_total_fees = _be_bytes_to_int(get(4))
        b.transactions_root_hash = get(5) or None
        b.delay_difficulty = _be_bytes_to_int(get(6))
        b.delay_output = get(7) or None
        b.validator_public_key = get(8) or None

        # 4) Parse signature if present (supports raw or typed 'byte' atom)
        if sig_atom_id is not None:
            sa = storage_get(sig_atom_id)
            if sa is not None:
                if sa.data == b"byte":
                    sval = storage_get(sa.next)
                    b.signature = sval.data if sval is not None else b""
                else:
                    b.signature = sa.data

        return b

    def validate(self, storage_get: Callable[[bytes], Optional[Atom]]) -> bool:
        """Validate this block against storage.

        Checks:
        - Signature: signature must verify over the body list id using the
          validator's public key.
        - Timestamp monotonicity: if previous block exists (not ZERO32), this
          block's timestamp must be >= previous.timestamp + 1.

        Error signaling is mixed by design: missing/unloadable data returns
        False ("unverifiable"), while a failed signature or timestamp check
        raises ValueError ("malicious") — callers in chain.py/fork.py rely on
        this distinction.
        """
        # Unverifiable if critical fields are missing
        if not self.body_hash:
            return False
        if not self.signature:
            return False
        if not self.validator_public_key:
            return False
        if self.timestamp is None:
            return False

        # 1) Signature check over body hash (Ed25519 over the body list id)
        try:
            pub = Ed25519PublicKey.from_public_bytes(bytes(self.validator_public_key))
            pub.verify(self.signature, self.body_hash)
        except InvalidSignature as e:
            raise ValueError("invalid signature") from e

        # 2) Timestamp monotonicity against previous block
        if self.previous_block and self.previous_block != ZERO32:
            # If previous block cannot be loaded, treat as unverifiable, not malicious
            try:
                prev = Block.from_atom(storage_get, self.previous_block)
            except Exception:
                return False
            prev_ts = int(prev.timestamp or 0)
            cur_ts = int(self.timestamp or 0)
            # Equivalent to cur_ts <= prev_ts: timestamps must strictly increase.
            if cur_ts < prev_ts + 1:
                raise ValueError("timestamp must be at least prev+1")

        return True
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
# chain.py
|
|
2
|
+
from typing import Callable, Dict, Optional
|
|
3
|
+
from .block import Block
|
|
4
|
+
from .._storage.atom import ZERO32, Atom
|
|
5
|
+
|
|
6
|
+
class Chain:
    """A chain of blocks identified by its head, validated by walking back to genesis."""

    def __init__(self, head_block: Block):
        # Tip of the chain as a decoded Block instance.
        self.head_block = head_block
        # Earliest block verified by the last validate() pass; None until then.
        self.validated_upto_block: Optional[Block] = None
        # Root (genesis) hash for this chain; set by validation setup when known
        self.root: Optional[bytes] = None
        # Fork position: the head hash of the default/current fork for this chain
        self.fork_position: Optional[bytes] = getattr(head_block, "hash", None)
        # Mark the first malicious block encountered during validation; None means not found
        self.malicious_block_hash: Optional[bytes] = None

    def validate(self, storage_get: Callable[[bytes], Optional[Atom]]) -> Block:
        """Validate the chain from head to genesis and return the root block.

        Incorporates per-block validation (signature on body and timestamp
        monotonicity). Uses a simple cache to avoid duplicate Atom fetches and
        duplicate block decoding during the backward walk.

        NOTE(review): Block.validate's boolean return (False = "unverifiable")
        is ignored here — only raised exceptions stop the walk; confirm that
        unverifiable blocks are intended to pass.
        """
        # Atom and Block caches for this validation pass
        atom_cache: Dict[bytes, Optional[Atom]] = {}
        block_cache: Dict[bytes, Block] = {}

        def get_cached(k: bytes) -> Optional[Atom]:
            # Memoize storage lookups (including misses, cached as None).
            if k in atom_cache:
                return atom_cache[k]
            a = storage_get(k)
            atom_cache[k] = a
            return a

        def load_block(bid: bytes) -> Block:
            # Decode a block once per id, reusing cached atoms.
            if bid in block_cache:
                return block_cache[bid]
            b = Block.from_atom(get_cached, bid)  # type: ignore[arg-type]
            block_cache[bid] = b
            return b

        blk = self.head_block
        # Ensure head is in cache if it has a hash
        if getattr(blk, "hash", None):
            block_cache[blk.hash] = blk  # type: ignore[attr-defined]

        # Walk back, validating each block
        while True:
            # Validate current block (signature over body, timestamp rule)
            try:
                blk.validate(get_cached)  # may decode previous but uses cached atoms
            except Exception:
                # record first failure point then propagate
                self.malicious_block_hash = getattr(blk, "hash", None)
                raise

            # ZERO32 previous pointer marks genesis: stop the walk.
            if blk.previous_block == ZERO32:
                break
            # Move to previous block using cache-aware loader
            blk = load_block(blk.previous_block)

        self.validated_upto_block = blk
        return blk
|
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Optional, Set, Any, Callable, Dict
|
|
4
|
+
from .block import Block
|
|
5
|
+
from .._storage.atom import ZERO32, Atom
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class Fork:
    """A branch head within a Chain (same root).

    - head: current tip block id (bytes)
    - peers: identifiers (e.g., peer pubkey objects) following this head
    - root: genesis block id for this chain (optional)
    - validated_upto: earliest verified ancestor (optional)
    - chain_fork_position: the chain's fork anchor relevant to this fork
    """

    def __init__(
        self,
        head: bytes,
    ) -> None:
        self.head: bytes = head
        self.peers: Set[Any] = set()
        self.root: Optional[bytes] = None
        self.validated_upto: Optional[bytes] = None
        self.chain_fork_position: Optional[bytes] = None
        # Mark the first block found malicious during validation; None means not found
        self.malicious_block_hash: Optional[bytes] = None

    def add_peer(self, peer_id: Any) -> None:
        """Register a peer as following this fork head."""
        self.peers.add(peer_id)

    def remove_peer(self, peer_id: Any) -> None:
        """Unregister a peer; no-op if it was not following this fork."""
        self.peers.discard(peer_id)

    def validate(
        self,
        storage_get: Callable[[bytes], Optional[object]],
        stop_heads: Optional[Set[bytes]] = None,
    ) -> bool:
        """Validate only up to the chain fork position, not genesis.

        Returns True if self.head descends from self.chain_fork_position (or if
        chain_fork_position is None/equals head), and updates validated_upto to
        that anchor. If stop_heads is provided, returns True early if ancestry
        reaches any of those heads, setting validated_upto to the matched head.
        Returns False if ancestry cannot be confirmed.

        NOTE(review): like Chain.validate, this treats only exceptions from
        Block.validate as failure; a False ("unverifiable") return is ignored
        and the walk continues — confirm intent.
        """
        # Trivial cases: no anchor, or anchor is the head itself.
        if self.chain_fork_position is None or self.chain_fork_position == self.head:
            self.validated_upto = self.head
            return True
        # Caches to avoid double fetching/decoding
        atom_cache: Dict[bytes, Optional[Atom]] = {}
        block_cache: Dict[bytes, Block] = {}

        def get_cached(k: bytes) -> Optional[Atom]:
            # Memoize storage lookups, including misses (cached as None).
            if k in atom_cache:
                return atom_cache[k]
            a = storage_get(k)  # type: ignore[call-arg]
            atom_cache[k] = a  # may be None if missing
            return a

        def load_block(bid: bytes) -> Optional[Block]:
            # Decode a block once per id; decoding failure maps to None
            # ("unverifiable") rather than propagating.
            if bid in block_cache:
                return block_cache[bid]
            try:
                b = Block.from_atom(get_cached, bid)  # type: ignore[arg-type]
            except Exception:
                return None
            block_cache[bid] = b
            return b

        blk = load_block(self.head)
        if blk is None:
            # Missing head data: unverifiable, not malicious
            return False
        # Walk up to fork anchor, validating each block signature + timestamp
        while True:
            try:
                blk.validate(get_cached)  # type: ignore[arg-type]
            except Exception:
                # mark the first failure point
                self.malicious_block_hash = blk.hash
                return False

            # Early-exit if we met another known fork head
            if stop_heads and blk.hash in stop_heads:
                self.validated_upto = blk.hash
                return True

            # Reached the chain's fork anchor: ancestry confirmed.
            if blk.hash == self.chain_fork_position:
                self.validated_upto = blk.hash
                return True

            nxt = load_block(blk.previous_block)
            if nxt is None:
                # Broken or missing ancestry: cannot confirm descent.
                return False
            blk = nxt
|
File without changes
|
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import threading
|
|
4
|
+
import time
|
|
5
|
+
from queue import Queue, Empty
|
|
6
|
+
from typing import Any, Dict, Optional, Tuple
|
|
7
|
+
|
|
8
|
+
from .block import Block
|
|
9
|
+
from .chain import Chain
|
|
10
|
+
from .fork import Fork
|
|
11
|
+
from .._storage.atom import ZERO32, Atom
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def validation_setup(node: Any) -> None:
    """Attach validation state and start the discovery/verify worker threads on *node*.

    Idempotent via getattr defaults: pre-existing locks, maps, queues and stop
    events on the node are reused. Assumes the node exposes ``peers`` (dict of
    peer objects with a ``latest_block`` bytes attribute) and ``_local_get``
    (atom lookup) — TODO confirm against the Node class.
    """
    # Shared state
    node.validation_lock = getattr(node, "validation_lock", threading.RLock())

    # Public maps per your spec
    # - chains: Dict[root, Chain]
    # - forks: Dict[head, Fork]
    node.chains = getattr(node, "chains", {})
    node.forks = getattr(node, "forks", {})

    # Single work queue of grouped items: (latest_block_hash, set(peer_ids))
    node._validation_verify_queue = getattr(
        node, "_validation_verify_queue", Queue()
    )
    node._validation_stop_event = getattr(
        node, "_validation_stop_event", threading.Event()
    )

    def _process_peers_latest_block(latest_block_hash: bytes, peer_ids: set[Any]) -> None:
        """Assign a peer to a fork for its latest block without merging forks.

        Flow:
        - Create a new Fork for `latest_block_hash` and validate it, using
          stop_heads composed of current fork heads to short-circuit when
          ancestry meets an existing fork head.
        - If a matching fork head is found and is not malicious, copy its
          structural fields (root, validated_upto, chain_fork_position) onto
          the new fork.
        - Add all peers in `peer_ids` to the new fork and remove each from any
          previous fork they followed.
        - Persist the new fork under `node.forks[latest_block_hash]`.
        """
        new_fork = Fork(head=latest_block_hash)

        # Heads of all existing forks (excluding our own) act as early-exit anchors.
        current_fork_heads = {fk.head for fk in node.forks.values() if fk.head != latest_block_hash}

        new_fork.validate(storage_get=node._local_get, stop_heads=current_fork_heads)

        # update new_fork with details of the fork with head of validated_upto
        if new_fork.validated_upto and new_fork.validated_upto in node.forks:
            ref = node.forks[new_fork.validated_upto]
            # if the matched fork is malicious, disregard this new fork entirely
            if getattr(ref, "malicious_block_hash", None):
                return
            # copy structural fields exactly
            new_fork.root = ref.root
            new_fork.validated_upto = ref.validated_upto
            new_fork.chain_fork_position = ref.chain_fork_position

        # add peers to new fork and remove them from any old forks
        for peer_id in peer_ids:
            new_fork.add_peer(peer_id)
            # Remove this peer from all other forks
            for h, fk in list(node.forks.items()):
                if h != latest_block_hash:
                    fk.remove_peer(peer_id)

        # persist the fork
        node.forks[latest_block_hash] = new_fork

    # Discovery worker: watches peers and enqueues head changes
    def _discovery_worker():
        stop = node._validation_stop_event
        while not stop.is_set():
            try:
                peers = getattr(node, "peers", None)
                if isinstance(peers, dict):
                    # Snapshot as (peer_id, latest_block_hash) pairs; walrus
                    # binds `latest` once per peer, keeping only non-empty
                    # bytes/bytearray values.
                    pairs = [
                        (peer_id, bytes(latest))
                        for peer_id, peer in list(peers.items())
                        if isinstance((latest := getattr(peer, "latest_block", None)), (bytes, bytearray)) and latest
                    ]
                    # Group peers by latest block hash
                    latest_keys = {hb for _, hb in pairs}
                    grouped: Dict[bytes, set[Any]] = {
                        hb: {pid for pid, phb in pairs if phb == hb}
                        for hb in latest_keys
                    }

                    # Replace queue contents with current groups.
                    # NOTE(review): drain-then-refill is not atomic with the
                    # verify worker's drain; a group enqueued here can be
                    # discarded before processing — confirm this best-effort
                    # behavior is intended.
                    try:
                        while True:
                            node._validation_verify_queue.get_nowait()
                    except Empty:
                        pass
                    for latest_b, peer_set in grouped.items():
                        node._validation_verify_queue.put((latest_b, peer_set))
            except Exception:
                # Best-effort loop: swallow errors and retry next tick.
                pass
            finally:
                time.sleep(0.5)

    # Verification worker: computes root/height and applies peer→fork assignment
    def _verify_worker():
        stop = node._validation_stop_event
        while not stop.is_set():
            # Take a snapshot of all currently queued groups
            batch: list[tuple[bytes, set[Any]]] = []
            try:
                while True:
                    item = node._validation_verify_queue.get_nowait()
                    batch.append(item)
            except Empty:
                pass

            if not batch:
                time.sleep(0.1)
                continue

            # Process the snapshot; new items enqueued during processing
            # will be handled in the next iteration
            for latest_b, peers in batch:
                try:
                    _process_peers_latest_block(latest_b, peers)
                except Exception:
                    # One bad group must not kill the worker thread.
                    pass

    # Start workers as daemons
    node.validation_discovery_thread = threading.Thread(
        target=_discovery_worker, daemon=True, name="validation-discovery"
    )
    node.validation_verify_thread = threading.Thread(
        target=_verify_worker, daemon=True, name="validation-verify"
    )
    node.validation_discovery_thread.start()
    node.validation_verify_thread.start()
|
astreum/models/block.py
CHANGED
|
@@ -53,10 +53,10 @@ class Block:
|
|
|
53
53
|
self.transactions_count = transactions_count
|
|
54
54
|
self.delay_difficulty = delay_difficulty
|
|
55
55
|
self.delay_output = delay_output
|
|
56
|
-
self.delay_proof = delay_proof
|
|
57
|
-
self.validator_pk = validator_pk
|
|
58
|
-
self.body_tree = body_tree
|
|
59
|
-
self.signature = signature
|
|
56
|
+
self.delay_proof = delay_proof
|
|
57
|
+
self.validator_pk = validator_pk
|
|
58
|
+
self.body_tree = body_tree
|
|
59
|
+
self.signature = signature
|
|
60
60
|
|
|
61
61
|
@property
|
|
62
62
|
def hash(self) -> bytes:
|
|
@@ -68,11 +68,20 @@ class Block:
|
|
|
68
68
|
raise ValueError("Body tree not available for this block instance.")
|
|
69
69
|
return self._body_tree.root_hash
|
|
70
70
|
|
|
71
|
-
def get_signature(self) -> bytes:
|
|
72
|
-
"""Return the block's signature leaf."""
|
|
73
|
-
if self._signature is None:
|
|
74
|
-
raise ValueError("Signature not available for this block instance.")
|
|
75
|
-
return self._signature
|
|
71
|
+
def get_signature(self) -> bytes:
|
|
72
|
+
"""Return the block's signature leaf."""
|
|
73
|
+
if self._signature is None:
|
|
74
|
+
raise ValueError("Signature not available for this block instance.")
|
|
75
|
+
return self._signature
|
|
76
|
+
|
|
77
|
+
# Backwards/forwards alias for clarity with external specs
|
|
78
|
+
@property
|
|
79
|
+
def validator_public_key(self) -> Optional[bytes]:
|
|
80
|
+
return self.validator_pk
|
|
81
|
+
|
|
82
|
+
@validator_public_key.setter
|
|
83
|
+
def validator_public_key(self, value: Optional[bytes]) -> None:
|
|
84
|
+
self.validator_pk = value
|
|
76
85
|
|
|
77
86
|
def get_field(self, name: str) -> Union[int, bytes]:
|
|
78
87
|
"""Query a single body field by name, returning an int or bytes."""
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: astreum
|
|
3
|
-
Version: 0.2.36
|
|
3
|
+
Version: 0.2.37
|
|
4
4
|
Summary: Python library to interact with the Astreum blockchain and its Lispeum virtual machine.
|
|
5
5
|
Author-email: "Roy R. O. Okello" <roy@stelar.xyz>
|
|
6
6
|
Project-URL: Homepage, https://github.com/astreum/lib
|
|
@@ -79,7 +79,7 @@ The Lispeum virtual machine (VM) is embedded inside `astreum.Node`. You feed it
|
|
|
79
79
|
# Define a named function int.add (stack body) and call it with bytes 1 and 2
|
|
80
80
|
|
|
81
81
|
import uuid
|
|
82
|
-
from astreum
|
|
82
|
+
from astreum import Node, Env, Expr
|
|
83
83
|
|
|
84
84
|
# 1) Spin‑up a stand‑alone VM
|
|
85
85
|
node = Node()
|
|
@@ -139,6 +139,8 @@ except ParseError as e:
|
|
|
139
139
|
## Testing
|
|
140
140
|
|
|
141
141
|
```bash
|
|
142
|
+
python3 -m venv venv
|
|
142
143
|
source venv/bin/activate
|
|
144
|
+
pip install -e .
|
|
143
145
|
python3 -m unittest discover -s tests
|
|
144
146
|
```
|