astreum-0.2.39-py3-none-any.whl → astreum-0.2.41-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. astreum/_communication/__init__.py +2 -0
  2. astreum/{models → _communication}/message.py +100 -64
  3. astreum/_communication/ping.py +33 -0
  4. astreum/_communication/route.py +53 -20
  5. astreum/_communication/setup.py +240 -99
  6. astreum/_communication/util.py +42 -0
  7. astreum/_consensus/__init__.py +6 -0
  8. astreum/_consensus/account.py +170 -0
  9. astreum/_consensus/accounts.py +67 -0
  10. astreum/_consensus/block.py +84 -52
  11. astreum/_consensus/chain.py +65 -62
  12. astreum/_consensus/fork.py +99 -97
  13. astreum/_consensus/genesis.py +141 -0
  14. astreum/_consensus/receipt.py +177 -0
  15. astreum/_consensus/setup.py +21 -162
  16. astreum/_consensus/transaction.py +43 -23
  17. astreum/_consensus/workers/__init__.py +9 -0
  18. astreum/_consensus/workers/discovery.py +48 -0
  19. astreum/_consensus/workers/validation.py +122 -0
  20. astreum/_consensus/workers/verify.py +63 -0
  21. astreum/_storage/atom.py +24 -7
  22. astreum/_storage/patricia.py +443 -0
  23. astreum/models/block.py +10 -10
  24. astreum/node.py +755 -753
  25. {astreum-0.2.39.dist-info → astreum-0.2.41.dist-info}/METADATA +1 -1
  26. astreum-0.2.41.dist-info/RECORD +53 -0
  27. astreum/lispeum/__init__.py +0 -0
  28. astreum/lispeum/environment.py +0 -40
  29. astreum/lispeum/expression.py +0 -86
  30. astreum/lispeum/parser.py +0 -41
  31. astreum/lispeum/tokenizer.py +0 -52
  32. astreum/models/account.py +0 -91
  33. astreum/models/accounts.py +0 -34
  34. astreum/models/transaction.py +0 -106
  35. astreum/relay/__init__.py +0 -0
  36. astreum/relay/peer.py +0 -9
  37. astreum/relay/route.py +0 -25
  38. astreum/relay/setup.py +0 -58
  39. astreum-0.2.39.dist-info/RECORD +0 -55
  40. {astreum-0.2.39.dist-info → astreum-0.2.41.dist-info}/WHEEL +0 -0
  41. {astreum-0.2.39.dist-info → astreum-0.2.41.dist-info}/licenses/LICENSE +0 -0
  42. {astreum-0.2.39.dist-info → astreum-0.2.41.dist-info}/top_level.txt +0 -0
astreum/_consensus/fork.py @@ -1,98 +1,100 @@
- from __future__ import annotations
-
+ from __future__ import annotations
+
  from typing import Optional, Set, Any, Callable, Dict
- from .block import Block
- from .._storage.atom import ZERO32, Atom
-
-
- class Fork:
-     """A branch head within a Chain (same root).
-
-     - head: current tip block id (bytes)
-     - peers: identifiers (e.g., peer pubkey objects) following this head
-     - root: genesis block id for this chain (optional)
-     - validated_upto: earliest verified ancestor (optional)
-     - chain_fork_position: the chain's fork anchor relevant to this fork
-     """
-
-     def __init__(
-         self,
-         head: bytes,
-     ) -> None:
-         self.head: bytes = head
-         self.peers: Set[Any] = set()
-         self.root: Optional[bytes] = None
-         self.validated_upto: Optional[bytes] = None
-         self.chain_fork_position: Optional[bytes] = None
-         # Mark the first block found malicious during validation; None means not found
-         self.malicious_block_hash: Optional[bytes] = None
-
-     def add_peer(self, peer_id: Any) -> None:
-         self.peers.add(peer_id)
-
-     def remove_peer(self, peer_id: Any) -> None:
-         self.peers.discard(peer_id)
-
-     def validate(
-         self,
-         storage_get: Callable[[bytes], Optional[object]],
-         stop_heads: Optional[Set[bytes]] = None,
-     ) -> bool:
-         """Validate only up to the chain fork position, not genesis.
-
-         Returns True if self.head descends from self.chain_fork_position (or if
-         chain_fork_position is None/equals head), and updates validated_upto to
-         that anchor. If stop_heads is provided, returns True early if ancestry
-         reaches any of those heads, setting validated_upto to the matched head.
-         Returns False if ancestry cannot be confirmed.
-         """
-         if self.chain_fork_position is None or self.chain_fork_position == self.head:
-             self.validated_upto = self.head
-             return True
-         # Caches to avoid double fetching/decoding
-         atom_cache: Dict[bytes, Optional[Atom]] = {}
-         block_cache: Dict[bytes, Block] = {}
-
-         def get_cached(k: bytes) -> Optional[Atom]:
-             if k in atom_cache:
-                 return atom_cache[k]
-             a = storage_get(k)  # type: ignore[call-arg]
-             atom_cache[k] = a  # may be None if missing
-             return a
-
-         def load_block(bid: bytes) -> Optional[Block]:
-             if bid in block_cache:
-                 return block_cache[bid]
-             try:
-                 b = Block.from_atom(get_cached, bid)  # type: ignore[arg-type]
-             except Exception:
-                 return None
-             block_cache[bid] = b
-             return b
-
-         blk = load_block(self.head)
-         if blk is None:
-             # Missing head data: unverifiable, not malicious
-             return False
-         # Walk up to fork anchor, validating each block signature + timestamp
-         while True:
-             try:
-                 blk.validate(get_cached)  # type: ignore[arg-type]
-             except Exception:
-                 # mark the first failure point
-                 self.malicious_block_hash = blk.hash
-                 return False
-
-             # Early-exit if we met another known fork head
-             if stop_heads and blk.hash in stop_heads:
-                 self.validated_upto = blk.hash
-                 return True
-
-             if blk.hash == self.chain_fork_position:
-                 self.validated_upto = blk.hash
-                 return True
-
-             nxt = load_block(blk.previous_block)
-             if nxt is None:
-                 return False
-             blk = nxt
+ from .block import Block
+ from .._storage.atom import ZERO32, Atom
+
+
+ class Fork:
+     """A branch head within a Chain (same root).
+
+     - head: current tip block id (bytes)
+     - peers: identifiers (e.g., peer pubkey objects) following this head
+     - root: genesis block id for this chain (optional)
+     - validated_upto: earliest verified ancestor (optional)
+     - chain_fork_position: the chain's fork anchor relevant to this fork
+     """
+
+     def __init__(
+         self,
+         head: bytes,
+     ) -> None:
+         self.head: bytes = head
+         self.peers: Set[Any] = set()
+         self.root: Optional[bytes] = None
+         self.validated_upto: Optional[bytes] = None
+         self.chain_fork_position: Optional[bytes] = None
+         # Mark the first block found malicious during validation; None means not found
+         self.malicious_block_hash: Optional[bytes] = None
+
+     def add_peer(self, peer_id: Any) -> None:
+         self.peers.add(peer_id)
+
+     def remove_peer(self, peer_id: Any) -> None:
+         self.peers.discard(peer_id)
+
+     def validate(
+         self,
+         storage_get: Callable[[bytes], Optional[object]],
+         stop_heads: Optional[Set[bytes]] = None,
+     ) -> bool:
+         """Validate only up to the chain fork position, not genesis.
+
+         Returns True if self.head descends from self.chain_fork_position (or if
+         chain_fork_position is None/equals head), and updates validated_upto to
+         that anchor. If stop_heads is provided, returns True early if ancestry
+         reaches any of those heads, setting validated_upto to the matched head.
+         Returns False if ancestry cannot be confirmed.
+         """
+         if self.chain_fork_position is None or self.chain_fork_position == self.head:
+             self.validated_upto = self.head
+             return True
+         # Caches to avoid double fetching/decoding
+         atom_cache: Dict[bytes, Optional[Atom]] = {}
+         block_cache: Dict[bytes, Block] = {}
+
+         def get_cached(k: bytes) -> Optional[Atom]:
+             if k in atom_cache:
+                 return atom_cache[k]
+             a = storage_get(k)  # type: ignore[call-arg]
+             atom_cache[k] = a  # may be None if missing
+             return a
+
+         def load_block(bid: bytes) -> Optional[Block]:
+             if bid in block_cache:
+                 return block_cache[bid]
+             try:
+                 b = Block.from_atom(get_cached, bid)
+             except Exception:
+                 return None
+             block_cache[bid] = b
+             return b
+
+         blk = load_block(self.head)
+         if blk is None:
+             # Missing head data: unverifiable, not malicious
+             return False
+         # Walk up to fork anchor, validating each block signature + timestamp
+         while True:
+             try:
+                 blk.validate(get_cached)  # type: ignore[arg-type]
+             except Exception:
+                 # mark the first failure point
+                 self.malicious_block_hash = blk.hash
+                 return False
+
+             # Early-exit if we met another known fork head
+             if stop_heads and blk.hash in stop_heads:
+                 self.validated_upto = blk.hash
+                 return True
+
+             if blk.hash == self.chain_fork_position:
+                 self.validated_upto = blk.hash
+                 return True
+
+             prev_hash = blk.previous_block_hash if hasattr(blk, "previous_block_hash") else ZERO32
+             nxt = load_block(prev_hash)
+             if nxt is None:
+                 return False
+             blk.previous_block = nxt  # cache for future use
+             blk = nxt
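The rewritten Fork.validate walks parent links from the head toward chain_fork_position, caching atoms and decoded blocks along the way. The sketch below shows how it might be driven; it is not taken from the package, the import path is inferred from the file list above, and the atom store and hash values are placeholders.

from astreum._consensus.fork import Fork

# Illustrative values only: in practice these come from the node's storage and its peers.
atom_store = {}                      # dict[bytes, Atom]: object_id -> Atom, filled from local storage
some_head_hash = b"\x22" * 32        # tip block id advertised by peers (placeholder)
anchor_hash = b"\x33" * 32           # the chain's fork position for this branch (placeholder)

fork = Fork(head=some_head_hash)
fork.chain_fork_position = anchor_hash
fork.add_peer(b"\x44" * 32)          # track a peer following this head

# dict.get matches the storage_get callable (bytes -> Optional[Atom]) that validate() expects.
if fork.validate(atom_store.get, stop_heads={anchor_hash}):
    print("validated up to", fork.validated_upto.hex())
elif fork.malicious_block_hash is not None:
    print("first invalid block:", fork.malicious_block_hash.hex())
else:
    print("ancestry could not be confirmed (missing block data)")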
astreum/_consensus/genesis.py @@ -0,0 +1,141 @@
+
+ from __future__ import annotations
+
+ from typing import Any, Iterable, List, Optional, Tuple
+
+ from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
+
+ from .account import Account
+ from .block import Block
+ from .._storage.atom import Atom, ZERO32
+ from .._storage.patricia import PatriciaTrie, PatriciaNode
+
+ TREASURY_ADDRESS = b"\x01" * 32
+ BURN_ADDRESS = b"\x00" * 32
+
+
+ def _int_to_be_bytes(value: int) -> bytes:
+     if value < 0:
+         raise ValueError("integer fields in genesis must be non-negative")
+     if value == 0:
+         return b"\x00"
+     length = (value.bit_length() + 7) // 8
+     return value.to_bytes(length, "big")
+
+
+ def _make_list(child_ids: List[bytes]) -> Tuple[bytes, List[Atom]]:
+     next_hash = ZERO32
+     chain: List[Atom] = []
+     for child_id in reversed(child_ids):
+         elem = Atom.from_data(data=child_id, next_hash=next_hash)
+         next_hash = elem.object_id()
+         chain.append(elem)
+     chain.reverse()
+
+     value_atom = Atom.from_data(
+         data=len(child_ids).to_bytes(8, "little"),
+         next_hash=next_hash,
+     )
+     type_atom = Atom.from_data(data=b"list", next_hash=value_atom.object_id())
+     atoms = chain + [value_atom, type_atom]
+     return type_atom.object_id(), atoms
+
+
+ def _store_atoms(node: Any, atoms: Iterable[Atom]) -> None:
+     setter = getattr(node, "_local_set", None)
+     if not callable(setter):
+         raise TypeError("node must expose '_local_set(object_id, atom)'")
+     for atom in atoms:
+         setter(atom.object_id(), atom)
+
+
+ def _persist_trie(trie: PatriciaTrie, node: Any) -> None:
+     for patricia_node in trie.nodes.values():
+         _, atoms = patricia_node.to_atoms()
+         _store_atoms(node, atoms)
+
+
+ if not hasattr(PatriciaNode, "to_bytes"):
+     def _patricia_node_to_bytes(self: PatriciaNode) -> bytes:  # type: ignore[no-redef]
+         fields = [
+             bytes([self.key_len]) + self.key,
+             self.child_0 or ZERO32,
+             self.child_1 or ZERO32,
+             self.value or b"",
+         ]
+         encoded: List[bytes] = []
+         for field in fields:
+             encoded.append(len(field).to_bytes(4, "big"))
+             encoded.append(field)
+         return b"".join(encoded)
+
+     PatriciaNode.to_bytes = _patricia_node_to_bytes  # type: ignore[attr-defined]
+
+
+ def create_genesis_block(node: Any, validator_public_key: bytes, validator_secret_key: bytes) -> Block:
+     validator_pk = bytes(validator_public_key)
+
+     if len(validator_pk) != 32:
+         raise ValueError("validator_public_key must be 32 bytes")
+
+     # 1. Stake trie with single validator stake of 1 (encoded on 32 bytes).
+     stake_trie = PatriciaTrie()
+     stake_amount = (1).to_bytes(32, "big")
+     stake_trie.put(node, validator_pk, stake_amount)
+     _persist_trie(stake_trie, node)
+     stake_root = stake_trie.root_hash or ZERO32
+
+     # 2. Account trie with treasury, burn, and validator accounts.
+     accounts_trie = PatriciaTrie()
+
+     treasury_account = Account.create(balance=1, data=stake_root, nonce=0)
+     treasury_account_id, treasury_atoms = treasury_account.to_atom()
+     _store_atoms(node, treasury_atoms)
+     accounts_trie.put(node, TREASURY_ADDRESS, treasury_account_id)
+
+     burn_account = Account.create(balance=0, data=b"", nonce=0)
+     burn_account_id, burn_atoms = burn_account.to_atom()
+     _store_atoms(node, burn_atoms)
+     accounts_trie.put(node, BURN_ADDRESS, burn_account_id)
+
+     validator_account = Account.create(balance=0, data=b"", nonce=0)
+     validator_account_id, validator_atoms = validator_account.to_atom()
+     _store_atoms(node, validator_atoms)
+     accounts_trie.put(node, validator_pk, validator_account_id)
+
+     _persist_trie(accounts_trie, node)
+
+     accounts_root = accounts_trie.root_hash
+     if accounts_root is None:
+         raise ValueError("genesis accounts trie is empty")
+
+     # 3. Assemble block metadata.
+     block = Block()
+     block.previous_block_hash = ZERO32
+     block.number = 0
+     block.timestamp = 0
+     block.accounts_hash = accounts_root
+     block.accounts = accounts_trie
+     block.transactions_total_fees = 0
+     block.transactions_hash = ZERO32
+     block.receipts_hash = ZERO32
+     block.delay_difficulty = 0
+     block.delay_output = b""
+     block.validator_public_key = validator_pk
+     block.transactions = []
+     block.receipts = []
+
+     # 4. Sign the block body with the validator secret key.
+     block.signature = b""
+     block.to_atom()
+
+     if block.body_hash is None:
+         raise ValueError("failed to materialise genesis block body")
+
+     secret = Ed25519PrivateKey.from_private_bytes(validator_secret_key)
+     block.signature = secret.sign(block.body_hash)
+     block_hash, block_atoms = block.to_atom()
+     _store_atoms(node, block_atoms)
+
+     block.hash = block_hash
+     return block
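create_genesis_block takes raw 32-byte Ed25519 key material plus a node object that persists atoms via _local_set. The following is a hedged sketch, not from the package: the InMemoryNode stand-in only implements _local_set (the only method the genesis helpers call directly), but PatriciaTrie.put also receives the node, so the real node class may require more; the import path is inferred from the file list above.

from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey

from astreum._consensus.genesis import create_genesis_block


class InMemoryNode:
    """Stand-in for the real node: stores atoms keyed by object id."""

    def __init__(self) -> None:
        self.atoms = {}

    def _local_set(self, object_id, atom) -> None:
        self.atoms[object_id] = atom


# Generate a validator key pair and extract the raw 32-byte encodings.
secret = Ed25519PrivateKey.generate()
secret_bytes = secret.private_bytes(
    encoding=serialization.Encoding.Raw,
    format=serialization.PrivateFormat.Raw,
    encryption_algorithm=serialization.NoEncryption(),
)
public_bytes = secret.public_key().public_bytes(
    encoding=serialization.Encoding.Raw,
    format=serialization.PublicFormat.Raw,
)

node = InMemoryNode()
genesis = create_genesis_block(node, public_bytes, secret_bytes)
print("genesis block hash:", genesis.hash.hex())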
astreum/_consensus/receipt.py @@ -0,0 +1,177 @@
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from typing import Callable, List, Optional, Tuple
+
+ from .._storage.atom import Atom, ZERO32
+
+ STATUS_SUCCESS = 0
+ STATUS_FAILED = 1
+
+
+ def _int_to_be_bytes(value: Optional[int]) -> bytes:
+     if value is None:
+         return b""
+     value = int(value)
+     if value == 0:
+         return b"\x00"
+     size = (value.bit_length() + 7) // 8
+     return value.to_bytes(size, "big")
+
+
+ def _be_bytes_to_int(data: Optional[bytes]) -> int:
+     if not data:
+         return 0
+     return int.from_bytes(data, "big")
+
+
+ def _make_list(child_ids: List[bytes]) -> Tuple[bytes, List[Atom]]:
+     atoms: List[Atom] = []
+     next_hash = ZERO32
+     elements: List[Atom] = []
+     for child_id in reversed(child_ids):
+         elem = Atom.from_data(data=child_id, next_hash=next_hash)
+         next_hash = elem.object_id()
+         elements.append(elem)
+     elements.reverse()
+     list_value = Atom.from_data(data=len(child_ids).to_bytes(8, "little"), next_hash=next_hash)
+     list_type = Atom.from_data(data=b"list", next_hash=list_value.object_id())
+     atoms.extend(elements)
+     atoms.append(list_value)
+     atoms.append(list_type)
+     return list_type.object_id(), atoms
+
+
+ def _read_list_entries(
+     storage_get: Callable[[bytes], Optional[Atom]], start: bytes
+ ) -> List[bytes]:
+     entries: List[bytes] = []
+     current = start if start and start != ZERO32 else b""
+     while current:
+         elem = storage_get(current)
+         if elem is None:
+             break
+         entries.append(elem.data)
+         nxt = elem.next
+         current = nxt if nxt and nxt != ZERO32 else b""
+     return entries
+
+
+ def _read_payload_bytes(
+     storage_get: Callable[[bytes], Optional[Atom]], object_id: bytes
+ ) -> bytes:
+     if not object_id or object_id == ZERO32:
+         return b""
+     atom = storage_get(object_id)
+     if atom is None:
+         return b""
+     if atom.data == b"bytes":
+         value_atom = storage_get(atom.next)
+         return value_atom.data if value_atom is not None else b""
+     return atom.data
+
+
+ @dataclass
+ class Receipt:
+     transaction_hash: bytes = ZERO32
+     cost: int = 0
+     logs: bytes = b""
+     status: int = 0
+     hash: bytes = ZERO32
+     atoms: List[Atom] = field(default_factory=list)
+
+     def to_atom(self) -> Tuple[bytes, List[Atom]]:
+         """Serialise the receipt into Atom storage."""
+         if self.status not in (STATUS_SUCCESS, STATUS_FAILED):
+             raise ValueError("unsupported receipt status")
+
+         atoms: List[Atom] = []
+
+         tx_atom = Atom.from_data(data=bytes(self.transaction_hash))
+         status_atom = Atom.from_data(data=_int_to_be_bytes(self.status))
+         cost_atom = Atom.from_data(data=_int_to_be_bytes(self.cost))
+         logs_atom = Atom.from_data(data=bytes(self.logs))
+
+         atoms.extend([tx_atom, status_atom, cost_atom, logs_atom])
+
+         body_child_ids = [
+             tx_atom.object_id(),
+             status_atom.object_id(),
+             cost_atom.object_id(),
+             logs_atom.object_id(),
+         ]
+         body_id, body_atoms = _make_list(body_child_ids)
+         atoms.extend(body_atoms)
+
+         type_atom = Atom.from_data(data=b"receipt", next_hash=body_id)
+         atoms.append(type_atom)
+
+         top_list_id, top_atoms = _make_list([type_atom.object_id(), body_id])
+         atoms.extend(top_atoms)
+
+         return top_list_id, atoms
+
+     def atomize(self) -> Tuple[bytes, List[Atom]]:
+         """Generate atoms for this receipt and cache them."""
+         receipt_id, atoms = self.to_atom()
+         self.hash = receipt_id
+         self.atoms = atoms
+         return receipt_id, atoms
+
+     @classmethod
+     def from_atom(
+         cls,
+         storage_get: Callable[[bytes], Optional[Atom]],
+         receipt_id: bytes,
+     ) -> Receipt:
+         """Materialise a Receipt from Atom storage."""
+         top_type_atom = storage_get(receipt_id)
+         if top_type_atom is None or top_type_atom.data != b"list":
+             raise ValueError("not a receipt (outer list missing)")
+
+         top_value_atom = storage_get(top_type_atom.next)
+         if top_value_atom is None:
+             raise ValueError("malformed receipt (outer value missing)")
+
+         head = top_value_atom.next
+         first_elem = storage_get(head) if head else None
+         if first_elem is None:
+             raise ValueError("malformed receipt (type element missing)")
+
+         type_atom_id = first_elem.data
+         type_atom = storage_get(type_atom_id)
+         if type_atom is None or type_atom.data != b"receipt":
+             raise ValueError("not a receipt (type mismatch)")
+
+         remainder_entries = _read_list_entries(storage_get, first_elem.next)
+         if not remainder_entries:
+             raise ValueError("malformed receipt (body missing)")
+         body_id = remainder_entries[0]
+
+         body_type_atom = storage_get(body_id)
+         if body_type_atom is None or body_type_atom.data != b"list":
+             raise ValueError("malformed receipt body (type)")
+
+         body_value_atom = storage_get(body_type_atom.next)
+         if body_value_atom is None:
+             raise ValueError("malformed receipt body (value)")
+
+         body_entries = _read_list_entries(storage_get, body_value_atom.next)
+         if len(body_entries) < 4:
+             body_entries.extend([ZERO32] * (4 - len(body_entries)))
+
+         transaction_hash_bytes = _read_payload_bytes(storage_get, body_entries[0])
+         status_bytes = _read_payload_bytes(storage_get, body_entries[1])
+         cost_bytes = _read_payload_bytes(storage_get, body_entries[2])
+         logs_bytes = _read_payload_bytes(storage_get, body_entries[3])
+         status_value = _be_bytes_to_int(status_bytes)
+         if status_value not in (STATUS_SUCCESS, STATUS_FAILED):
+             raise ValueError("unsupported receipt status")
+
+         return cls(
+             transaction_hash=transaction_hash_bytes or ZERO32,
+             cost=_be_bytes_to_int(cost_bytes),
+             logs=logs_bytes,
+             status=status_value,
+             hash=bytes(receipt_id),
+         )
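Receipt.to_atom and Receipt.from_atom round-trip through any storage_get callable that maps object ids to Atoms, so a plain dict is enough to exercise them. A minimal sketch (field values are placeholders and the import path is inferred from the file list above, not confirmed by the package):

from astreum._consensus.receipt import Receipt, STATUS_SUCCESS

receipt = Receipt(
    transaction_hash=b"\x11" * 32,   # placeholder transaction hash
    cost=21000,                      # placeholder execution cost
    logs=b"ok",
    status=STATUS_SUCCESS,
)

receipt_id, atoms = receipt.atomize()                   # serialise and cache the atoms
store = {atom.object_id(): atom for atom in atoms}      # object_id -> Atom

restored = Receipt.from_atom(store.get, receipt_id)     # dict.get is the storage_get callable
assert restored.transaction_hash == receipt.transaction_hash
assert restored.cost == receipt.cost and restored.status == STATUS_SUCCESS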