astreum 0.2.14__py3-none-any.whl → 0.2.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of astreum might be problematic. Click here for more details.
- {astreum-0.2.14.dist-info → astreum-0.2.15.dist-info}/METADATA +1 -1
- {astreum-0.2.14.dist-info → astreum-0.2.15.dist-info}/RECORD +5 -9
- astreum/_node/__init__.py +0 -447
- astreum/_node/storage/__init__.py +0 -0
- astreum/_node/storage/merkle.py +0 -224
- astreum/_node/storage/patricia.py +0 -289
- {astreum-0.2.14.dist-info → astreum-0.2.15.dist-info}/WHEEL +0 -0
- {astreum-0.2.14.dist-info → astreum-0.2.15.dist-info}/licenses/LICENSE +0 -0
- {astreum-0.2.14.dist-info → astreum-0.2.15.dist-info}/top_level.txt +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: astreum
|
|
3
|
-
Version: 0.2.
|
|
3
|
+
Version: 0.2.15
|
|
4
4
|
Summary: Python library to interact with the Astreum blockchain and its Lispeum virtual machine.
|
|
5
5
|
Author-email: "Roy R. O. Okello" <roy@stelar.xyz>
|
|
6
6
|
Project-URL: Homepage, https://github.com/astreum/lib
|
|
@@ -1,10 +1,6 @@
|
|
|
1
1
|
astreum/__init__.py,sha256=y2Ok3EY_FstcmlVASr80lGR_0w-dH-SXDCCQFmL6uwA,28
|
|
2
2
|
astreum/format.py,sha256=X4tG5GGPweNCE54bHYkLFiuLTbmpy5upO_s1Cef-MGA,2711
|
|
3
3
|
astreum/node.py,sha256=dPloCXuDyIn3-KDqxlgl3jxsonJlFMLi_quwJRsoLC8,46259
|
|
4
|
-
astreum/_node/__init__.py,sha256=7yz1YHo0DCUgUQvJf75qdUo_ocl5-XZRU-Vc2NhcvJs,18639
|
|
5
|
-
astreum/_node/storage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
6
|
-
astreum/_node/storage/merkle.py,sha256=XCQBrHbwI0FuPTCUwHOy-Kva3uWbvCdw_-13hRPf1UI,10219
|
|
7
|
-
astreum/_node/storage/patricia.py,sha256=tynxn_qETCU9X7yJdeh_0GHpC8Pzcoq4CWrSZlMUeRc,11546
|
|
8
4
|
astreum/crypto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
9
5
|
astreum/crypto/ed25519.py,sha256=FRnvlN0kZlxn4j-sJKl-C9tqiz_0z4LZyXLj3KIj1TQ,1760
|
|
10
6
|
astreum/crypto/quadratic_form.py,sha256=pJgbORey2NTWbQNhdyvrjy_6yjORudQ67jBz2ScHptg,4037
|
|
@@ -18,8 +14,8 @@ astreum/models/block.py,sha256=NKYyxL6_BrtXRgcgIrnkYsobX0Z_bGqQT-fQ_09zEOo,3226
|
|
|
18
14
|
astreum/models/merkle.py,sha256=ceH4yJlt82XDTXe46hyoU88QIaGVOsVDsBZeDnOJYv8,8590
|
|
19
15
|
astreum/models/patricia.py,sha256=D7UVU4b6Yvn2_McI35VoMEbpqwR8OmZon5LGoUSRADo,8913
|
|
20
16
|
astreum/models/transaction.py,sha256=Vu0cfmh80S31nEbxyJfv1dk9_zqtgGNyMdhlM0uQF4E,2611
|
|
21
|
-
astreum-0.2.
|
|
22
|
-
astreum-0.2.
|
|
23
|
-
astreum-0.2.
|
|
24
|
-
astreum-0.2.
|
|
25
|
-
astreum-0.2.
|
|
17
|
+
astreum-0.2.15.dist-info/licenses/LICENSE,sha256=gYBvRDP-cPLmTyJhvZ346QkrYW_eleke4Z2Yyyu43eQ,1089
|
|
18
|
+
astreum-0.2.15.dist-info/METADATA,sha256=7Nv0BCooioBjPEUhtTB1SdaNceBCgGf2iPZY2BLSx1E,5478
|
|
19
|
+
astreum-0.2.15.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
20
|
+
astreum-0.2.15.dist-info/top_level.txt,sha256=1EG1GmkOk3NPmUA98FZNdKouhRyget-KiFiMk0i2Uz0,8
|
|
21
|
+
astreum-0.2.15.dist-info/RECORD,,
|
astreum/_node/__init__.py
DELETED
|
@@ -1,447 +0,0 @@
|
|
|
1
|
-
import time
|
|
2
|
-
import threading
|
|
3
|
-
from typing import List
|
|
4
|
-
from cryptography.hazmat.primitives.asymmetric import ed25519
|
|
5
|
-
from cryptography.hazmat.primitives import serialization
|
|
6
|
-
|
|
7
|
-
from .relay import Relay, Topic
|
|
8
|
-
from ..machine import AstreumMachine
|
|
9
|
-
from .utils import hash_data
|
|
10
|
-
from .validation.block import Block
|
|
11
|
-
from .storage.storage import Storage
|
|
12
|
-
|
|
13
|
-
class Node:
|
|
14
|
-
def __init__(self, config: dict):
|
|
15
|
-
# Ensure config is a dictionary, but allow it to be None
|
|
16
|
-
self.config = config if config is not None else {}
|
|
17
|
-
|
|
18
|
-
# Handle validation key if provided
|
|
19
|
-
self.validation_private_key = None
|
|
20
|
-
self.validation_public_key = None
|
|
21
|
-
self.is_validator = False
|
|
22
|
-
|
|
23
|
-
# Extract validation private key from config
|
|
24
|
-
if 'validation_private_key' in self.config:
|
|
25
|
-
try:
|
|
26
|
-
key_bytes = bytes.fromhex(self.config['validation_private_key'])
|
|
27
|
-
self.validation_private_key = ed25519.Ed25519PrivateKey.from_private_bytes(key_bytes)
|
|
28
|
-
self.validation_public_key = self.validation_private_key.public_key()
|
|
29
|
-
self.is_validator = True
|
|
30
|
-
|
|
31
|
-
# Set validation_route to True in config so relay will join validation route
|
|
32
|
-
self.config['validation_route'] = True
|
|
33
|
-
print(f"Node is configured as a validator with validation key")
|
|
34
|
-
except Exception as e:
|
|
35
|
-
print(f"Error loading validation private key: {e}")
|
|
36
|
-
|
|
37
|
-
# Initialize relay with our config
|
|
38
|
-
self.relay = Relay(self.config)
|
|
39
|
-
|
|
40
|
-
# Get the node_id from relay
|
|
41
|
-
self.node_id = self.relay.node_id
|
|
42
|
-
|
|
43
|
-
# Initialize storage
|
|
44
|
-
self.storage = Storage(self.config)
|
|
45
|
-
self.storage.node = self # Set the storage node reference to self
|
|
46
|
-
|
|
47
|
-
# Initialize blockchain state
|
|
48
|
-
self.blockchain = create_account_state(self.config)
|
|
49
|
-
|
|
50
|
-
# Store our validator info if we're a validator
|
|
51
|
-
if self.is_validator and self.validation_public_key:
|
|
52
|
-
self.validator_address = self.validation_public_key.public_bytes(
|
|
53
|
-
encoding=serialization.Encoding.Raw,
|
|
54
|
-
format=serialization.PublicFormat.Raw
|
|
55
|
-
)
|
|
56
|
-
self.validator_private_bytes = self.validation_private_key.private_bytes(
|
|
57
|
-
encoding=serialization.Encoding.Raw,
|
|
58
|
-
format=serialization.PrivateFormat.Raw,
|
|
59
|
-
encryption_algorithm=serialization.NoEncryption()
|
|
60
|
-
)
|
|
61
|
-
print(f"Registered validator with address: {self.validator_address.hex()}")
|
|
62
|
-
else:
|
|
63
|
-
self.validator_address = None
|
|
64
|
-
self.validator_private_bytes = None
|
|
65
|
-
|
|
66
|
-
# Latest block of the chain this node is following
|
|
67
|
-
self.latest_block = None
|
|
68
|
-
self.followed_chain_id = self.config.get('followed_chain_id', None)
|
|
69
|
-
|
|
70
|
-
# Initialize machine
|
|
71
|
-
self.machine = AstreumMachine(node=self)
|
|
72
|
-
|
|
73
|
-
# Register message handlers
|
|
74
|
-
self.relay.message_handlers[Topic.PEER_ROUTE] = self._handle_peer_route
|
|
75
|
-
self.relay.message_handlers[Topic.PING] = self._handle_ping
|
|
76
|
-
self.relay.message_handlers[Topic.PONG] = self._handle_pong
|
|
77
|
-
self.relay.message_handlers[Topic.OBJECT_REQUEST] = self._handle_object_request
|
|
78
|
-
self.relay.message_handlers[Topic.OBJECT_RESPONSE] = self._handle_object_response
|
|
79
|
-
self.relay.message_handlers[Topic.ROUTE_REQUEST] = self._handle_route_request
|
|
80
|
-
self.relay.message_handlers[Topic.ROUTE] = self._handle_route
|
|
81
|
-
self.relay.message_handlers[Topic.LATEST_BLOCK_REQUEST] = self._handle_latest_block_request
|
|
82
|
-
self.relay.message_handlers[Topic.LATEST_BLOCK] = self._handle_latest_block
|
|
83
|
-
self.relay.message_handlers[Topic.TRANSACTION] = self._handle_transaction
|
|
84
|
-
self.relay.message_handlers[Topic.BLOCK_REQUEST] = self._handle_block_request
|
|
85
|
-
self.relay.message_handlers[Topic.BLOCK_RESPONSE] = self._handle_block_response
|
|
86
|
-
|
|
87
|
-
# Initialize latest block from storage if available
|
|
88
|
-
self._initialize_latest_block()
|
|
89
|
-
|
|
90
|
-
# Candidate chains that might be adopted
|
|
91
|
-
self.candidate_chains = {} # chain_id -> {'latest_block': block, 'timestamp': time.time()}
|
|
92
|
-
self.pending_blocks = {} # block_hash -> {'block': block, 'timestamp': time.time()}
|
|
93
|
-
|
|
94
|
-
# Threads for validation and chain monitoring
|
|
95
|
-
self.running = False
|
|
96
|
-
self.main_chain_validation_thread = None
|
|
97
|
-
self.candidate_chain_validation_thread = None
|
|
98
|
-
|
|
99
|
-
# Pending transactions for a block
|
|
100
|
-
self.pending_transactions = {} # tx_hash -> {'transaction': tx, 'timestamp': time.time()}
|
|
101
|
-
|
|
102
|
-
# Last block production attempt time
|
|
103
|
-
self.last_block_attempt_time = 0
|
|
104
|
-
|
|
105
|
-
def start(self):
|
|
106
|
-
"""Start the node."""
|
|
107
|
-
self.running = True
|
|
108
|
-
|
|
109
|
-
# Start relay
|
|
110
|
-
self.relay.start()
|
|
111
|
-
|
|
112
|
-
# Start chain monitoring thread
|
|
113
|
-
self.main_chain_validation_thread = threading.Thread(
|
|
114
|
-
target=self._main_chain_validation_loop,
|
|
115
|
-
name="MainChainValidation"
|
|
116
|
-
)
|
|
117
|
-
self.main_chain_validation_thread.daemon = True
|
|
118
|
-
self.main_chain_validation_thread.start()
|
|
119
|
-
|
|
120
|
-
self.candidate_chain_validation_thread = threading.Thread(
|
|
121
|
-
target=self._candidate_chain_validation_loop,
|
|
122
|
-
name="CandidateChainValidation"
|
|
123
|
-
)
|
|
124
|
-
self.candidate_chain_validation_thread.daemon = True
|
|
125
|
-
self.candidate_chain_validation_thread.start()
|
|
126
|
-
|
|
127
|
-
# Set up recurring block query tasks
|
|
128
|
-
main_query_thread = threading.Thread(
|
|
129
|
-
target=self._block_query_loop,
|
|
130
|
-
args=('main',),
|
|
131
|
-
daemon=True
|
|
132
|
-
)
|
|
133
|
-
main_query_thread.start()
|
|
134
|
-
|
|
135
|
-
validation_query_thread = threading.Thread(
|
|
136
|
-
target=self._block_query_loop,
|
|
137
|
-
args=('validation',),
|
|
138
|
-
daemon=True
|
|
139
|
-
)
|
|
140
|
-
validation_query_thread.start()
|
|
141
|
-
|
|
142
|
-
print(f"Node started with ID {self.node_id.hex()}")
|
|
143
|
-
|
|
144
|
-
def stop(self):
|
|
145
|
-
"""Stop the node and all its services."""
|
|
146
|
-
self.running = False
|
|
147
|
-
|
|
148
|
-
# Stop all threads
|
|
149
|
-
if self.main_chain_validation_thread and self.main_chain_validation_thread.is_alive():
|
|
150
|
-
self.main_chain_validation_thread.join(timeout=1.0)
|
|
151
|
-
|
|
152
|
-
if self.candidate_chain_validation_thread and self.candidate_chain_validation_thread.is_alive():
|
|
153
|
-
self.candidate_chain_validation_thread.join(timeout=1.0)
|
|
154
|
-
|
|
155
|
-
# Stop relay last
|
|
156
|
-
if self.relay:
|
|
157
|
-
self.relay.stop()
|
|
158
|
-
|
|
159
|
-
print("Node stopped")
|
|
160
|
-
|
|
161
|
-
def _main_chain_validation_loop(self):
|
|
162
|
-
"""
|
|
163
|
-
Main validation loop for the primary blockchain.
|
|
164
|
-
This thread prioritizes validating blocks on the main chain we're following.
|
|
165
|
-
"""
|
|
166
|
-
while self.running:
|
|
167
|
-
try:
|
|
168
|
-
# Update latest block if we don't have one yet
|
|
169
|
-
if not self.latest_block and hasattr(self.blockchain, 'get_latest_block'):
|
|
170
|
-
self.latest_block = self.blockchain.get_latest_block()
|
|
171
|
-
|
|
172
|
-
# Process any blocks that extend our main chain immediately
|
|
173
|
-
self._process_main_chain_blocks()
|
|
174
|
-
|
|
175
|
-
# Attempt block production if we are a validator
|
|
176
|
-
if self.is_validator and self.validator_address:
|
|
177
|
-
self._attempt_block_production()
|
|
178
|
-
|
|
179
|
-
# Cleanup old items
|
|
180
|
-
self._prune_pending_items()
|
|
181
|
-
|
|
182
|
-
# Sleep to prevent high CPU usage
|
|
183
|
-
time.sleep(0.1) # Short sleep for main chain validation
|
|
184
|
-
except Exception as e:
|
|
185
|
-
print(f"Error in main chain validation loop: {e}")
|
|
186
|
-
time.sleep(1) # Longer sleep on error
|
|
187
|
-
|
|
188
|
-
def _candidate_chain_validation_loop(self):
|
|
189
|
-
"""
|
|
190
|
-
Validation loop for candidate chains (potential forks).
|
|
191
|
-
This thread handles validation of blocks from alternate chains
|
|
192
|
-
without slowing down the main chain processing.
|
|
193
|
-
"""
|
|
194
|
-
while self.running:
|
|
195
|
-
try:
|
|
196
|
-
# Process candidate chains
|
|
197
|
-
self._evaluate_candidate_chains()
|
|
198
|
-
|
|
199
|
-
# Prune old candidate chains
|
|
200
|
-
self._prune_candidate_chains()
|
|
201
|
-
|
|
202
|
-
# Sleep longer for candidate chain validation (lower priority)
|
|
203
|
-
time.sleep(1) # Longer sleep for candidate chain validation
|
|
204
|
-
except Exception as e:
|
|
205
|
-
print(f"Error in candidate chain validation loop: {e}")
|
|
206
|
-
time.sleep(2) # Even longer sleep on error
|
|
207
|
-
|
|
208
|
-
def _prune_pending_items(self):
|
|
209
|
-
"""Remove old pending blocks and transactions."""
|
|
210
|
-
current_time = time.time()
|
|
211
|
-
|
|
212
|
-
# Prune old pending blocks (older than 1 hour)
|
|
213
|
-
blocks_to_remove = [
|
|
214
|
-
block_hash for block_hash, data in self.pending_blocks.items()
|
|
215
|
-
if current_time - data['timestamp'] > 3600 # 1 hour
|
|
216
|
-
]
|
|
217
|
-
for block_hash in blocks_to_remove:
|
|
218
|
-
del self.pending_blocks[block_hash]
|
|
219
|
-
|
|
220
|
-
# Prune old pending transactions (older than 30 minutes)
|
|
221
|
-
txs_to_remove = [
|
|
222
|
-
tx_hash for tx_hash, data in self.pending_transactions.items()
|
|
223
|
-
if current_time - data['timestamp'] > 1800 # 30 minutes
|
|
224
|
-
]
|
|
225
|
-
for tx_hash in txs_to_remove:
|
|
226
|
-
del self.pending_transactions[tx_hash]
|
|
227
|
-
|
|
228
|
-
def _process_main_chain_blocks(self):
|
|
229
|
-
"""
|
|
230
|
-
Process blocks that extend our current main chain.
|
|
231
|
-
Prioritizes blocks that build on our latest block.
|
|
232
|
-
"""
|
|
233
|
-
# Skip if we don't have a latest block yet
|
|
234
|
-
if not self.latest_block:
|
|
235
|
-
return
|
|
236
|
-
|
|
237
|
-
# Get the hash of our latest block
|
|
238
|
-
latest_hash = self.latest_block.get_hash()
|
|
239
|
-
|
|
240
|
-
# Find any pending blocks that build on our latest block
|
|
241
|
-
main_chain_blocks = []
|
|
242
|
-
for block_hash, data in list(self.pending_blocks.items()):
|
|
243
|
-
block = data['block']
|
|
244
|
-
|
|
245
|
-
# Check if this block extends our latest block
|
|
246
|
-
if block.previous == latest_hash:
|
|
247
|
-
main_chain_blocks.append(block)
|
|
248
|
-
|
|
249
|
-
# Process found blocks
|
|
250
|
-
for block in main_chain_blocks:
|
|
251
|
-
self._validate_and_process_main_chain_block(block)
|
|
252
|
-
|
|
253
|
-
def _validate_and_process_main_chain_block(self, block: Block):
|
|
254
|
-
"""
|
|
255
|
-
Validate and process a block that extends our main chain.
|
|
256
|
-
|
|
257
|
-
Args:
|
|
258
|
-
block: Block to validate and process
|
|
259
|
-
"""
|
|
260
|
-
try:
|
|
261
|
-
# Validate block
|
|
262
|
-
is_valid = validate_block(block, self.blockchain.get_accounts_at_block(block.previous), self.blockchain.get_blocks())
|
|
263
|
-
|
|
264
|
-
if is_valid:
|
|
265
|
-
# Apply block to our state
|
|
266
|
-
success = validate_and_apply_block(self.blockchain, block)
|
|
267
|
-
if success:
|
|
268
|
-
print(f"Applied valid block {block.number} to blockchain state")
|
|
269
|
-
self._update_latest_block(block)
|
|
270
|
-
blocks_to_remove = [block.get_hash()]
|
|
271
|
-
for block_hash in blocks_to_remove:
|
|
272
|
-
if block_hash in self.pending_blocks:
|
|
273
|
-
del self.pending_blocks[block_hash]
|
|
274
|
-
print(f"Added block {block.number} to blockchain")
|
|
275
|
-
return True
|
|
276
|
-
except Exception as e:
|
|
277
|
-
print(f"Error validating main chain block {block.number}: {e}")
|
|
278
|
-
|
|
279
|
-
return False
|
|
280
|
-
|
|
281
|
-
def _evaluate_candidate_chains(self):
|
|
282
|
-
"""
|
|
283
|
-
Evaluate candidate chains to determine if any should become our main chain.
|
|
284
|
-
This will validate pending blocks and look for chains with higher cumulative difficulty.
|
|
285
|
-
"""
|
|
286
|
-
# Skip if no candidate chains
|
|
287
|
-
if not self.candidate_chains:
|
|
288
|
-
return
|
|
289
|
-
|
|
290
|
-
# For each candidate chain, validate blocks and calculate metrics
|
|
291
|
-
for chain_id, data in list(self.candidate_chains.items()):
|
|
292
|
-
latest_candidate_block = data['latest_block']
|
|
293
|
-
|
|
294
|
-
# Build the chain backwards
|
|
295
|
-
chain_blocks = self._build_chain_from_latest(latest_candidate_block)
|
|
296
|
-
|
|
297
|
-
# Skip if we couldn't build a complete chain
|
|
298
|
-
if not chain_blocks:
|
|
299
|
-
continue
|
|
300
|
-
|
|
301
|
-
# Validate the entire chain
|
|
302
|
-
valid_chain = self._validate_candidate_chain(chain_blocks)
|
|
303
|
-
|
|
304
|
-
# If valid and better than our current chain, switch to it
|
|
305
|
-
if valid_chain and self._is_better_chain(chain_blocks):
|
|
306
|
-
self._switch_to_new_chain(chain_blocks)
|
|
307
|
-
|
|
308
|
-
def _build_chain_from_latest(self, latest_block: Block) -> List[Block]:
|
|
309
|
-
"""
|
|
310
|
-
Build a chain from the latest block back to a known point in our blockchain.
|
|
311
|
-
|
|
312
|
-
Args:
|
|
313
|
-
latest_block: Latest block in the candidate chain
|
|
314
|
-
|
|
315
|
-
Returns:
|
|
316
|
-
List of blocks in the chain, ordered from oldest to newest
|
|
317
|
-
"""
|
|
318
|
-
chain_blocks = [latest_block]
|
|
319
|
-
current_block = latest_block
|
|
320
|
-
|
|
321
|
-
# Track visited blocks to avoid cycles
|
|
322
|
-
visited = {current_block.get_hash()}
|
|
323
|
-
|
|
324
|
-
# Build chain backwards until we either:
|
|
325
|
-
# 1. Find a block in our main chain
|
|
326
|
-
# 2. Run out of blocks
|
|
327
|
-
# 3. Detect a cycle
|
|
328
|
-
while current_block.number > 0:
|
|
329
|
-
previous_hash = current_block.previous
|
|
330
|
-
|
|
331
|
-
# Check if we have this block in our blockchain
|
|
332
|
-
if hasattr(self.blockchain, 'has_block') and self.blockchain.has_block(previous_hash):
|
|
333
|
-
# Found connection to our main chain
|
|
334
|
-
previous_block = self.blockchain.get_block(previous_hash)
|
|
335
|
-
chain_blocks.insert(0, previous_block)
|
|
336
|
-
break
|
|
337
|
-
|
|
338
|
-
# Check if block is in pending blocks
|
|
339
|
-
elif previous_hash in self.pending_blocks:
|
|
340
|
-
previous_block = self.pending_blocks[previous_hash]['block']
|
|
341
|
-
|
|
342
|
-
# Check for cycles
|
|
343
|
-
if previous_hash in visited:
|
|
344
|
-
print(f"Cycle detected in candidate chain at block {previous_block.number}")
|
|
345
|
-
return []
|
|
346
|
-
|
|
347
|
-
visited.add(previous_hash)
|
|
348
|
-
chain_blocks.insert(0, previous_block)
|
|
349
|
-
current_block = previous_block
|
|
350
|
-
else:
|
|
351
|
-
# Missing block, cannot validate the chain
|
|
352
|
-
print(f"Missing block {previous_hash.hex()} in candidate chain")
|
|
353
|
-
return []
|
|
354
|
-
|
|
355
|
-
return chain_blocks
|
|
356
|
-
|
|
357
|
-
def _validate_candidate_chain(self, chain_blocks: List[Block]) -> bool:
|
|
358
|
-
"""
|
|
359
|
-
Validate a candidate chain of blocks.
|
|
360
|
-
|
|
361
|
-
Args:
|
|
362
|
-
chain_blocks: List of blocks in the chain (oldest to newest)
|
|
363
|
-
|
|
364
|
-
Returns:
|
|
365
|
-
True if the chain is valid, False otherwise
|
|
366
|
-
"""
|
|
367
|
-
# Validate each block in the chain
|
|
368
|
-
for i, block in enumerate(chain_blocks):
|
|
369
|
-
# Skip first block, it's either genesis or a block we already have
|
|
370
|
-
if i == 0:
|
|
371
|
-
continue
|
|
372
|
-
|
|
373
|
-
# Validate block connections
|
|
374
|
-
if block.previous != chain_blocks[i-1].get_hash():
|
|
375
|
-
print(f"Invalid chain: block {block.number} does not reference previous block")
|
|
376
|
-
return False
|
|
377
|
-
|
|
378
|
-
# Validate block
|
|
379
|
-
is_valid = validate_block(block, self.blockchain.get_accounts_at_block(block.previous), self.blockchain.get_blocks())
|
|
380
|
-
if not is_valid:
|
|
381
|
-
print(f"Invalid chain: block {block.number} is invalid")
|
|
382
|
-
return False
|
|
383
|
-
|
|
384
|
-
return True
|
|
385
|
-
|
|
386
|
-
def _is_better_chain(self, chain_blocks: List[Block]) -> bool:
|
|
387
|
-
"""
|
|
388
|
-
Determine if a candidate chain is better than our current chain.
|
|
389
|
-
|
|
390
|
-
Args:
|
|
391
|
-
chain_blocks: List of blocks in the candidate chain
|
|
392
|
-
|
|
393
|
-
Returns:
|
|
394
|
-
True if the candidate chain is better, False otherwise
|
|
395
|
-
"""
|
|
396
|
-
# Get the latest block from the candidate chain
|
|
397
|
-
candidate_latest = chain_blocks[-1]
|
|
398
|
-
|
|
399
|
-
# If we don't have a latest block, any valid chain is better
|
|
400
|
-
if not self.latest_block:
|
|
401
|
-
return True
|
|
402
|
-
|
|
403
|
-
# Compare block numbers (longest chain rule)
|
|
404
|
-
if candidate_latest.number > self.latest_block.number:
|
|
405
|
-
print(f"Candidate chain is longer: {candidate_latest.number} vs {self.latest_block.number}")
|
|
406
|
-
return True
|
|
407
|
-
|
|
408
|
-
return False
|
|
409
|
-
|
|
410
|
-
def _switch_to_new_chain(self, chain_blocks: List[Block]):
|
|
411
|
-
"""
|
|
412
|
-
Switch to a new chain by adding all blocks to our blockchain.
|
|
413
|
-
|
|
414
|
-
Args:
|
|
415
|
-
chain_blocks: List of blocks in the chain (oldest to newest)
|
|
416
|
-
"""
|
|
417
|
-
# Find the point where the chains diverge
|
|
418
|
-
divergence_point = 0
|
|
419
|
-
for i, block in enumerate(chain_blocks):
|
|
420
|
-
# Check if we have this block in our blockchain
|
|
421
|
-
if hasattr(self.blockchain, 'has_block') and self.blockchain.has_block(block.get_hash()):
|
|
422
|
-
divergence_point = i + 1
|
|
423
|
-
else:
|
|
424
|
-
break
|
|
425
|
-
|
|
426
|
-
# Add all blocks after the divergence point
|
|
427
|
-
for i in range(divergence_point, len(chain_blocks)):
|
|
428
|
-
block = chain_blocks[i]
|
|
429
|
-
|
|
430
|
-
# Add block to blockchain
|
|
431
|
-
if hasattr(self.blockchain, 'add_block'):
|
|
432
|
-
try:
|
|
433
|
-
self.blockchain.add_block(block)
|
|
434
|
-
|
|
435
|
-
# Remove from pending blocks
|
|
436
|
-
block_hash = block.get_hash()
|
|
437
|
-
if block_hash in self.pending_blocks:
|
|
438
|
-
del self.pending_blocks[block_hash]
|
|
439
|
-
|
|
440
|
-
print(f"Added block {block.number} to blockchain")
|
|
441
|
-
except Exception as e:
|
|
442
|
-
print(f"Error adding block {block.number} to blockchain: {e}")
|
|
443
|
-
return
|
|
444
|
-
|
|
445
|
-
# Update latest block
|
|
446
|
-
self._update_latest_block(chain_blocks[-1])
|
|
447
|
-
print(f"Switched to new chain, latest block: {self.latest_block.number}")
|
|
File without changes
|
astreum/_node/storage/merkle.py
DELETED
|
@@ -1,224 +0,0 @@
|
|
|
1
|
-
import blake3
|
|
2
|
-
from .storage import Storage
|
|
3
|
-
from astreum import format
|
|
4
|
-
|
|
5
|
-
class MerkleNode:
|
|
6
|
-
def __init__(self, leaf: bool, data: bytes):
|
|
7
|
-
"""
|
|
8
|
-
Initialize a Merkle node.
|
|
9
|
-
|
|
10
|
-
For a leaf node, `data` is the actual content to be stored.
|
|
11
|
-
For an internal node, `data` should be the concatenation of the two child hashes.
|
|
12
|
-
|
|
13
|
-
:param leaf: A boolean flag indicating whether this node is a leaf node (True) or an internal node (False).
|
|
14
|
-
:param data: The node's data. For leaves, the stored data; for internal nodes, concatenated child hashes.
|
|
15
|
-
"""
|
|
16
|
-
self.leaf = leaf
|
|
17
|
-
self.data = data
|
|
18
|
-
self._hash = None # Cached hash value to avoid recomputation.
|
|
19
|
-
|
|
20
|
-
@classmethod
|
|
21
|
-
def from_bytes(cls, data: bytes) -> 'MerkleNode':
|
|
22
|
-
"""
|
|
23
|
-
Deserialize a MerkleNode from its byte representation.
|
|
24
|
-
|
|
25
|
-
The input bytes are expected to be in the Astreum format, containing a leaf flag and node data.
|
|
26
|
-
|
|
27
|
-
:param data: The serialized node data.
|
|
28
|
-
:return: A new MerkleNode instance.
|
|
29
|
-
"""
|
|
30
|
-
leaf_flag, node_data = format.decode(data)
|
|
31
|
-
return cls(True if leaf_flag == 1 else False, node_data)
|
|
32
|
-
|
|
33
|
-
@classmethod
|
|
34
|
-
def from_storage(cls, storage: Storage, hash_value: bytes) -> 'MerkleNode' or None:
|
|
35
|
-
"""
|
|
36
|
-
Retrieve and deserialize a MerkleNode from storage using its hash.
|
|
37
|
-
|
|
38
|
-
:param storage: The Storage instance used to retrieve the node.
|
|
39
|
-
:param hash_value: The hash key under which the node is stored.
|
|
40
|
-
:return: A MerkleNode instance if found, otherwise None.
|
|
41
|
-
"""
|
|
42
|
-
node_bytes = storage.get(hash_value)
|
|
43
|
-
if node_bytes is None:
|
|
44
|
-
return None
|
|
45
|
-
return cls.from_bytes(node_bytes)
|
|
46
|
-
|
|
47
|
-
def to_bytes(self) -> bytes:
|
|
48
|
-
"""
|
|
49
|
-
Serialize the MerkleNode into bytes using the Astreum format.
|
|
50
|
-
|
|
51
|
-
The format encodes a list containing the leaf flag and the node data.
|
|
52
|
-
|
|
53
|
-
:return: The serialized bytes representing the node.
|
|
54
|
-
"""
|
|
55
|
-
return format.encode([1 if self.leaf else 0, self.data])
|
|
56
|
-
|
|
57
|
-
def hash(self) -> bytes:
|
|
58
|
-
"""
|
|
59
|
-
Compute (or retrieve a cached) hash of the node using the Blake3 algorithm.
|
|
60
|
-
|
|
61
|
-
For leaf nodes, the hash is computed over the actual data.
|
|
62
|
-
For internal nodes, the hash is computed over the concatenated child hashes.
|
|
63
|
-
|
|
64
|
-
:return: The Blake3 digest of the node's data.
|
|
65
|
-
"""
|
|
66
|
-
if self._hash is None:
|
|
67
|
-
self._hash = blake3.blake3(self.data).digest()
|
|
68
|
-
return self._hash
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
class MerkleTree:
|
|
72
|
-
def __init__(self, storage: Storage, root_hash: bytes = None, leaves: list[bytes] = None):
|
|
73
|
-
"""
|
|
74
|
-
Initialize a Merkle tree from an existing root hash or by constructing a new tree from leaf data.
|
|
75
|
-
|
|
76
|
-
If a list of leaf data is provided, the tree will be built from the bottom up,
|
|
77
|
-
every node will be stored in the provided storage, and the computed root hash
|
|
78
|
-
will be used as the tree's identifier.
|
|
79
|
-
|
|
80
|
-
:param storage: A Storage instance used for storing and retrieving tree nodes.
|
|
81
|
-
:param root_hash: An optional existing root hash of a Merkle tree.
|
|
82
|
-
:param leaves: An optional list of leaf data (each as bytes). If provided, a new tree is built.
|
|
83
|
-
:raises ValueError: If neither root_hash nor leaves is provided.
|
|
84
|
-
"""
|
|
85
|
-
self.storage = storage
|
|
86
|
-
if leaves is not None:
|
|
87
|
-
self.root_hash = self.build_tree_from_leaves(leaves)
|
|
88
|
-
elif root_hash is not None:
|
|
89
|
-
self.root_hash = root_hash
|
|
90
|
-
else:
|
|
91
|
-
raise ValueError("Either root_hash or leaves must be provided.")
|
|
92
|
-
|
|
93
|
-
def build_tree_from_leaves(self, leaves: list[bytes]) -> bytes:
|
|
94
|
-
"""
|
|
95
|
-
Construct a Merkle tree from a list of leaf data and store each node in storage.
|
|
96
|
-
|
|
97
|
-
Each leaf data entry is wrapped in a MerkleNode (with leaf=True) and stored.
|
|
98
|
-
Then, nodes are paired (duplicating the last node if needed when the count is odd)
|
|
99
|
-
to form parent nodes. For each parent node, the data is the concatenation of its
|
|
100
|
-
two child hashes. This process repeats until a single root hash remains.
|
|
101
|
-
|
|
102
|
-
:param leaves: A list of bytes objects, each representing leaf data.
|
|
103
|
-
:return: The computed root hash of the newly built tree.
|
|
104
|
-
"""
|
|
105
|
-
# Create leaf nodes and store them.
|
|
106
|
-
current_level = []
|
|
107
|
-
for leaf_data in leaves:
|
|
108
|
-
leaf_node = MerkleNode(True, leaf_data)
|
|
109
|
-
leaf_hash = leaf_node.hash()
|
|
110
|
-
self.storage.put(leaf_hash, leaf_node.to_bytes())
|
|
111
|
-
current_level.append(leaf_hash)
|
|
112
|
-
|
|
113
|
-
# Build the tree upward until one node remains.
|
|
114
|
-
while len(current_level) > 1:
|
|
115
|
-
next_level = []
|
|
116
|
-
# If an odd number of nodes, duplicate the last node.
|
|
117
|
-
if len(current_level) % 2 == 1:
|
|
118
|
-
current_level.append(current_level[-1])
|
|
119
|
-
for i in range(0, len(current_level), 2):
|
|
120
|
-
left_hash = current_level[i]
|
|
121
|
-
right_hash = current_level[i + 1]
|
|
122
|
-
# Create a parent node from the concatenated child hashes.
|
|
123
|
-
parent_node = MerkleNode(False, left_hash + right_hash)
|
|
124
|
-
parent_hash = parent_node.hash()
|
|
125
|
-
self.storage.put(parent_hash, parent_node.to_bytes())
|
|
126
|
-
next_level.append(parent_hash)
|
|
127
|
-
current_level = next_level
|
|
128
|
-
|
|
129
|
-
# The remaining hash is the root of the tree.
|
|
130
|
-
return current_level[0]
|
|
131
|
-
|
|
132
|
-
def get(self, index: int, level: int = 0) -> bytes:
|
|
133
|
-
"""
|
|
134
|
-
Retrieve the data stored in the leaf at a given index.
|
|
135
|
-
|
|
136
|
-
The method traverses the tree from the root, using the binary representation
|
|
137
|
-
of the index to choose which branch to follow at each level. It assumes that
|
|
138
|
-
non-leaf nodes store two child hashes concatenated together (each 32 bytes).
|
|
139
|
-
|
|
140
|
-
:param index: The index of the leaf to retrieve. The bits of this number determine the path.
|
|
141
|
-
:param level: The current tree level (used internally for recursion).
|
|
142
|
-
:return: The data stored in the target leaf node, or None if not found.
|
|
143
|
-
"""
|
|
144
|
-
current_node = MerkleNode.from_storage(self.storage, self.root_hash)
|
|
145
|
-
if current_node is None:
|
|
146
|
-
return None
|
|
147
|
-
|
|
148
|
-
# If at a leaf node, return its data.
|
|
149
|
-
if current_node.leaf:
|
|
150
|
-
return current_node.data
|
|
151
|
-
|
|
152
|
-
# For non-leaf nodes, extract the left and right child hashes.
|
|
153
|
-
left_hash = current_node.data[:32]
|
|
154
|
-
right_hash = current_node.data[32:64]
|
|
155
|
-
|
|
156
|
-
# Use the bit at position `level` in the index to select the branch:
|
|
157
|
-
# 0 selects the left branch, 1 selects the right branch.
|
|
158
|
-
bit = (index >> level) & 1
|
|
159
|
-
next_hash = left_hash if bit == 0 else right_hash
|
|
160
|
-
|
|
161
|
-
# Recursively traverse the tree.
|
|
162
|
-
return MerkleTree(self.storage, root_hash=next_hash).get(index, level + 1)
|
|
163
|
-
|
|
164
|
-
def set(self, index: int, new_data: bytes) -> None:
|
|
165
|
-
"""
|
|
166
|
-
Update the leaf at the specified index with new data, rebuilding all affected nodes.
|
|
167
|
-
|
|
168
|
-
The update process recursively creates new nodes for the branch from the updated leaf
|
|
169
|
-
back to the root. At each step, the old node is removed from storage and replaced with
|
|
170
|
-
a new node that reflects the updated hash.
|
|
171
|
-
|
|
172
|
-
:param index: The index of the leaf node to update.
|
|
173
|
-
:param new_data: The new data (as bytes) to store in the leaf.
|
|
174
|
-
"""
|
|
175
|
-
self.root_hash = self._update(self.root_hash, index, 0, new_data)
|
|
176
|
-
|
|
177
|
-
def _update(self, node_hash: bytes, index: int, level: int, new_data: bytes) -> bytes:
    """
    Recursive helper that rewrites the path from ``node_hash`` down to the
    target leaf and returns the hash of the replacement node.

    For a leaf node, a new node holding ``new_data`` is written.  For an
    internal node, the branch selected by bit ``level`` of ``index`` is
    updated recursively, and a new parent is built from the updated child
    hash plus the untouched sibling hash.

    :param node_hash: The hash of the current node to rewrite.
    :param index: The target leaf index whose path is being updated.
    :param level: The current depth in the tree (bit position used for routing).
    :param new_data: The new data to set at the target leaf.
    :return: The hash of the newly constructed node replacing the current node.
    :raises Exception: If the node is not found in storage.
    """
    current_node = MerkleNode.from_storage(self.storage, node_hash)
    if current_node is None:
        raise Exception("Node not found in storage")

    if current_node.leaf:
        # At the leaf, create a new node with the updated data.
        new_leaf = MerkleNode(True, new_data)
        new_hash = new_leaf.hash()
        self.storage.put(new_hash, new_leaf.to_bytes())
        # Only discard the old node when the content actually changed.
        # If new_data equals the old data the hashes coincide, and an
        # unconditional delete would remove the entry we just wrote.
        if new_hash != node_hash:
            self.storage.delete(node_hash)
        return new_hash
    else:
        # Internal node: its data is the two 32-byte child hashes.
        left_hash = current_node.data[:32]
        right_hash = current_node.data[32:64]

        # Use the bit at position `level` in the index to select the branch:
        # 0 selects the left branch, 1 selects the right branch.
        bit = (index >> level) & 1

        if bit == 0:
            new_left_hash = self._update(left_hash, index, level + 1, new_data)
            new_right_hash = right_hash
        else:
            new_left_hash = left_hash
            new_right_hash = self._update(right_hash, index, level + 1, new_data)

        # Rebuild the parent from the (possibly) updated child hashes.
        updated_node_data = new_left_hash + new_right_hash
        new_node = MerkleNode(False, updated_node_data)
        new_node_hash = new_node.hash()
        self.storage.put(new_node_hash, new_node.to_bytes())
        # Same no-op guard as the leaf case: never delete the key just written.
        if new_node_hash != node_hash:
            self.storage.delete(node_hash)
        return new_node_hash
|
|
223
|
-
|
|
224
|
-
|
|
@@ -1,289 +0,0 @@
|
|
|
1
|
-
import blake3
|
|
2
|
-
from typing import Optional, List
|
|
3
|
-
from .storage import Storage
|
|
4
|
-
import astreum.format as format format.decode, format.encode
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
def common_prefix_length(a: bytes, b: bytes) -> int:
    """Return how many leading bytes *a* and *b* have in common."""
    count = 0
    for byte_a, byte_b in zip(a, b):
        if byte_a != byte_b:
            break
        count += 1
    return count
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
class PatriciaNode:
    """
    A node of a content-addressed Patricia (radix) trie.

    Each node carries a compressed key fragment, an optional value, and an
    ordered list of child-node hashes.  Nodes are persisted under the Blake3
    hash of their serialized form, so ``to_bytes``/``hash`` define the
    storage identity of the node.
    """

    def __init__(self, key: bytes, value: Optional[bytes], children: Optional[List[bytes]] = None):
        """
        Initialize a Patricia node.

        :param key: A compressed part of the key.
        :param value: The stored value (if this node represents a complete key) or None.
        :param children: A list of child node hashes (bytes). The children are ordered by the first
                         byte of the child's key.
        """
        self.key = key
        self.value = value
        # Avoid sharing a mutable default: only reuse the caller's list when given.
        self.children = children if children is not None else []
        # Cached Blake3 digest of to_bytes(); None until hash() is first called.
        self._hash: Optional[bytes] = None

    @classmethod
    def from_bytes(cls, data: bytes) -> 'PatriciaNode':
        """
        Deserialize a PatriciaNode from its byte representation.

        Expected format: [key, value, children]
        where children is a list of child node hashes (bytes).

        :param data: Bytes previously produced by :meth:`to_bytes`.
        :return: The reconstructed node.
        """
        decoded = format.decode(data)
        key, value, children = decoded
        return cls(key, value, children)

    @classmethod
    def from_storage(cls, storage: Storage, hash_value: bytes) -> Optional['PatriciaNode']:
        """
        Retrieve and deserialize a PatriciaNode from storage using its hash.

        :param storage: The Storage instance used to retrieve the node.
        :param hash_value: The hash key under which the node is stored.
        :return: A PatriciaNode instance if found, otherwise None.
        """
        node_bytes = storage.get(hash_value)
        if node_bytes is None:
            return None
        return cls.from_bytes(node_bytes)

    def to_bytes(self) -> bytes:
        """
        Serialize the PatriciaNode into bytes using the Astreum format.

        Structure: [key, value, children]
        """
        return format.encode([self.key, self.value, self.children])

    def hash(self) -> bytes:
        """
        Compute (or retrieve a cached) Blake3 hash over the node's serialized bytes.

        NOTE: the cache is only safe while the node is unmodified; callers that
        mutate key/value/children must call :meth:`invalidate_hash` first.
        """
        if self._hash is None:
            self._hash = blake3.blake3(self.to_bytes()).digest()
        return self._hash

    def invalidate_hash(self) -> None:
        """Clear the cached hash so that it is recomputed on next use."""
        self._hash = None
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
class PatriciaTrie:
    """
    A persistent, content-addressed Patricia (radix) trie.

    Every node is stored under the Blake3 hash of its serialized form, so
    any mutation rewrites the affected path and yields a new ``root_hash``.
    """

    def __init__(self, storage: Storage, root_hash: Optional[bytes] = None):
        """
        Initialize a Patricia Trie.

        :param storage: A Storage instance for persisting nodes.
        :param root_hash: Optionally, an existing root hash. If None, the trie is empty.
        """
        self.storage = storage
        self.root_hash = root_hash

    def get(self, key: bytes) -> Optional[bytes]:
        """
        Retrieve the value associated with the given key.

        :param key: The key (as bytes) to search for.
        :return: The stored value if found, otherwise None.
        """
        if self.root_hash is None:
            return None
        return self._get(self.root_hash, key)

    def _get(self, node_hash: bytes, key: bytes) -> Optional[bytes]:
        """Recursive lookup helper: match ``key`` against this node's compressed key."""
        node = PatriciaNode.from_storage(self.storage, node_hash)
        if node is None:
            return None

        cp_len = common_prefix_length(key, node.key)
        # The node's key must be fully consumed for the search to continue.
        if cp_len == len(node.key):
            if cp_len == len(key):
                return node.value  # exact match
            remainder = key[cp_len:]
            branch = remainder[0]
            child_hash = self._find_child(node.children, branch)
            if child_hash is None:
                return None
            return self._get(child_hash, remainder)
        return None

    def put(self, key: bytes, value: bytes) -> None:
        """
        Insert or update the key with the provided value.

        :param key: The key (as bytes) to insert.
        :param value: The value (as bytes) to associate with the key.
        """
        if self.root_hash is None:
            # Empty trie: the new entry becomes the root.
            new_node = PatriciaNode(key, value, [])
            new_hash = new_node.hash()
            self.storage.put(new_hash, new_node.to_bytes())
            self.root_hash = new_hash
        else:
            self.root_hash = self._put(self.root_hash, key, value)

    def _put(self, node_hash: bytes, key: bytes, value: bytes) -> bytes:
        """
        Recursive helper for inserting or updating a key.

        Returns the new hash for the node that replaces the current node.

        :raises Exception: If a child node referenced during the update is
            missing from storage, or no insertion case applies.
        """
        node = PatriciaNode.from_storage(self.storage, node_hash)
        if node is None:
            # Node missing: create a new leaf.
            new_node = PatriciaNode(key, value, [])
            new_hash = new_node.hash()
            self.storage.put(new_hash, new_node.to_bytes())
            return new_hash

        cp_len = common_prefix_length(key, node.key)
        len_node_key = len(node.key)
        len_key = len(key)

        # Case 1: Exact match: update the value.
        if cp_len == len_node_key and cp_len == len_key:
            node.value = value
            node.invalidate_hash()
            new_hash = node.hash()
            self.storage.put(new_hash, node.to_bytes())
            # Guard against the no-op update: if the value is unchanged the
            # hashes coincide and deleting would remove the node just written.
            if new_hash != node_hash:
                self.storage.delete(node_hash)
            return new_hash

        # Case 2: Node key is a prefix of key (descend to child).
        if cp_len == len_node_key:
            remainder = key[cp_len:]
            branch = remainder[0]
            # Resolve the child's index BEFORE recursing: the recursive _put
            # may delete the old child from storage, after which a binary
            # search over the children list would dereference a missing node.
            idx = self._find_child_index(node.children, branch)
            if idx is not None:
                new_child_hash = self._put(node.children[idx], remainder, value)
                node.children[idx] = new_child_hash
            else:
                # No child on this branch: create a new leaf for the remainder.
                new_leaf = PatriciaNode(remainder, value, [])
                new_leaf_hash = new_leaf.hash()
                self.storage.put(new_leaf_hash, new_leaf.to_bytes())
                self._insert_child(node.children, new_leaf_hash)
            node.invalidate_hash()
            new_hash = node.hash()
            self.storage.put(new_hash, node.to_bytes())
            if new_hash != node_hash:
                self.storage.delete(node_hash)
            return new_hash

        # Case 3: Key is a proper prefix of node.key (split node).
        if cp_len == len_key and cp_len < len_node_key:
            old_suffix = node.key[cp_len:]
            node.key = old_suffix  # node keeps only the non-shared suffix
            node.invalidate_hash()
            # Persist the re-keyed node BEFORE linking it: _insert_child reads
            # child nodes back from storage to keep the list ordered, so the
            # child must already exist under its new hash.
            self.storage.put(node.hash(), node.to_bytes())

            # New branch node holds the inserted key's value; the existing
            # node becomes its only child.
            branch_node = PatriciaNode(key, value, [])
            self._insert_child(branch_node.children, node.hash())
            branch_hash = branch_node.hash()
            self.storage.put(branch_hash, branch_node.to_bytes())
            self.storage.delete(node_hash)
            return branch_hash

        # Case 4: Partial common prefix (split into a branch with two children).
        if cp_len < len_node_key and cp_len < len_key:
            common_prefix = key[:cp_len]
            old_suffix = node.key[cp_len:]
            new_suffix = key[cp_len:]
            branch_node = PatriciaNode(common_prefix, None, [])

            # Re-key the existing node and persist it before linking (see Case 3).
            node.key = old_suffix
            node.invalidate_hash()
            self.storage.put(node.hash(), node.to_bytes())
            self._insert_child(branch_node.children, node.hash())

            # Create a new leaf for the new key's remaining portion.
            new_leaf = PatriciaNode(new_suffix, value, [])
            new_leaf_hash = new_leaf.hash()
            self.storage.put(new_leaf_hash, new_leaf.to_bytes())
            self._insert_child(branch_node.children, new_leaf_hash)

            branch_hash = branch_node.hash()
            self.storage.put(branch_hash, branch_node.to_bytes())
            self.storage.delete(node_hash)
            return branch_hash

        raise Exception("Unhandled case in PatriciaTrie.put")

    def _find_child(self, children: List[bytes], branch: int) -> Optional[bytes]:
        """
        Return the hash of the child whose branch byte (first byte of its key)
        equals ``branch``, or None if no such child exists.
        """
        idx = self._find_child_index(children, branch)
        return children[idx] if idx is not None else None

    def _find_child_index(self, children: List[bytes], branch: int) -> Optional[int]:
        """
        Binary-search the ordered children list and return the index of the
        child whose branch byte equals ``branch``, or None if absent.

        :raises Exception: If a listed child cannot be loaded from storage.
        """
        lo = 0
        hi = len(children)
        while lo < hi:
            mid = (lo + hi) // 2
            child_node = PatriciaNode.from_storage(self.storage, children[mid])
            if child_node is None or not child_node.key:
                raise Exception("Child node missing or has empty key.")
            child_branch = child_node.key[0]
            if child_branch == branch:
                return mid
            elif child_branch < branch:
                lo = mid + 1
            else:
                hi = mid
        return None

    def _insert_child(self, children: List[bytes], new_child_hash: bytes) -> None:
        """
        Insert a new child hash into the children list, keeping the list
        ordered by each child's branch byte.

        The child MUST already be persisted in storage, because both the new
        child and the existing children are loaded to compare branch bytes.
        """
        new_child_node = PatriciaNode.from_storage(self.storage, new_child_hash)
        if new_child_node is None or not new_child_node.key:
            raise Exception("New child node missing or has empty key.")
        new_branch = new_child_node.key[0]
        lo = 0
        hi = len(children)
        while lo < hi:
            mid = (lo + hi) // 2
            child_node = PatriciaNode.from_storage(self.storage, children[mid])
            if child_node is None or not child_node.key:
                raise Exception("Child node missing or has empty key.")
            if child_node.key[0] < new_branch:
                lo = mid + 1
            else:
                hi = mid
        children.insert(lo, new_child_hash)
|
|
File without changes
|
|
File without changes
|
|
File without changes
|