astreum 0.1.5__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of astreum might be problematic.

astreum/__init__.py CHANGED
@@ -1 +1,2 @@
 from .machine import AstreumMachine
+from .node import Node
@@ -17,12 +17,11 @@ from astreum.lispeum.tokenizer import tokenize
 from astreum.lispeum.parser import parse
 
 class AstreumMachine:
-    def __init__(self):
-        self.global_env = Environment()
-
+    def __init__(self, node: 'Node' = None):
+        self.global_env = Environment(node=node)
         self.sessions: Dict[str, Environment] = {}
-
         self.lock = threading.Lock()
+
 
     def create_session(self) -> str:
         session_id = str(uuid.uuid4())
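
The constructor change above lets an AstreumMachine be backed by a network Node: the optional node is passed straight into the global Environment. A minimal usage sketch, assuming only the exports shown in this diff (astreum.AstreumMachine, astreum.Node) and that a Node accepts a plain config dict; the config values here are illustrative:

from astreum import AstreumMachine, Node

# Standalone machine: globals live in the in-process Environment dict.
local_machine = AstreumMachine()

# Node-backed machine: Environment.set/get delegate to the node's global
# storage (see the Environment hunk below). Constructing a Node also wires
# up relay, storage and route table modules that are not part of this diff.
node = Node({'difficulty': 1})
networked_machine = AstreumMachine(node=node)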
@@ -3,14 +3,21 @@ from astreum.lispeum.expression import Expr
 
 
 class Environment:
-    def __init__(self, parent: 'Environment' = None):
+    def __init__(self, parent: 'Environment' = None, node: 'Node' = None):
         self.data: Dict[str, Expr] = {}
         self.parent = parent
+        self.node = node
 
     def set(self, name: str, value: Expr):
-        self.data[name] = value
+        if self.node:
+            self.node.post_global_storage(name, value)
+        else:
+            self.data[name] = value
 
     def get(self, name: str) -> Optional[Expr]:
+        if self.node:
+            return self.node.query_global_storage(name)
+
         if name in self.data:
             return self.data[name]
         elif self.parent:
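
After this change the environment has two modes: with no node, set/get use the local dict and fall back to the parent environment; with a node, set forwards to node.post_global_storage and get is answered entirely by node.query_global_storage (the local dict and parent are never consulted). A minimal sketch with a stub standing in for the real Node; only the two methods used in the diff are assumed:

class StubNode:
    def __init__(self):
        self.store = {}
    def post_global_storage(self, name, value):
        self.store[name] = value
    def query_global_storage(self, name):
        return self.store.get(name)

local_env = Environment()                  # set/get use the in-memory dict, then the parent
shared_env = Environment(node=StubNode())  # set/get go through the node; parent is bypassed on get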
@@ -0,0 +1,416 @@
+import os
+import hashlib
+import time
+from typing import Tuple
+
+from .relay import Relay, Topic
+from .relay.peer import Peer
+from .storage import Storage
+from .route_table import RouteTable
+from .machine import AstreumMachine
+from .utils import encode, decode
+from .models import Block, Transaction
+
+class Node:
+    def __init__(self, config: dict):
+        self.config = config
+        self.node_id = config.get('node_id', os.urandom(32)) # Default to random ID if not provided
+        self.relay = Relay(config)
+        self.storage = Storage(config)
+        self.machine = AstreumMachine(config)
+        self.route_table = RouteTable(config, self.node_id)
+
+        # Latest block of the chain this node is following
+        self.latest_block = None
+        self.followed_chain_id = config.get('followed_chain_id', None)
+
+        # Candidate chains that might be adopted
+        self.candidate_chains = {} # chain_id -> {'latest_block': block, 'timestamp': time.time()}
+
+        # Register message handlers
+        self._register_message_handlers()
+
+        # Initialize latest block from storage if available
+        self._initialize_latest_block()
+
+    def _register_message_handlers(self):
+        """Register handlers for different message topics."""
+        self.relay.register_message_handler(Topic.PING, self._handle_ping)
+        self.relay.register_message_handler(Topic.PONG, self._handle_pong)
+        self.relay.register_message_handler(Topic.OBJECT_REQUEST, self._handle_object_request)
+        self.relay.register_message_handler(Topic.OBJECT, self._handle_object)
+        self.relay.register_message_handler(Topic.ROUTE_REQUEST, self._handle_route_request)
+        self.relay.register_message_handler(Topic.ROUTE, self._handle_route)
+        self.relay.register_message_handler(Topic.LATEST_BLOCK_REQUEST, self._handle_latest_block_request)
+        self.relay.register_message_handler(Topic.LATEST_BLOCK, self._handle_latest_block)
+        self.relay.register_message_handler(Topic.TRANSACTION, self._handle_transaction)
+
+    def _handle_ping(self, body: bytes, addr: Tuple[str, int], envelope):
+        """
+        Handle ping messages by storing peer info and responding with a pong.
+
+        The ping message contains:
+        - public_key: The sender's public key
+        - difficulty: The sender's preferred proof-of-work difficulty
+        - routes: The sender's available routes
+        """
+        try:
+            # Parse peer information from the ping message
+            parts = decode(body)
+            if len(parts) != 3:
+                return
+
+            public_key, difficulty_bytes, routes_data = parts
+            difficulty = int.from_bytes(difficulty_bytes, byteorder='big')
+
+            # Store peer information in routing table
+            peer = self.route_table.update_peer(addr, public_key, difficulty)
+
+            # Process the routes the sender is participating in
+            if routes_data:
+                # routes_data is a simple list like [0, 1] meaning peer route and validation route
+                # Add peer to each route they participate in
+                self.relay.add_peer_to_route(peer, list(routes_data))
+
+            # Create response with our public key, difficulty and routes we participate in
+            pong_data = encode([
+                self.node_id, # Our public key
+                self.config.get('difficulty', 1).to_bytes(4, byteorder='big'), # Our difficulty
+                self.relay.get_routes() # Our routes as bytes([0, 1]) for peer and validation
+            ])
+
+            self.relay.send_message(pong_data, Topic.PONG, addr)
+        except Exception as e:
+            print(f"Error handling ping message: {e}")
+
+    def _handle_pong(self, body: bytes, addr: Tuple[str, int], envelope):
+        """
+        Handle pong messages by updating peer information.
+        No response is sent to a pong message.
+        """
+        try:
+            # Parse peer information from the pong message
+            parts = decode(body)
+            if len(parts) != 3:
+                return
+
+            public_key, difficulty_bytes, routes_data = parts
+            difficulty = int.from_bytes(difficulty_bytes, byteorder='big')
+
+            # Update peer information in routing table
+            peer = self.route_table.update_peer(addr, public_key, difficulty)
+
+            # Process the routes the sender is participating in
+            if routes_data:
+                # routes_data is a simple list like [0, 1] meaning peer route and validation route
+                # Add peer to each route they participate in
+                self.relay.add_peer_to_route(peer, list(routes_data))
+        except Exception as e:
+            print(f"Error handling pong message: {e}")
+
+    def _handle_object_request(self, body: bytes, addr: Tuple[str, int], envelope):
+        """
+        Handle request for an object by its hash.
+        Check storage and return if available, otherwise ignore.
+        """
+        try:
+            # The body is the hash of the requested object
+            object_hash = body
+            object_data = self.storage.get(object_hash)
+
+            if object_data:
+                # Object found, send it back
+                self.relay.send_message(object_data, Topic.OBJECT, addr)
+            # If object not found, simply ignore the request
+        except Exception as e:
+            print(f"Error handling object request: {e}")
+
+    def _handle_object(self, body: bytes, addr: Tuple[str, int], envelope):
+        """
+        Handle receipt of an object.
+        If not in storage, verify the hash and put in storage.
+        """
+        try:
+            # Verify hash matches the object
+            object_hash = hashlib.sha256(body).digest()
+
+            # Check if we already have this object
+            if not self.storage.exists(object_hash):
+                # Store the object
+                self.storage.put(object_hash, body)
+        except Exception as e:
+            print(f"Error handling object: {e}")
+
+    def _handle_route_request(self, body: bytes, addr: Tuple[str, int], envelope):
+        """
+        Handle request for routing information.
+        Seed route to peer with one peer per bucket in the route table.
+        """
+        try:
+            # Create a list to store one peer from each bucket
+            route_peers = []
+
+            # Get one peer from each bucket
+            for bucket_index in range(self.route_table.num_buckets):
+                peers = self.route_table.get_bucket_peers(bucket_index)
+                if peers and len(peers) > 0:
+                    # Add one peer from this bucket
+                    route_peers.append(peers[0])
+
+            # Serialize the peer list
+            # Format: List of [peer_addr, peer_port, peer_key]
+            peer_data = []
+            for peer in route_peers:
+                peer_addr, peer_port = peer.address
+                peer_data.append(encode([
+                    peer_addr.encode('utf-8'),
+                    peer_port.to_bytes(2, byteorder='big'),
+                    peer.node_id
+                ]))
+
+            # Encode the complete route data
+            route_data = encode(peer_data)
+
+            # Send routing information back
+            self.relay.send_message(route_data, Topic.ROUTE, addr)
+        except Exception as e:
+            print(f"Error handling route request: {e}")
+
+    def _handle_route(self, body: bytes, addr: Tuple[str, int], envelope):
+        """
+        Handle receipt of a route message containing a list of IP addresses to ping.
+        """
+        try:
+            # Decode the list of peers
+            peer_entries = decode(body)
+
+            # Process each peer
+            for peer_data in peer_entries:
+                try:
+                    peer_parts = decode(peer_data)
+                    if len(peer_parts) != 3:
+                        continue
+
+                    peer_addr_bytes, peer_port_bytes, peer_id = peer_parts
+                    peer_addr = peer_addr_bytes.decode('utf-8')
+                    peer_port = int.from_bytes(peer_port_bytes, byteorder='big')
+
+                    # Create peer address tuple
+                    peer_address = (peer_addr, peer_port)
+
+                    # Ping this peer if it's not already in our routing table
+                    # and it's not our own address
+                    if (not self.route_table.has_peer(peer_address) and
+                        peer_address != self.relay.get_address()):
+                        # Create ping message with our info and routes
+                        # Encode our peer and validation routes
+                        peer_routes_list = self.relay.get_routes()
+
+                        # Combine into a single list of routes with type flags
+                        # For each route: [is_validation_route, route_id]
+                        routes = []
+
+                        # Add peer routes (type flag = 0)
+                        for route in peer_routes_list:
+                            routes.append(encode([bytes([0]), route]))
+
+                        # Encode the complete routes list
+                        all_routes = encode(routes)
+
+                        ping_data = encode([
+                            self.node_id, # Our public key
+                            self.config.get('difficulty', 1).to_bytes(4, byteorder='big'), # Our difficulty
+                            all_routes # All routes we participate in
+                        ])
+
+                        # Send ping to the peer
+                        self.relay.send_message(ping_data, Topic.PING, peer_address)
+                except Exception as e:
+                    print(f"Error processing peer in route: {e}")
+                    continue
+        except Exception as e:
+            print(f"Error handling route message: {e}")
+
+    def _handle_latest_block_request(self, body: bytes, addr: Tuple[str, int], envelope):
+        """
+        Handle request for the latest block from the chain currently following.
+        Any node can request the latest block for syncing purposes.
+        """
+        try:
+            # Return our latest block from the followed chain
+            if self.latest_block:
+                # Send latest block to the requester
+                self.relay.send_message(self.latest_block.to_bytes(), Topic.LATEST_BLOCK, addr)
+        except Exception as e:
+            print(f"Error handling latest block request: {e}")
+
+    def _handle_latest_block(self, body: bytes, addr: Tuple[str, int], envelope):
+        """
+        Handle receipt of a latest block message.
+        Identify chain, validate if following chain, only accept if latest block
+        in chain is in the previous field.
+        """
+        try:
+            # Check if we're in the validation route
+            # This is now already checked by the relay's _handle_message method
+            if not self.relay.is_in_validation_route():
+                return
+
+            # Deserialize the block
+            block = Block.from_bytes(body)
+            if not block:
+                return
+
+            # Check if we're following this chain
+            if not self.machine.is_following_chain(block.chain_id):
+                # Store as a potential candidate chain if it has a higher height
+                if not self.followed_chain_id or block.chain_id != self.followed_chain_id:
+                    self._add_candidate_chain(block)
+                return
+
+            # Get our current latest block
+            our_latest = self.latest_block
+
+            # Verify block hash links to our latest block
+            if our_latest and block.previous_hash == our_latest.hash:
+                # Process the valid block
+                self.machine.process_block(block)
+
+                # Update our latest block
+                self.latest_block = block
+            # Check if this block is ahead of our current chain
+            elif our_latest and block.height > our_latest.height:
+                # Block is ahead but doesn't link directly to our latest
+                # Add to candidate chains for potential future adoption
+                self._add_candidate_chain(block)
+
+            # No automatic broadcasting - nodes will request latest blocks when needed
+        except Exception as e:
+            print(f"Error handling latest block: {e}")
+
+    def _handle_transaction(self, body: bytes, addr: Tuple[str, int], envelope):
+        """
+        Handle receipt of a transaction.
+        Accept if validation route is present and counter is valid relative to the latest block in our chain.
+        """
+        try:
+            # Check if we're in the validation route
+            # This is now already checked by the relay's _handle_message method
+            if not self.relay.is_in_validation_route():
+                return
+
+            # Deserialize the transaction
+            transaction = Transaction.from_bytes(body)
+            if not transaction:
+                return
+
+            # Check if we're following this chain
+            if not self.machine.is_following_chain(transaction.chain_id):
+                return
+
+            # Verify transaction has a valid validation route
+            if not transaction.has_valid_route():
+                return
+
+            # Get latest block from this chain
+            latest_block = self.machine.get_latest_block(transaction.chain_id)
+            if not latest_block:
+                return
+
+            # Verify transaction counter is valid relative to the latest block
+            if not transaction.is_counter_valid(latest_block):
+                return
+
+            # Process the valid transaction
+            self.machine.process_transaction(transaction)
+
+            # Relay to other peers in the validation route
+            validation_peers = self.relay.get_route_peers(1) # 1 = validation route
+            for peer in validation_peers:
+                if peer.address != addr: # Don't send back to originator
+                    self.relay.send_message(body, Topic.TRANSACTION, peer.address)
+        except Exception as e:
+            print(f"Error handling transaction: {e}")
+
+    def _initialize_latest_block(self):
+        """Initialize the latest block from storage if available."""
+        try:
+            if self.followed_chain_id:
+                # Get the latest block for the chain we're following
+                self.latest_block = self.machine.get_latest_block(self.followed_chain_id)
+            else:
+                # If no specific chain is set to follow, get the latest block from the default chain
+                self.latest_block = self.machine.get_latest_block()
+
+            # If we have a latest block, set the followed chain ID
+            if self.latest_block:
+                self.followed_chain_id = self.latest_block.chain_id
+        except Exception as e:
+            print(f"Error initializing latest block: {e}")
+
+    def set_followed_chain(self, chain_id):
+        """
+        Set the chain that this node follows.
+
+        Args:
+            chain_id: The ID of the chain to follow
+        """
+        self.followed_chain_id = chain_id
+        self.latest_block = self.machine.get_latest_block(chain_id)
+
+    def get_latest_block(self):
+        """
+        Get the latest block of the chain this node is following.
+
+        Returns:
+            The latest block, or None if not available
+        """
+        return self.latest_block
+
+    def _add_candidate_chain(self, block):
+        """
+        Add a block to candidate chains for potential future adoption.
+
+        Args:
+            block: The block to add as a candidate
+        """
+        chain_id = block.chain_id
+
+        # If we already have this chain as a candidate, only update if this block is newer
+        if chain_id in self.candidate_chains:
+            current_candidate = self.candidate_chains[chain_id]['latest_block']
+            if block.height > current_candidate.height:
+                self.candidate_chains[chain_id] = {
+                    'latest_block': block,
+                    'timestamp': time.time()
+                }
+        else:
+            # Add as a new candidate chain
+            self.candidate_chains[chain_id] = {
+                'latest_block': block,
+                'timestamp': time.time()
+            }
+
+        # Prune old candidates (older than 1 hour)
+        self._prune_candidate_chains()
+
+    def _prune_candidate_chains(self):
+        """Remove candidate chains that are older than 1 hour."""
+        current_time = time.time()
+        chains_to_remove = []
+
+        for chain_id, data in self.candidate_chains.items():
+            if current_time - data['timestamp'] > 3600: # 1 hour in seconds
+                chains_to_remove.append(chain_id)
+
+        for chain_id in chains_to_remove:
+            del self.candidate_chains[chain_id]
+
+    def evaluate_candidate_chains(self):
+        """
+        Evaluate all candidate chains to see if we should switch to one.
+        This is a placeholder for now - in a real implementation, you would
+        verify the chain and potentially switch to it if it's valid and better.
+        """
+        # TODO: Implement chain evaluation logic
+        pass
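
For reference, a hedged sketch of constructing the new Node with the config keys this file actually reads ('node_id', 'followed_chain_id', 'difficulty'); the values are illustrative, and the relay, storage and route_table modules it instantiates are not part of this diff:

import os
from astreum import Node

config = {
    'node_id': os.urandom(32),        # defaults to a random 32-byte ID if omitted
    'followed_chain_id': None,        # can be set later with set_followed_chain()
    'difficulty': 1,                  # advertised in ping/pong messages
}
node = Node(config)
print(node.get_latest_block())        # None until the followed chain has a block
node.evaluate_candidate_chains()      # currently a placeholder (see the TODO above)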
astreum/node/models.py ADDED
@@ -0,0 +1,96 @@
+import socket
+from pathlib import Path
+from typing import Optional, Tuple
+from astreum.machine import AstreumMachine
+from .relay import Relay
+from .relay.message import Topic
+from .relay.route import RouteTable
+from .relay.peer import Peer
+import os
+
+class Storage:
+    def __init__(self, config: dict):
+        self.max_space = config.get('max_storage_space', 1024 * 1024 * 1024) # Default 1GB
+        self.current_space = 0
+        self.storage_path = Path(config.get('storage_path', 'storage'))
+        self.storage_path.mkdir(parents=True, exist_ok=True)
+
+        # Calculate current space usage
+        self.current_space = sum(f.stat().st_size for f in self.storage_path.glob('*') if f.is_file())
+
+    def put(self, data_hash: bytes, data: bytes) -> bool:
+        """Store data with its hash. Returns True if successful, False if space limit exceeded."""
+        data_size = len(data)
+        if self.current_space + data_size > self.max_space:
+            return False
+
+        file_path = self.storage_path / data_hash.hex()
+
+        # Don't store if already exists
+        if file_path.exists():
+            return True
+
+        # Store the data
+        file_path.write_bytes(data)
+        self.current_space += data_size
+        return True
+
+    def get(self, data_hash: bytes) -> Optional[bytes]:
+        """Retrieve data by its hash. Returns None if not found."""
+        file_path = self.storage_path / data_hash.hex()
+        if not file_path.exists():
+            return None
+        return file_path.read_bytes()
+
+    def contains(self, data_hash: bytes) -> bool:
+        """Check if data exists in storage."""
+        return (self.storage_path / data_hash.hex()).exists()
+
+class Account:
+    def __init__(self, public_key: bytes, balance: int, counter: int):
+        self.public_key = public_key
+        self.balance = balance
+        self.counter = counter
+
+class Block:
+    def __init__(
+        self,
+        accounts: bytes,
+        chain: Chain,
+        difficulty: int,
+        delay: int,
+        number: int,
+        previous: Block,
+        receipts: bytes,
+        aster: int,
+        time: int,
+        transactions: bytes,
+        validator: Account,
+        signature: bytes
+    ):
+        self.accounts = accounts
+        self.chain = chain
+        self.difficulty = difficulty
+        self.delay = delay
+        self.number = number
+        self.previous = previous
+        self.receipts = receipts
+        self.aster = aster
+        self.time = time
+        self.transactions = transactions
+        self.validator = validator
+        self.signature = signature
+
+class Chain:
+    def __init__(self, latest_block: Block):
+        self.latest_block = latest_block
+
+class Transaction:
+    def __init__(self, chain: Chain, receipient: Account, sender: Account, counter: int, amount: int, signature: bytes, data: bytes):
+        self.chain = chain
+        self.receipient = receipient
+        self.sender = sender
+        self.counter = counter
+        self.amount = amount
+        self.signature = signature
+        self.data = data
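
The Storage class added above is a simple content-addressed file store: each object is written under storage_path using the hex of its hash as the filename, and put refuses writes that would exceed max_storage_space. A hedged round-trip sketch, assuming Storage is importable from astreum.node.models as the file header suggests:

import hashlib
from astreum.node.models import Storage

store = Storage({'storage_path': '/tmp/astreum-demo', 'max_storage_space': 10 * 1024 * 1024})
payload = b'hello astreum'
digest = hashlib.sha256(payload).digest()

assert store.put(digest, payload)      # False only if the space limit would be exceeded
assert store.contains(digest)
assert store.get(digest) == payload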