astreum-0.2.3-py3-none-any.whl → astreum-0.2.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (45)
  1. astreum/node.py +10 -0
  2. {astreum-0.2.3.dist-info → astreum-0.2.5.dist-info}/METADATA +1 -1
  3. astreum-0.2.5.dist-info/RECORD +18 -0
  4. astreum/_node/relay/__init__.py +0 -371
  5. astreum/_node/relay/bucket.py +0 -90
  6. astreum/_node/relay/envelope.py +0 -280
  7. astreum/_node/relay/message.py +0 -110
  8. astreum/_node/relay/peer.py +0 -174
  9. astreum/_node/relay/route.py +0 -161
  10. astreum/_node/storage/storage.py +0 -253
  11. astreum/_node/storage/utils.py +0 -137
  12. astreum/_node/utils.py +0 -34
  13. astreum/_node/validation/__init__.py +0 -0
  14. astreum/_node/validation/_block/__init__.py +0 -0
  15. astreum/_node/validation/_block/create.py +0 -98
  16. astreum/_node/validation/_block/model.py +0 -81
  17. astreum/_node/validation/_block/validate.py +0 -196
  18. astreum/_node/validation/account.py +0 -99
  19. astreum/_node/validation/block.py +0 -21
  20. astreum/_node/validation/constants.py +0 -15
  21. astreum/_node/validation/stake.py +0 -229
  22. astreum/_node/validation/transaction.py +0 -146
  23. astreum/_node/validation/vdf.py +0 -80
  24. astreum/lispeum/expression.py +0 -95
  25. astreum/lispeum/special/__init__.py +0 -0
  26. astreum/lispeum/special/definition.py +0 -27
  27. astreum/lispeum/special/list/__init__.py +0 -0
  28. astreum/lispeum/special/list/all.py +0 -32
  29. astreum/lispeum/special/list/any.py +0 -32
  30. astreum/lispeum/special/list/fold.py +0 -29
  31. astreum/lispeum/special/list/get.py +0 -20
  32. astreum/lispeum/special/list/insert.py +0 -23
  33. astreum/lispeum/special/list/map.py +0 -30
  34. astreum/lispeum/special/list/position.py +0 -33
  35. astreum/lispeum/special/list/remove.py +0 -22
  36. astreum/lispeum/special/number/__init__.py +0 -0
  37. astreum/lispeum/special/number/addition.py +0 -0
  38. astreum/lispeum/storage.py +0 -410
  39. astreum/machine/__init__.py +0 -352
  40. astreum/machine/environment.py +0 -4
  41. astreum/machine/error.py +0 -0
  42. astreum-0.2.3.dist-info/RECORD +0 -56
  43. {astreum-0.2.3.dist-info → astreum-0.2.5.dist-info}/WHEEL +0 -0
  44. {astreum-0.2.3.dist-info → astreum-0.2.5.dist-info}/licenses/LICENSE +0 -0
  45. {astreum-0.2.3.dist-info → astreum-0.2.5.dist-info}/top_level.txt +0 -0
astreum/_node/relay/route.py DELETED
@@ -1,161 +0,0 @@
-"""
-Kademlia-style routing table implementation for Astreum node.
-"""
-
-from typing import List, Dict, Set, Tuple, Optional
-from .bucket import KBucket
-from .peer import Peer, PeerManager
-
-class RouteTable:
-    """
-    Kademlia-style routing table using k-buckets.
-
-    The routing table consists of k-buckets, each covering a specific range of distances.
-    In Kademlia, bucket index (i) contains nodes that share exactly i bits with the local node:
-    - Bucket 0: Contains peers that don't share the first bit with our node ID
-    - Bucket 1: Contains peers that share the first bit, but differ on the second bit
-    - Bucket 2: Contains peers that share the first two bits, but differ on the third bit
-    - And so on...
-
-    This structuring ensures efficient routing to any node in the network.
-    """
-
-    def __init__(self, relay):
-        """
-        Initialize the routing table.
-
-        Args:
-            relay: The relay instance this route table belongs to
-        """
-        self.relay = relay
-        self.our_node_id = relay.node_id
-        self.bucket_size = relay.config.get('max_peers_per_bucket', 20)
-        # Initialize buckets - for a 256-bit key, we need up to 256 buckets
-        self.buckets = {i: KBucket(k=self.bucket_size) for i in range(256)}
-        self.peer_manager = PeerManager(self.our_node_id)
-
-    def add_peer(self, peer: Peer) -> bool:
-        """
-        Add a peer to the appropriate k-bucket based on bit prefix matching.
-
-        Args:
-            peer (Peer): The peer to add
-
-        Returns:
-            bool: True if the peer was added, False otherwise
-        """
-        # Calculate the number of matching prefix bits
-        matching_bits = self.peer_manager.calculate_distance(peer.public_key)
-
-        # Add to the appropriate bucket based on the number of matching bits
-        return self.buckets[matching_bits].add(peer)
-
-    def update_peer(self, addr: tuple, public_key: bytes, difficulty: int = 1) -> Peer:
-        """
-        Update or add a peer to the routing table.
-
-        Args:
-            addr: Tuple of (ip, port)
-            public_key: Peer's public key
-            difficulty: Peer's proof-of-work difficulty
-
-        Returns:
-            Peer: The updated or added peer
-        """
-        # Create or update the peer
-        peer = self.peer_manager.add_or_update_peer(addr, public_key)
-        peer.difficulty = difficulty
-
-        # Add to the appropriate bucket
-        self.add_peer(peer)
-
-        return peer
-
-    def remove_peer(self, peer: Peer) -> bool:
-        """
-        Remove a peer from its k-bucket.
-
-        Args:
-            peer (Peer): The peer to remove
-
-        Returns:
-            bool: True if the peer was removed, False otherwise
-        """
-        matching_bits = self.peer_manager.calculate_distance(peer.public_key)
-        if matching_bits in self.buckets:
-            return self.buckets[matching_bits].remove(peer)
-        return False
-
-    def get_closest_peers(self, target_id: bytes, count: int = 3) -> List[Peer]:
-        """
-        Get the closest peers to the target ID.
-
-        Args:
-            target_id: Target ID to find peers close to
-            count: Maximum number of peers to return
-
-        Returns:
-            List of peers closest to the target ID
-        """
-        # Calculate the number of matching prefix bits with the target
-        matching_bits = self.peer_manager.calculate_distance(target_id)
-
-        closest_peers = []
-
-        # First check the exact matching bucket
-        if matching_bits in self.buckets:
-            bucket_peers = self.buckets[matching_bits].get_peers()
-            closest_peers.extend(bucket_peers)
-
-        # If we need more peers, also check adjacent buckets (farther first)
-        if len(closest_peers) < count:
-            # Check buckets with fewer matching bits (higher XOR distance)
-            for i in range(matching_bits - 1, -1, -1):
-                if i in self.buckets:
-                    bucket_peers = self.buckets[i].get_peers()
-                    closest_peers.extend(bucket_peers)
-                    if len(closest_peers) >= count:
-                        break
-
-        # If still not enough, check buckets with more matching bits
-        if len(closest_peers) < count:
-            for i in range(matching_bits + 1, 256):
-                if i in self.buckets:
-                    bucket_peers = self.buckets[i].get_peers()
-                    closest_peers.extend(bucket_peers)
-                    if len(closest_peers) >= count:
-                        break
-
-        # Return the closest peers, limited by count
-        return closest_peers[:count]
-
-    def get_bucket_peers(self, bucket_index: int) -> List[Peer]:
-        """
-        Get all peers from a specific bucket.
-
-        Args:
-            bucket_index: Index of the bucket to get peers from
-
-        Returns:
-            List of peers in the bucket
-        """
-        if bucket_index in self.buckets:
-            return self.buckets[bucket_index].get_peers()
-        return []
-
-    def has_peer(self, addr: tuple) -> bool:
-        """
-        Check if a peer with the given address exists in the routing table.
-
-        Args:
-            addr: Tuple of (ip, port)
-
-        Returns:
-            bool: True if the peer exists, False otherwise
-        """
-        return self.peer_manager.get_peer_by_address(addr) is not None
-
-    @property
-    def num_buckets(self) -> int:
-        """Get the number of active buckets."""
-        return len(self.buckets)
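
For orientation (not part of the diff itself): the deleted RouteTable docstring above says bucket index i holds peers whose IDs share exactly i leading bits with the local node ID. A minimal illustrative sketch of that prefix-matching calculation, with a hypothetical helper name:

def matching_prefix_bits(a: bytes, b: bytes) -> int:
    # Count how many leading bits two equal-length IDs share.
    count = 0
    for byte_a, byte_b in zip(a, b):
        diff = byte_a ^ byte_b
        if diff == 0:
            count += 8  # the whole byte matches, keep going
            continue
        count += 8 - diff.bit_length()  # matching high bits of the first differing byte
        break
    return count

# IDs that agree on the first two bits and differ on the third land in bucket 2.
assert matching_prefix_bits(bytes([0b11000000]) + bytes(31), bytes([0b11100000]) + bytes(31)) == 2
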
astreum/_node/storage/storage.py DELETED
@@ -1,253 +0,0 @@
-"""
-Storage implementation for the Astreum node.
-
-This module provides a Storage class that manages persistent storage
-of data using either in-memory dictionaries or file system storage.
-It supports network-based retrieval for missing data.
-"""
-
-import threading
-import time
-from pathlib import Path
-from typing import Optional, Dict, Set, Tuple, List, Any
-
-
-class Storage:
-    def __init__(self, config: dict):
-        self.max_space = config.get('max_storage_space', 1024 * 1024 * 1024)  # Default 1GB
-        self.current_space = 0
-
-        # Check if storage_path is provided in config
-        storage_path = config.get('storage_path')
-        self.use_memory_storage = storage_path is None
-
-        # Initialize in-memory storage if no path provided
-        self.memory_storage = {} if self.use_memory_storage else None
-
-        # Only create storage path if not using memory storage
-        if not self.use_memory_storage:
-            self.storage_path = Path(storage_path)
-            self.storage_path.mkdir(parents=True, exist_ok=True)
-            # Calculate current space usage
-            self.current_space = sum(f.stat().st_size for f in self.storage_path.glob('*') if f.is_file())
-
-        self.max_object_recursion = config.get('max_object_recursion', 50)
-        self.network_request_timeout = config.get('network_request_timeout', 5.0)  # Default 5 second timeout
-        self.node = None  # Will be set by the Node after initialization
-
-        # In-progress requests tracking
-        self.pending_requests = {}  # hash -> (start_time, event)
-        self.request_lock = threading.Lock()
-
-
-
-    def put(self, data_hash: bytes, data: bytes) -> bool:
-        """Store data with its hash. Returns True if successful, False if space limit exceeded."""
-        data_size = len(data)
-        if self.current_space + data_size > self.max_space:
-            return False
-
-        # If using memory storage, store in dictionary
-        if self.use_memory_storage:
-            if data_hash not in self.memory_storage:
-                self.memory_storage[data_hash] = data
-                self.current_space += data_size
-            return True
-
-        # Otherwise use file storage
-        file_path = self.storage_path / data_hash.hex()
-
-        # Don't store if already exists
-        if file_path.exists():
-            return True
-
-        # Store the data
-        file_path.write_bytes(data)
-        self.current_space += data_size
-
-        # If this was a pending request, mark it as complete
-        with self.request_lock:
-            if data_hash in self.pending_requests:
-                _, event = self.pending_requests[data_hash]
-                event.set()  # Signal that the data is now available
-
-        return True
-
-    def _local_get(self, data_hash: bytes) -> Optional[bytes]:
-        """Get data from local storage only, no network requests."""
-        # If using memory storage, get from dictionary
-        if self.use_memory_storage:
-            return self.memory_storage.get(data_hash)
-
-        # Otherwise use file storage
-        file_path = self.storage_path / data_hash.hex()
-        if file_path.exists():
-            return file_path.read_bytes()
-        return None
-
-    def get(self, data_hash: bytes, timeout: Optional[float] = None) -> Optional[bytes]:
-        """
-        Retrieve data by its hash, with network fallback.
-
-        This function will first check local storage. If not found and a node is attached,
-        it will initiate a network request asynchronously.
-
-        Args:
-            data_hash: The hash of the data to retrieve
-            timeout: Timeout in seconds to wait for network request, None for default
-
-        Returns:
-            The data bytes if found, None otherwise
-        """
-        if timeout is None:
-            timeout = self.network_request_timeout
-
-        # First check local storage
-        local_data = self._local_get(data_hash)
-        if local_data:
-            return local_data
-
-        # If no node is attached, we can't make network requests
-        if self.node is None:
-            return None
-
-        # Check if there's already a pending request for this hash
-        with self.request_lock:
-            if data_hash in self.pending_requests:
-                start_time, event = self.pending_requests[data_hash]
-                # If this request has been going on too long, cancel it and start a new one
-                elapsed = time.time() - start_time
-                if elapsed > timeout:
-                    # Cancel the old request
-                    self.pending_requests.pop(data_hash)
-                else:
-                    # Wait for the existing request to complete
-                    wait_time = timeout - elapsed
-            else:
-                # No existing request, create a new one
-                event = threading.Event()
-                self.pending_requests[data_hash] = (time.time(), event)
-                # Start the actual network request in a separate thread
-                threading.Thread(
-                    target=self._request_from_network,
-                    args=(data_hash,),
-                    daemon=True
-                ).start()
-                wait_time = timeout
-
-        # Wait for the request to complete or timeout
-        if event.wait(wait_time):
-            # Event was set, data should be available now
-            with self.request_lock:
-                if data_hash in self.pending_requests:
-                    self.pending_requests.pop(data_hash)
-
-            # Check if data is now in local storage
-            return self._local_get(data_hash)
-        else:
-            # Timed out waiting for data
-            with self.request_lock:
-                if data_hash in self.pending_requests:
-                    self.pending_requests.pop(data_hash)
-            return None
-
-    def _request_from_network(self, data_hash: bytes):
-        """
-        Request object from the network.
-        This is meant to be run in a separate thread.
-
-        Args:
-            data_hash: The hash of the object to request
-        """
-        try:
-            if hasattr(self.node, 'request_object'):
-                # Use the node's request_object method
-                self.node.request_object(data_hash)
-                # Note: We don't need to return anything or signal completion here
-                # The put() method will signal completion when the object is received
-        except Exception as e:
-            print(f"Error requesting object {data_hash.hex()} from network: {e}")
-
-    def contains(self, data_hash: bytes) -> bool:
-        """Check if data exists in storage."""
-        if self.use_memory_storage:
-            return data_hash in self.memory_storage
-        return (self.storage_path / data_hash.hex()).exists()
-
-    def get_recursive(self, root_hash: bytes, max_depth: Optional[int] = None,
-                      timeout: Optional[float] = None) -> Dict[bytes, bytes]:
-        """
-        Recursively retrieve all objects starting from a root hash.
-
-        Objects not found locally will be requested from the network.
-        This method will continue processing objects that are available
-        while waiting for network responses.
-
-        Args:
-            root_hash: The hash of the root object
-            max_depth: Maximum recursion depth, defaults to self.max_object_recursion
-            timeout: Time to wait for each object request, None for default
-
-        Returns:
-            Dictionary mapping object hashes to their data
-        """
-        if max_depth is None:
-            max_depth = self.max_object_recursion
-
-        if timeout is None:
-            timeout = self.network_request_timeout
-
-        # Start with the root object
-        objects = {}
-        pending_queue = [(root_hash, 0)]  # (hash, depth)
-        processed = set()
-
-        # Process objects in the queue
-        while pending_queue:
-            current_hash, current_depth = pending_queue.pop(0)
-
-            # Skip if already processed or too deep
-            if current_hash in processed or current_depth > max_depth:
-                continue
-
-            processed.add(current_hash)
-
-            # Try to get the object (which may start a network request)
-            obj_data = self.get(current_hash, timeout)
-            if obj_data is None:
-                # Object not found, continue with other objects
-                continue
-
-            # Store the object
-            objects[current_hash] = obj_data
-
-            # Queue child objects if not at max depth
-            if current_depth < max_depth:
-                # Try to detect child objects in the data
-                # This depends on the data format, so this is just a placeholder
-                # In a real implementation, you would parse the data based on its format
-                # and extract references to other objects
-                child_hashes = self._extract_child_hashes(obj_data)
-                for child_hash in child_hashes:
-                    pending_queue.append((child_hash, current_depth + 1))
-
-        return objects
-
-    def _extract_child_hashes(self, data: bytes) -> List[bytes]:
-        """
-        Extract child object hashes from object data.
-        This is a placeholder method that should be overridden or adapted based on the object format.
-
-        Args:
-            data: The object data
-
-        Returns:
-            List of child object hashes
-        """
-        # In a real implementation, this would parse the data based on its format
-        # and extract references to other objects
-        # For example, if the data is a serialized Merkle node, you might extract
-        # left and right child hashes
-
-        # For now, return an empty list
-        return []
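
As a usage note (illustrative only, not the package API): the deleted Storage class keys every entry by its content hash and falls back to an in-memory dict when no storage_path is configured. A self-contained sketch of that content-addressed put/get pattern, assuming the blake3 package used elsewhere in this tree:

import blake3

store = {}  # in-memory fallback, analogous to Storage.memory_storage

def put(data: bytes) -> bytes:
    # Key the entry by the BLAKE3 hash of its contents so identical data deduplicates
    data_hash = blake3.blake3(data).digest()
    store.setdefault(data_hash, data)
    return data_hash

def get(data_hash: bytes):
    # Local lookup only; the deleted class additionally fell back to a network request
    return store.get(data_hash)

h = put(b"hello astreum")
assert get(h) == b"hello astreum"
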
astreum/_node/storage/utils.py DELETED
@@ -1,137 +0,0 @@
-"""
-Utility functions for working with the storage module and Merkle trees,
-with special focus on validator stake operations and binary searches.
-"""
-
-from typing import List, Dict, Optional, Tuple, Any, Callable, TypeVar
-from .merkle import MerkleTree, MerkleProof, MerkleNode, MerkleNodeType
-from .merkle import find_first, find_all, map, binary_search
-
-T = TypeVar('T')
-
-
-def create_ordered_merkle_tree(items: List[bytes], storage=None) -> Tuple[bytes, MerkleTree]:
-    """
-    Create a new ordered Merkle tree from items.
-
-    Args:
-        items: List of items to include in the tree, will be sorted
-        storage: Optional storage backend to persist the tree
-
-    Returns:
-        Tuple of (root_hash, merkle_tree)
-    """
-    tree = MerkleTree(storage)
-    root_hash = tree.add_sorted(items)
-    return root_hash, tree
-
-
-def query_validator_stake(storage, stake_root_hash: bytes, validator_address: bytes) -> Optional[int]:
-    """
-    Query a validator's stake by their address from a stake Merkle tree.
-
-    Args:
-        storage: Storage instance used by the Merkle tree
-        stake_root_hash: Root hash of the stake Merkle tree
-        validator_address: Address of the validator to look up
-
-    Returns:
-        The validator's stake amount as an integer, or None if not found
-    """
-    # Define a comparison function for binary search (assuming address is first part of data)
-    def compare_address(data: bytes) -> int:
-        # Extract address from data (format depends on how stakes are stored)
-        # Assuming format: [address][stake]
-        data_address = data[:len(validator_address)]
-
-        if data_address < validator_address:
-            return 1  # Data is less than target
-        elif data_address > validator_address:
-            return -1  # Data is greater than target
-        else:
-            return 0  # Match found
-
-    # Binary search for the validator's stake
-    stake_data = binary_search(storage, stake_root_hash, compare_address)
-
-    if stake_data:
-        # Extract stake amount from data
-        # Assuming format: [address][stake_amount as 8-byte integer]
-        address_len = len(validator_address)
-        stake_amount = int.from_bytes(stake_data[address_len:address_len+8], byteorder='big')
-        return stake_amount
-
-    return None
-
-
-def find_validator_stakes(storage, stake_root_hash: bytes, min_stake: int = 0) -> List[Tuple[bytes, int]]:
-    """
-    Find all validators with stakes above a minimum threshold.
-
-    Args:
-        storage: Storage instance used by the Merkle tree
-        stake_root_hash: Root hash of the stake Merkle tree
-        min_stake: Minimum stake threshold (default: 0)
-
-    Returns:
-        List of (validator_address, stake_amount) tuples
-    """
-    # Define predicate to filter validators by minimum stake
-    def has_min_stake(data: bytes) -> bool:
-        # Assuming format: [address][stake_amount as 8-byte integer]
-        address_len = len(data) - 8  # Adjust based on your actual format
-        stake_amount = int.from_bytes(data[address_len:], byteorder='big')
-        return stake_amount >= min_stake
-
-    # Define transform to extract address and stake
-    def extract_address_and_stake(data: bytes) -> Tuple[bytes, int]:
-        # Assuming format: [address][stake_amount as 8-byte integer]
-        address_len = len(data) - 8  # Adjust based on your actual format
-        address = data[:address_len]
-        stake = int.from_bytes(data[address_len:], byteorder='big')
-        return (address, stake)
-
-    # Find all validators meeting criteria and transform results
-    matching_validators = find_all(storage, stake_root_hash, has_min_stake)
-    return [extract_address_and_stake(data) for data in matching_validators]
-
-
-def get_total_stake(storage, stake_root_hash: bytes) -> int:
-    """
-    Calculate the total stake across all validators.
-
-    Args:
-        storage: Storage instance used by the Merkle tree
-        stake_root_hash: Root hash of the stake Merkle tree
-
-    Returns:
-        Total stake amount
-    """
-    # Define transform to extract stake amount
-    def extract_stake(data: bytes) -> int:
-        # Assuming format: [address][stake_amount as 8-byte integer]
-        address_len = len(data) - 8  # Adjust based on your actual format
-        return int.from_bytes(data[address_len:], byteorder='big')

-    # Map all leaves to their stake values and sum
-    stakes = map(storage, stake_root_hash, extract_stake)
-    return sum(stakes)
-
-
-def query_with_custom_resolver(storage, root_hash: bytes,
-                               resolver_fn: Callable[[bytes], T]) -> T:
-    """
-    Query a Merkle tree using a custom resolver function.
-
-    This is a general-purpose function that allows custom logic to be applied
-    to tree traversal and data extraction.
-
-    Args:
-        storage: Storage instance used by the Merkle tree
-        root_hash: Root hash of the Merkle tree
-        resolver_fn: Function that takes a root hash and returns a result
-
-    Returns:
-        Whatever the resolver function returns
-    """
-    return resolver_fn(root_hash)
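
The deleted helpers above all assume each stake leaf is laid out as the validator address followed by the stake amount as an 8-byte big-endian integer. A hypothetical round-trip of that layout (illustrative, not a package function):

def encode_stake(address: bytes, stake: int) -> bytes:
    # [address][stake amount as 8-byte big-endian integer]
    return address + stake.to_bytes(8, byteorder='big')

def decode_stake(leaf: bytes):
    # Split the trailing 8 bytes back out as the stake amount
    return leaf[:-8], int.from_bytes(leaf[-8:], byteorder='big')

leaf = encode_stake(b"\x01" * 32, 5000)
assert decode_stake(leaf) == (b"\x01" * 32, 5000)
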
astreum/_node/utils.py DELETED
@@ -1,34 +0,0 @@
-"""
-Utility functions for the Astreum blockchain.
-"""
-
-import blake3
-
-def blake3_hash(data: bytes) -> bytes:
-    """
-    Hash data using BLAKE3.
-
-    Args:
-        data: Data to hash
-
-    Returns:
-        32-byte BLAKE3 hash
-    """
-    return blake3.blake3(data).digest()
-
-def hash_object(obj) -> bytes:
-    """
-    Hash a Python object by converting it to a string and then hashing.
-
-    Args:
-        obj: Python object to hash
-
-    Returns:
-        32-byte BLAKE3 hash
-    """
-    if isinstance(obj, bytes):
-        return hash_data(obj)
-    elif isinstance(obj, str):
-        return hash_data(obj.encode('utf-8'))
-    else:
-        return hash_data(str(obj).encode('utf-8'))
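
For reference, blake3.blake3(data).digest() returns a 32-byte digest by default, which is what the deleted blake3_hash helper relied on. A quick check, assuming the blake3 package is installed:

import blake3

digest = blake3.blake3(b"astreum").digest()
assert isinstance(digest, bytes) and len(digest) == 32
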
astreum/_node/validation/__init__.py DELETED
File without changes
astreum/_node/validation/_block/__init__.py DELETED
File without changes
astreum/_node/validation/_block/create.py DELETED
@@ -1,98 +0,0 @@
-"""
-Block creation functionality for the Astreum blockchain.
-"""
-
-import time
-from typing import Dict, List, Optional, Tuple, Set
-
-from .model import Block
-from ..account import Account
-from ...models import Transaction
-from ...utils import hash_data
-
-def create_block(
-    number: int,
-    validator: bytes,
-    previous: bytes,
-    transactions: List[Transaction],
-    timestamp: Optional[int] = None,
-    vdf: Optional[bytes] = None,
-    signature: Optional[bytes] = None,
-    proof: Optional[bytes] = None,
-    receipts: Optional[List] = None,
-    data: Optional[bytes] = None,
-    delay: Optional[int] = None,
-    difficulty: Optional[int] = None,
-    accounts: Optional[Dict] = None,
-    chain: Optional[bytes] = None
-) -> Block:
-    """
-    Create a new block.
-
-    Args:
-        number: Block number
-        validator: Address of block validator
-        previous: Hash of previous block
-        transactions: List of transactions to include
-        timestamp: Block timestamp (defaults to current time)
-        vdf: VDF proof
-        signature: Block signature
-        proof: Additional proof data
-        receipts: Transaction receipts
-        data: Additional block data
-        delay: Block delay
-        difficulty: Block difficulty
-        accounts: Accounts state
-        chain: Chain identifier
-
-    Returns:
-        New Block object
-    """
-    # Use current time if timestamp not provided
-    if timestamp is None:
-        timestamp = int(time.time())
-
-    # Create and return a new Block object
-    return Block(
-        number=number,
-        validator=validator,
-        previous=previous,
-        transactions=transactions,
-        timestamp=timestamp,
-        vdf=vdf,
-        signature=signature,
-        proof=proof,
-        receipts=receipts,
-        data=data,
-        delay=delay,
-        difficulty=difficulty,
-        accounts=accounts,
-        chain=chain
-    )
-
-def create_genesis_block(validator_address: bytes) -> Block:
-    """
-    Create the genesis block.
-
-    Args:
-        validator_address: Address of the genesis block validator
-
-    Returns:
-        Genesis Block object
-    """
-    return create_block(
-        number=0,
-        validator=validator_address,
-        previous=None,
-        transactions=[],
-        timestamp=int(time.time()),
-        vdf=None,
-        signature=None,
-        proof=None,
-        receipts=[],
-        data=None,
-        delay=0,
-        difficulty=1,
-        accounts={},
-        chain=hash_data(b"genesis")
-    )
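
The only computed default in the deleted create_block is the timestamp: when none is supplied, the current Unix time is used. A minimal standalone illustration of that fallback (hypothetical snippet, not the package API):

import time
from typing import Optional

def resolve_timestamp(timestamp: Optional[int] = None) -> int:
    # Mirror create_block's default: fall back to the current Unix time
    return int(time.time()) if timestamp is None else timestamp

assert resolve_timestamp(1700000000) == 1700000000
assert resolve_timestamp() > 0
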