astreum 0.3.1__py3-none-any.whl → 0.3.16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- astreum/__init__.py +4 -2
- astreum/communication/handlers/handshake.py +62 -83
- astreum/communication/handlers/object_request.py +176 -0
- astreum/communication/handlers/object_response.py +115 -0
- astreum/communication/handlers/ping.py +6 -20
- astreum/communication/handlers/route_request.py +76 -0
- astreum/communication/handlers/route_response.py +53 -0
- astreum/communication/models/message.py +81 -58
- astreum/communication/models/peer.py +42 -14
- astreum/communication/models/route.py +2 -7
- astreum/communication/processors/__init__.py +0 -0
- astreum/communication/processors/incoming.py +98 -0
- astreum/communication/processors/outgoing.py +20 -0
- astreum/communication/processors/peer.py +59 -0
- astreum/communication/setup.py +39 -76
- astreum/communication/start.py +9 -10
- astreum/communication/util.py +7 -0
- astreum/consensus/start.py +9 -10
- astreum/consensus/validator.py +17 -8
- astreum/consensus/workers/discovery.py +6 -7
- astreum/consensus/workers/validation.py +334 -291
- astreum/consensus/workers/verify.py +8 -10
- astreum/crypto/chacha20poly1305.py +74 -0
- astreum/machine/evaluations/high_evaluation.py +237 -237
- astreum/machine/evaluations/low_evaluation.py +18 -18
- astreum/node.py +29 -7
- astreum/storage/actions/get.py +183 -69
- astreum/storage/actions/set.py +66 -20
- astreum/storage/requests.py +28 -0
- astreum/storage/setup.py +3 -25
- astreum/utils/config.py +76 -0
- {astreum-0.3.1.dist-info → astreum-0.3.16.dist-info}/METADATA +3 -3
- astreum-0.3.16.dist-info/RECORD +72 -0
- astreum/communication/handlers/storage_request.py +0 -81
- astreum-0.3.1.dist-info/RECORD +0 -62
- {astreum-0.3.1.dist-info → astreum-0.3.16.dist-info}/WHEEL +0 -0
- {astreum-0.3.1.dist-info → astreum-0.3.16.dist-info}/licenses/LICENSE +0 -0
- {astreum-0.3.1.dist-info → astreum-0.3.16.dist-info}/top_level.txt +0 -0
astreum/machine/evaluations/low_evaluation.py
CHANGED

@@ -64,21 +64,21 @@ def low_eval(self, code: List[bytes], meter: Meter) -> Expr:
         pc += 1

         # ---------- ADD ----------
-        if tok == b"add":
-            if len(stack) < 2:
-                return error_expr("low_eval", "underflow")
-            b_b = stack.pop()
-            a_b = stack.pop()
-            a_i = tc_to_int(a_b)
-            b_i = tc_to_int(b_b)
-            res_i = a_i + b_i
-            width = max(len(a_b), len(b_b), min_tc_width(res_i))
-            res_b = int_to_tc(res_i, width)
-            # charge for both operands' byte widths
-            if not meter.charge_bytes(len(a_b) + len(b_b)):
-                return error_expr("low_eval", "meter limit")
-            stack.append(res_b)
-            continue
+        # if tok == b"add":
+        #     if len(stack) < 2:
+        #         return error_expr("low_eval", "underflow")
+        #     b_b = stack.pop()
+        #     a_b = stack.pop()
+        #     a_i = tc_to_int(a_b)
+        #     b_i = tc_to_int(b_b)
+        #     res_i = a_i + b_i
+        #     width = max(len(a_b), len(b_b), min_tc_width(res_i))
+        #     res_b = int_to_tc(res_i, width)
+        #     # charge for both operands' byte widths
+        #     if not meter.charge_bytes(len(a_b) + len(b_b)):
+        #         return error_expr("low_eval", "meter limit")
+        #     stack.append(res_b)
+        #     continue

         # ---------- NAND ----------


@@ -153,7 +153,7 @@ def low_eval(self, code: List[bytes], meter: Meter) -> Expr:
             if not meter.charge_bytes(len(slice_bytes)):
                 return error_expr("low_eval", "meter limit")

-            new_atom = Atom(data=slice_bytes, kind=atom.kind)
+            new_atom = Atom(data=slice_bytes, kind=atom.kind)
             new_id = new_atom.object_id()
             try:
                 self._hot_storage_set(key=new_id, value=new_atom)

@@ -205,7 +205,7 @@ def low_eval(self, code: List[bytes], meter: Meter) -> Expr:
             if not meter.charge_bytes(len(joined)):
                 return error_expr("low_eval", "meter limit")

-            new_atom = Atom(data=joined, kind=AtomKind.BYTES)
+            new_atom = Atom(data=joined, kind=AtomKind.BYTES)
             new_id = new_atom.object_id()

             try:

@@ -236,7 +236,7 @@ def low_eval(self, code: List[bytes], meter: Meter) -> Expr:
             if not meter.charge_bytes(len(data_b)):
                 return error_expr("low_eval", "meter limit")

-            new_atom = Atom(data=data_b, kind=kind)
+            new_atom = Atom(data=data_b, kind=kind)
             new_id = new_atom.object_id()

             try:
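
The commented-out ADD opcode relied on two's-complement helpers (tc_to_int, int_to_tc, min_tc_width) defined elsewhere in the machine module. As a rough sketch of the byte-width arithmetic the opcode metered, the stand-ins below assume big-endian two's-complement encoding; the names match the diff, but the actual implementations in the package may differ:

def tc_to_int(b: bytes) -> int:
    # decode big-endian two's complement (assumed encoding); empty input reads as 0
    return int.from_bytes(b, "big", signed=True) if b else 0

def min_tc_width(value: int) -> int:
    # smallest byte width that can represent value in two's complement
    width = 1
    while not -(1 << (8 * width - 1)) <= value <= (1 << (8 * width - 1)) - 1:
        width += 1
    return width

def int_to_tc(value: int, width: int) -> bytes:
    # encode value as big-endian two's complement in exactly `width` bytes
    return value.to_bytes(width, "big", signed=True)

# example: adding 0x7F (127) and 0x01 (1) overflows one byte, so the result widens:
# width = max(1, 1, min_tc_width(128)) == 2 and int_to_tc(128, 2) == b"\x00\x80"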
astreum/node.py
CHANGED

@@ -7,6 +7,12 @@ import uuid
 from typing import Dict

 from astreum.communication.start import connect_to_network_and_verify
+from astreum.communication.models.peer import (
+    add_peer as peers_add_peer,
+    replace_peer as peers_replace_peer,
+    get_peer as peers_get_peer,
+    remove_peer as peers_remove_peer,
+)
 from astreum.consensus.start import process_blocks_and_transactions
 from astreum.machine import Expr, high_eval, low_eval, script_eval
 from astreum.machine.models.environment import Env, env_get, env_set

@@ -17,35 +23,41 @@ from astreum.storage.actions.get import (
     _cold_storage_get,
     _network_get,
     storage_get,
+    local_get,
 )
 from astreum.storage.actions.set import (
     _hot_storage_set,
     _cold_storage_set,
     _network_set,
 )
+from astreum.storage.requests import add_atom_req, has_atom_req, pop_atom_req
 from astreum.storage.setup import storage_setup
+from astreum.utils.config import config_setup
 from astreum.utils.logging import logging_setup


 class Node:
     def __init__(self, config: dict = {}):
-        self.config = config
-
+        self.config = config_setup(config=config)
+
+        self.logger = logging_setup(self.config)

         self.logger.info("Starting Astreum Node")

         # Chain Configuration
-
-        self.chain = 1 if chain_str == "main" else 0
-        self.logger.info(f"Chain configured as: {chain_str} ({self.chain})")
+        self.logger.info(f"Chain configured as: {self.config["chain"]} ({self.config["chain_id"]})")

         # Storage Setup
-        storage_setup(self, config=config)
+        storage_setup(self, config=self.config)

         # Machine Setup
         self.environments: Dict[uuid.UUID, Env] = {}
         self.machine_environments_lock = threading.RLock()
-
+        self.is_connected = False
+        self.latest_block_hash = None
+        self.latest_block = None
+        self.nonce_time_ms = 0 # rolling measurement of last nonce search duration
+
     connect = connect_to_network_and_verify
     validate = process_blocks_and_transactions


@@ -68,6 +80,16 @@ class Node:
     _network_set = _network_set

     storage_get = storage_get
+    local_get = local_get

     get_expr_list_from_storage = get_expr_list_from_storage
     get_atom_list_from_storage = get_atom_list_from_storage
+
+    add_atom_req = add_atom_req
+    has_atom_req = has_atom_req
+    pop_atom_req = pop_atom_req
+
+    add_peer = peers_add_peer
+    replace_peer = peers_replace_peer
+    get_peer = peers_get_peer
+    remove_peer = peers_remove_peer
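
Taken together, __init__ now delegates default handling to config_setup, and Node exposes the new storage-request and peer-table helpers as bound methods. A minimal usage sketch, assuming the config keys visible in this diff ("chain", "cold_storage_path") are accepted as-is and that config_setup fills in everything omitted:

from astreum.node import Node

node = Node({
    "chain": "main",                # logged together with the derived chain_id
    "cold_storage_path": "./cold",  # enables the cold-storage tier
})

object_id = bytes(32)               # placeholder 32-byte atom id
atom = node.local_get(object_id)    # hot -> cold lookup only, no network traffic
print(atom)                         # None until the atom has been stored locally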
astreum/storage/actions/get.py
CHANGED

@@ -1,69 +1,183 @@
-from __future__ import annotations
-
-from pathlib import Path
-from typing import Optional
-
-from ..models.atom import Atom
-
-
-def _hot_storage_get(self, key: bytes) -> Optional[Atom]:
-    """Retrieve an atom from in-memory cache while tracking hit statistics."""
-[lines 11-69 of the previous version were not captured in this diff view]
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Optional
+
+from ..models.atom import Atom
+
+
+def _hot_storage_get(self, key: bytes) -> Optional[Atom]:
+    """Retrieve an atom from in-memory cache while tracking hit statistics."""
+    atom = self.hot_storage.get(key)
+    if atom is not None:
+        self.hot_storage_hits[key] = self.hot_storage_hits.get(key, 0) + 1
+        self.logger.debug("Hot storage hit for %s", key.hex())
+    else:
+        self.logger.debug("Hot storage miss for %s", key.hex())
+    return atom
+
+
+def _network_get(self, key: bytes) -> Optional[Atom]:
+    """Attempt to fetch an atom from network peers when local storage misses."""
+    if not getattr(self, "is_connected", False):
+        self.logger.debug("Network fetch skipped for %s; node not connected", key.hex())
+        return None
+    self.logger.debug("Attempting network fetch for %s", key.hex())
+    try:
+        from ...communication.handlers.object_request import (
+            ObjectRequest,
+            ObjectRequestType,
+        )
+        from ...communication.models.message import Message, MessageTopic
+    except Exception as exc:
+        self.logger.warning(
+            "Communication module unavailable; cannot fetch %s: %s",
+            key.hex(),
+            exc,
+        )
+        return None
+
+    try:
+        closest_peer = self.peer_route.closest_peer_for_hash(key)
+    except Exception as exc:
+        self.logger.warning("Peer lookup failed for %s: %s", key.hex(), exc)
+        return None
+
+    if closest_peer is None or closest_peer.address is None:
+        self.logger.debug("No peer available to fetch %s", key.hex())
+        return None
+
+    obj_req = ObjectRequest(
+        type=ObjectRequestType.OBJECT_GET,
+        data=b"",
+        atom_id=key,
+    )
+    try:
+        message = Message(
+            topic=MessageTopic.OBJECT_REQUEST,
+            content=obj_req.to_bytes(),
+            sender=self.relay_public_key,
+        )
+    except Exception as exc:
+        self.logger.warning("Failed to build object request for %s: %s", key.hex(), exc)
+        return None
+
+    # encrypt the outbound request for the target peer
+    message.encrypt(closest_peer.shared_key_bytes)
+
+    try:
+        self.add_atom_req(key)
+    except Exception as exc:
+        self.logger.warning("Failed to track object request for %s: %s", key.hex(), exc)
+
+    try:
+        self.outgoing_queue.put((message.to_bytes(), closest_peer.address))
+        self.logger.debug(
+            "Queued OBJECT_GET for %s to peer %s",
+            key.hex(),
+            closest_peer.address,
+        )
+    except Exception as exc:
+        self.logger.warning(
+            "Failed to queue OBJECT_GET for %s to %s: %s",
+            key.hex(),
+            closest_peer.address,
+            exc,
+        )
+    return None
+
+
+def storage_get(self, key: bytes) -> Optional[Atom]:
+    """Retrieve an Atom by checking local storage first, then the network."""
+    self.logger.debug("Fetching atom %s", key.hex())
+    atom = self._hot_storage_get(key)
+    if atom is not None:
+        self.logger.debug("Returning atom %s from hot storage", key.hex())
+        return atom
+    atom = self._cold_storage_get(key)
+    if atom is not None:
+        self.logger.debug("Returning atom %s from cold storage", key.hex())
+        return atom
+
+    if not self.is_connected:
+        return None
+
+    provider_payload = self.storage_index.get(key)
+    if provider_payload is not None:
+        try:
+            from ...communication.handlers.object_response import decode_object_provider
+            from ...communication.handlers.object_request import (
+                ObjectRequest,
+                ObjectRequestType,
+            )
+            from ...communication.models.message import Message, MessageTopic
+            from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PublicKey
+
+            provider_key, provider_address, provider_port = decode_object_provider(provider_payload)
+            provider_public_key = X25519PublicKey.from_public_bytes(provider_key)
+            shared_key_bytes = self.relay_secret_key.exchange(provider_public_key)
+
+            obj_req = ObjectRequest(
+                type=ObjectRequestType.OBJECT_GET,
+                data=b"",
+                atom_id=key,
+            )
+            message = Message(
+                topic=MessageTopic.OBJECT_REQUEST,
+                content=obj_req.to_bytes(),
+                sender=self.relay_public_key,
+            )
+            message.encrypt(shared_key_bytes)
+            self.add_atom_req(key)
+            self.outgoing_queue.put((message.to_bytes(), (provider_address, provider_port)))
+            self.logger.debug(
+                "Requested atom %s from indexed provider %s:%s",
+                key.hex(),
+                provider_address,
+                provider_port,
+            )
+        except Exception as exc:
+            self.logger.warning("Failed indexed fetch for %s: %s", key.hex(), exc)
+        return None
+
+    self.logger.debug("Falling back to network fetch for %s", key.hex())
+    return self._network_get(key)
+
+
+def local_get(self, key: bytes) -> Optional[Atom]:
+    """Retrieve an Atom by checking only local hot and cold storage."""
+    self.logger.debug("Fetching atom %s (local only)", key.hex())
+    atom = self._hot_storage_get(key)
+    if atom is not None:
+        self.logger.debug("Returning atom %s from hot storage", key.hex())
+        return atom
+    atom = self._cold_storage_get(key)
+    if atom is not None:
+        self.logger.debug("Returning atom %s from cold storage", key.hex())
+        return atom
+    self.logger.debug("Local storage miss for %s", key.hex())
+    return None
+
+
+def _cold_storage_get(self, key: bytes) -> Optional[Atom]:
+    """Read an atom from the cold storage directory if configured."""
+    if not self.config["cold_storage_path"]:
+        self.logger.debug("Cold storage disabled; cannot fetch %s", key.hex())
+        return None
+    filename = f"{key.hex().upper()}.bin"
+    file_path = Path(self.config["cold_storage_path"]) / filename
+    try:
+        data = file_path.read_bytes()
+    except FileNotFoundError:
+        self.logger.debug("Cold storage miss for %s", key.hex())
+        return None
+    except OSError as exc:
+        self.logger.warning("Error reading cold storage file %s: %s", file_path, exc)
+        return None
+    try:
+        atom = Atom.from_bytes(data)
+        self.logger.debug("Loaded atom %s from cold storage", key.hex())
+        return atom
+    except ValueError as exc:
+        self.logger.warning("Cold storage data corrupted for %s: %s", file_path, exc)
+        return None
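
The cold tier uses a simple on-disk naming convention: each atom is written as its object id in upper-case hex with a .bin suffix under cold_storage_path. A small sketch of resolving that path outside the node, assuming the same layout as _cold_storage_get above:

from pathlib import Path

def cold_storage_file(cold_storage_path: str, atom_id: bytes) -> Path:
    # mirrors the f"{key.hex().upper()}.bin" naming used by _cold_storage_get
    return Path(cold_storage_path) / f"{atom_id.hex().upper()}.bin"

print(cold_storage_file("./cold", bytes.fromhex("ab" * 32)))
# ./cold/ABAB...ABAB.bin (64 hex characters before the suffix)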
astreum/storage/actions/set.py
CHANGED

@@ -1,7 +1,10 @@
 from __future__ import annotations

+import socket
 from pathlib import Path

+from cryptography.hazmat.primitives import serialization
+
 from ..models.atom import Atom


@@ -9,11 +12,12 @@ def _hot_storage_set(self, key: bytes, value: Atom) -> bool:
     """Store atom in hot storage without exceeding the configured limit."""
     node_logger = self.logger
     projected = self.hot_storage_size + value.size
-    if projected > self.hot_storage_limit:
+    hot_limit = self.config["hot_storage_default_limit"]
+    if projected > hot_limit:
         node_logger.warning(
             "Hot storage limit reached (%s > %s); skipping atom %s",
             projected,
-            self.hot_storage_limit,
+            hot_limit,
             key.hex(),
         )
         return False

@@ -34,20 +38,21 @@ def _cold_storage_set(self, atom: Atom) -> None:
     node_logger = self.logger
     atom_id = atom.object_id()
     atom_hex = atom_id.hex()
-    if not self.cold_storage_path:
+    if not self.config["cold_storage_path"]:
         node_logger.debug("Cold storage disabled; skipping atom %s", atom_hex)
         return
     atom_bytes = atom.to_bytes()
     projected = self.cold_storage_size + len(atom_bytes)
-    if projected > self.cold_storage_limit:
+    cold_limit = self.config["cold_storage_limit"]
+    if cold_limit and projected > cold_limit:
         node_logger.warning(
             "Cold storage limit reached (%s > %s); skipping atom %s",
             projected,
-            self.cold_storage_limit,
+            cold_limit,
             atom_hex,
         )
         return
-    directory = Path(self.cold_storage_path)
+    directory = Path(self.config["cold_storage_path"])
     if not directory.exists():
         node_logger.warning(
             "Cold storage path %s missing; skipping atom %s",

@@ -76,6 +81,10 @@ def _network_set(self, atom: Atom) -> None:
     atom_id = atom.object_id()
     atom_hex = atom_id.hex()
     try:
+        from ...communication.handlers.object_request import (
+            ObjectRequest,
+            ObjectRequestType,
+        )
         from ...communication.models.message import Message, MessageTopic
     except Exception as exc:
         node_logger.warning(

@@ -85,16 +94,6 @@ def _network_set(self, atom: Atom) -> None:
         )
         return

-    try:
-        closest_peer = self.peer_route.closest_peer_for_hash(atom_id)
-    except Exception as exc:
-        node_logger.warning("Peer lookup failed for atom %s: %s", atom_hex, exc)
-        return
-    if closest_peer is None or closest_peer.address is None:
-        node_logger.debug("No peer available to advertise atom %s", atom_hex)
-        return
-    target_addr = closest_peer.address
-
     try:
         provider_ip, provider_port = self.incoming_socket.getsockname()[:2]
     except Exception as exc:

@@ -105,15 +104,62 @@ def _network_set(self, atom: Atom) -> None:
         )
         return

-    provider_str = f"{provider_ip}:{int(provider_port)}"
     try:
-        [line not captured in this diff view]
+        provider_ip_bytes = socket.inet_aton(provider_ip)
+        provider_port_bytes = int(provider_port).to_bytes(2, "big", signed=False)
+        provider_key_bytes = self.relay_public_key_bytes
     except Exception as exc:
         node_logger.warning("Unable to encode provider info for %s: %s", atom_hex, exc)
         return

-    [line not captured in this diff view]
-    [line not captured in this diff view]
+    provider_payload = provider_key_bytes + provider_ip_bytes + provider_port_bytes
+
+    try:
+        closest_peer = self.peer_route.closest_peer_for_hash(atom_id)
+    except Exception as exc:
+        node_logger.warning("Peer lookup failed for atom %s: %s", atom_hex, exc)
+        return
+
+    is_self_closest = False
+    if closest_peer is None or closest_peer.address is None:
+        is_self_closest = True
+    else:
+        try:
+            from ...communication.util import xor_distance
+        except Exception as exc:
+            node_logger.warning("Failed to import xor_distance for atom %s: %s", atom_hex, exc)
+            is_self_closest = True
+        else:
+            try:
+                self_distance = xor_distance(atom_id, self.relay_public_key_bytes)
+                peer_distance = xor_distance(atom_id, closest_peer.public_key_bytes)
+            except Exception as exc:
+                node_logger.warning("Failed computing distance for atom %s: %s", atom_hex, exc)
+                is_self_closest = True
+            else:
+                is_self_closest = self_distance <= peer_distance
+
+    if is_self_closest:
+        node_logger.debug("Self is closest; indexing provider for atom %s", atom_hex)
+        self.storage_index[atom_id] = provider_payload
+        return
+
+    target_addr = closest_peer.address
+
+    obj_req = ObjectRequest(
+        type=ObjectRequestType.OBJECT_PUT,
+        data=provider_payload,
+        atom_id=atom_id,
+    )
+
+    message_body = obj_req.to_bytes()
+
+    message = Message(
+        topic=MessageTopic.OBJECT_REQUEST,
+        content=message_body,
+        sender=self.relay_public_key,
+    )
+    message.encrypt(closest_peer.shared_key_bytes)
     try:
         self.outgoing_queue.put((message.to_bytes(), target_addr))
         node_logger.debug(
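
The provider record that _network_set builds is a fixed-width concatenation: the relay's raw X25519 public key (32 bytes), the IPv4 address from socket.inet_aton (4 bytes), and the port as a 2-byte big-endian integer. A sketch of the inverse, assuming decode_object_provider in object_response.py follows that same 38-byte layout (the real decoder may differ):

import socket

def decode_provider_payload(payload: bytes) -> tuple:
    # split a 38-byte provider record into (public_key, ip, port), assuming the
    # key + inet_aton(ip) + 2-byte big-endian port layout used by _network_set
    if len(payload) != 32 + 4 + 2:
        raise ValueError(f"unexpected provider payload length {len(payload)}")
    key = payload[:32]
    ip = socket.inet_ntoa(payload[32:36])
    port = int.from_bytes(payload[36:38], "big")
    return key, ip, port

# round trip with the same encoding _network_set uses (values are placeholders)
payload = bytes(32) + socket.inet_aton("127.0.0.1") + (7373).to_bytes(2, "big")
print(decode_provider_payload(payload))  # (b'\x00' * 32, '127.0.0.1', 7373)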

astreum/storage/requests.py
ADDED

@@ -0,0 +1,28 @@
+from __future__ import annotations
+
+from threading import RLock
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from .. import Node
+
+
+def add_atom_req(node: "Node", atom_id: bytes) -> None:
+    """Mark an atom request as pending."""
+    with node.atom_requests_lock:
+        node.atom_requests.add(atom_id)
+
+
+def has_atom_req(node: "Node", atom_id: bytes) -> bool:
+    """Return True if the atom request is currently tracked."""
+    with node.atom_requests_lock:
+        return atom_id in node.atom_requests
+
+
+def pop_atom_req(node: "Node", atom_id: bytes) -> bool:
+    """Remove the pending request if present. Returns True when removed."""
+    with node.atom_requests_lock:
+        if atom_id in node.atom_requests:
+            node.atom_requests.remove(atom_id)
+            return True
+        return False
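
These helpers only assume the node carries an atom_requests set and an atom_requests_lock RLock, which are presumably initialised elsewhere in the package. A minimal standalone sketch of the same pattern using a stand-in object:

from threading import RLock
from astreum.storage.requests import add_atom_req, has_atom_req, pop_atom_req

class FakeNode:
    # stand-in carrying only the attributes the helpers touch (attribute names
    # taken from the module above; the real Node sets them during setup)
    def __init__(self):
        self.atom_requests = set()
        self.atom_requests_lock = RLock()

node = FakeNode()
atom_id = b"\x01" * 32

add_atom_req(node, atom_id)
print(has_atom_req(node, atom_id))  # True
print(pop_atom_req(node, atom_id))  # True, and the id is no longer tracked
print(pop_atom_req(node, atom_id))  # False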
astreum/storage/setup.py
CHANGED

@@ -1,6 +1,5 @@
 from __future__ import annotations

-from pathlib import Path
 from typing import Any


@@ -13,32 +12,11 @@ def storage_setup(node: Any, config: dict) -> None:
     node.hot_storage_hits = {}
     node.storage_index = {}
     node.hot_storage_size = 0
-    hot_storage_default_limit = 1 << 30 # 1 GiB
-    hot_storage_limit_value = config.get("hot_storage_limit", hot_storage_default_limit)
-    try:
-        node.hot_storage_limit = int(hot_storage_limit_value)
-    except (TypeError, ValueError):
-        node.hot_storage_limit = hot_storage_default_limit
-
     node.cold_storage_size = 0
-    cold_storage_default_limit = 10 << 30 # 10 GiB
-    cold_storage_limit_value = config.get("cold_storage_limit", cold_storage_default_limit)
-    try:
-        node.cold_storage_limit = int(cold_storage_limit_value)
-    except (TypeError, ValueError):
-        node.cold_storage_limit = cold_storage_default_limit
-
-    cold_storage_path = config.get("cold_storage_path")
-    if cold_storage_path:
-        try:
-            Path(cold_storage_path).mkdir(parents=True, exist_ok=True)
-        except OSError:
-            cold_storage_path = None
-    node.cold_storage_path = cold_storage_path

     node.logger.info(
         "Storage ready (hot_limit=%s bytes, cold_limit=%s bytes, cold_path=%s)",
-        node.hot_storage_limit,
-        node.cold_storage_limit,
-        node.cold_storage_path or "disabled",
+        config["hot_storage_default_limit"],
+        config["cold_storage_limit"],
+        config["cold_storage_path"] or "disabled",
    )