astreum 0.3.16__py3-none-any.whl → 0.3.48__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- astreum/__init__.py +1 -2
- astreum/communication/__init__.py +15 -11
- astreum/communication/difficulty.py +39 -0
- astreum/communication/disconnect.py +57 -0
- astreum/communication/handlers/handshake.py +105 -62
- astreum/communication/handlers/object_request.py +226 -138
- astreum/communication/handlers/object_response.py +118 -10
- astreum/communication/handlers/ping.py +9 -0
- astreum/communication/handlers/route_request.py +7 -1
- astreum/communication/handlers/route_response.py +7 -1
- astreum/communication/incoming_queue.py +96 -0
- astreum/communication/message_pow.py +36 -0
- astreum/communication/models/peer.py +4 -0
- astreum/communication/models/ping.py +27 -6
- astreum/communication/models/route.py +4 -0
- astreum/communication/{start.py → node.py} +10 -11
- astreum/communication/outgoing_queue.py +108 -0
- astreum/communication/processors/incoming.py +110 -37
- astreum/communication/processors/outgoing.py +35 -2
- astreum/communication/processors/peer.py +133 -58
- astreum/communication/setup.py +272 -113
- astreum/communication/util.py +14 -0
- astreum/machine/evaluations/low_evaluation.py +5 -5
- astreum/machine/models/expression.py +5 -5
- astreum/node.py +96 -87
- astreum/storage/actions/get.py +285 -183
- astreum/storage/actions/set.py +171 -156
- astreum/storage/models/atom.py +0 -14
- astreum/storage/models/trie.py +2 -2
- astreum/storage/providers.py +24 -0
- astreum/storage/requests.py +13 -10
- astreum/storage/setup.py +20 -15
- astreum/utils/config.py +260 -43
- astreum/utils/logging.py +1 -1
- astreum/{consensus → validation}/__init__.py +0 -4
- astreum/validation/constants.py +2 -0
- astreum/{consensus → validation}/genesis.py +4 -6
- astreum/{consensus → validation}/models/account.py +1 -1
- astreum/validation/models/block.py +544 -0
- astreum/validation/models/fork.py +511 -0
- astreum/{consensus → validation}/models/receipt.py +18 -5
- astreum/{consensus → validation}/models/transaction.py +50 -8
- astreum/validation/node.py +190 -0
- astreum/{consensus → validation}/validator.py +1 -1
- astreum/validation/workers/__init__.py +8 -0
- astreum/{consensus → validation}/workers/validation.py +360 -333
- astreum/verification/__init__.py +4 -0
- astreum/{consensus/workers/discovery.py → verification/discover.py} +1 -1
- astreum/verification/node.py +61 -0
- astreum/verification/worker.py +183 -0
- {astreum-0.3.16.dist-info → astreum-0.3.48.dist-info}/METADATA +45 -9
- astreum-0.3.48.dist-info/RECORD +79 -0
- astreum/consensus/models/block.py +0 -364
- astreum/consensus/models/chain.py +0 -66
- astreum/consensus/models/fork.py +0 -100
- astreum/consensus/setup.py +0 -83
- astreum/consensus/start.py +0 -67
- astreum/consensus/workers/__init__.py +0 -9
- astreum/consensus/workers/verify.py +0 -90
- astreum-0.3.16.dist-info/RECORD +0 -72
- /astreum/{consensus → validation}/models/__init__.py +0 -0
- /astreum/{consensus → validation}/models/accounts.py +0 -0
- {astreum-0.3.16.dist-info → astreum-0.3.48.dist-info}/WHEEL +0 -0
- {astreum-0.3.16.dist-info → astreum-0.3.48.dist-info}/licenses/LICENSE +0 -0
- {astreum-0.3.16.dist-info → astreum-0.3.48.dist-info}/top_level.txt +0 -0
astreum/communication/processors/peer.py
CHANGED
@@ -1,59 +1,134 @@
-from __future__ import annotations
-import time
-from datetime import datetime, timedelta, timezone
-from typing import TYPE_CHECKING, Any
+from __future__ import annotations
+
+import time
+from datetime import datetime, timedelta, timezone
+from typing import TYPE_CHECKING, Any
+
+from ..models.message import Message
+from ..outgoing_queue import enqueue_outgoing
+from ..util import address_str_to_host_and_port
+
+if TYPE_CHECKING:
+    from .. import Node
+
+
+def _queue_bootstrap_handshakes(node: "Node") -> int:
+    relay_public_key = node.relay_public_key
+
+    bootstrap_peers = node.bootstrap_peers
+    if not bootstrap_peers:
+        return 0
+
+    try:
+        incoming_port = int(node.config.get("incoming_port", 0))
+        content = incoming_port.to_bytes(2, "big", signed=False)
+    except (TypeError, ValueError, OverflowError):
+        return 0
+
+    handshake_message = Message(
+        handshake=True,
+        sender=relay_public_key,
+        content=content,
+    )
+    handshake_bytes = handshake_message.to_bytes()
+    sent = 0
+    for addr in bootstrap_peers:
+        try:
+            host, port = address_str_to_host_and_port(addr)
+        except Exception as exc:
+            node.logger.warning("Invalid bootstrap address %s: %s", addr, exc)
+            continue
         try:
+            queued = enqueue_outgoing(
+                node,
+                (host, port),
+                message_bytes=handshake_bytes,
+                difficulty=1,
+            )
+        except Exception as exc:
+            node.logger.debug(
+                "Failed queueing bootstrap handshake to %s:%s: %s",
+                host,
+                port,
+                exc,
+            )
+            continue
+        if queued:
+            node.logger.info("Retrying bootstrap handshake to %s:%s", host, port)
+            sent += 1
+        else:
+            node.logger.debug(
+                "Bootstrap handshake queue rejected for %s:%s",
+                host,
+                port,
+            )
+    return sent
+
+
+def manage_peer(node: "Node") -> None:
+    """Continuously evict peers whose timestamps exceed the configured timeout."""
+    node.logger.info(
+        "Peer manager started (timeout=%3ds, interval=%3ds)",
+        node.config["peer_timeout"],
+        node.config["peer_timeout_interval"],
+    )
+    stop = getattr(node, "communication_stop_event", None)
+    while stop is None or not stop.is_set():
+        timeout_seconds = node.config["peer_timeout"]
+        interval_seconds = node.config["peer_timeout_interval"]
+        try:
+            peers = getattr(node, "peers", None)
+            peer_route = getattr(node, "peer_route", None)
+            if not isinstance(peers, dict) or peer_route is None:
+                time.sleep(interval_seconds)
+                continue
+
+            cutoff = datetime.now(timezone.utc) - timedelta(seconds=timeout_seconds)
+            stale_keys = []
+            with node.peers_lock:
+                for peer_key, peer in list(peers.items()):
+                    if peer.timestamp < cutoff:
+                        stale_keys.append(peer_key)
+
+            removed_count = 0
+            for peer_key in stale_keys:
+                removed = node.remove_peer(peer_key)
+                if removed is None:
+                    continue
+                removed_count += 1
+                try:
+                    peer_route.remove_peer(peer_key)
+                except Exception:
+                    node.logger.debug(
+                        "Unable to remove peer %s from route",
+                        peer_key.hex(),
+                    )
+                node.logger.debug(
+                    "Evicted stale peer %s last seen at %s",
+                    peer_key.hex(),
+                    getattr(removed, "timestamp", None),
+                )
+
+            if removed_count:
+                node.logger.info("Peer manager removed %s stale peer(s)", removed_count)
+
+            try:
+                with node.peers_lock:
+                    peer_count = len(peers)
+            except Exception:
+                peer_count = len(getattr(node, "peers", {}) or {})
+            if peer_count == 0:
+                bootstrap_interval = node.config.get("bootstrap_retry_interval", 0)
+                now = time.time()
+                last_attempt = getattr(node, "_bootstrap_last_attempt", 0.0)
+                if bootstrap_interval and (now - last_attempt) >= bootstrap_interval:
+                    sent = _queue_bootstrap_handshakes(node)
+                    if sent:
+                        node._bootstrap_last_attempt = now
+        except Exception:
+            node.logger.exception("Peer manager iteration failed")
+
+        if stop is not None and stop.wait(interval_seconds):
+            break
+
+    node.logger.info("Peer manager stopped")
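Both new worker loops in this release (manage_peer above and manage_storage_index in setup.py below) follow the same shutdown contract: they check a threading.Event stored on the node as communication_stop_event and sleep via stop.wait(interval), so a shutdown request interrupts the wait immediately. The stand-alone sketch below illustrates only that loop-and-stop pattern with a stub worker; the worker body, interval value, and thread wiring are illustrative and not part of the package API.

    import threading
    import time

    def worker(stop: threading.Event, interval: float) -> None:
        # Same shape as manage_peer: one maintenance pass per iteration,
        # then wait on the stop event instead of time.sleep().
        while not stop.is_set():
            print("maintenance pass")      # stand-in for evicting stale peers
            if stop.wait(interval):        # returns True as soon as stop is set
                break
        print("worker stopped")

    stop_event = threading.Event()
    thread = threading.Thread(target=worker, args=(stop_event, 0.2), daemon=True)
    thread.start()
    time.sleep(0.5)     # let a few passes run
    stop_event.set()    # request shutdown; wait() returns immediately
    thread.join()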
astreum/communication/setup.py
CHANGED
@@ -1,150 +1,303 @@
-import socket, threading
+import heapq, os, socket, threading, time
+from pathlib import Path
+from queue import Queue
+from typing import Tuple, Optional, Set
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.asymmetric import ed25519
+from cryptography.hazmat.primitives.asymmetric.x25519 import (
+    X25519PrivateKey,
+    X25519PublicKey,
+)
+
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+    from .. import Node
+
+from . import Route, Message
 from .processors.incoming import (
     process_incoming_messages,
     populate_incoming_messages,
 )
 from .processors.outgoing import process_outgoing_messages
 from .processors.peer import manage_peer
+from .outgoing_queue import enqueue_outgoing
 from .util import address_str_to_host_and_port
 from ..utils.bytes import hex_to_bytes
-def load_x25519(hex_key: Optional[str]) -> X25519PrivateKey:
-    """DH key for relaying (always X25519)."""
-    if hex_key:
-        return X25519PrivateKey.from_private_bytes(bytes.fromhex(hex_key))
-    return X25519PrivateKey.generate()
-def load_ed25519(hex_key: Optional[str]) -> Optional[ed25519.Ed25519PrivateKey]:
-    """Signing key for validation (Ed25519), or None if absent."""
-    return ed25519.Ed25519PrivateKey.from_private_bytes(bytes.fromhex(hex_key)) \
-        if hex_key else None
-def make_routes(
-    relay_pk: X25519PublicKey,
-    val_sk: Optional[ed25519.Ed25519PrivateKey]
-) -> Tuple[Route, Optional[Route]]:
-    """Peer route (DH pubkey) + optional validation route (ed pubkey)."""
-    peer_rt = Route(relay_pk)
-    val_rt = Route(val_sk.public_key()) if val_sk else None
-    return peer_rt, val_rt
+
+def load_x25519(hex_key: Optional[str]) -> X25519PrivateKey:
+    """DH key for relaying (always X25519)."""
+    if hex_key:
+        return X25519PrivateKey.from_private_bytes(bytes.fromhex(hex_key))
+    return X25519PrivateKey.generate()
+
+def load_ed25519(hex_key: Optional[str]) -> Optional[ed25519.Ed25519PrivateKey]:
+    """Signing key for validation (Ed25519), or None if absent."""
+    return ed25519.Ed25519PrivateKey.from_private_bytes(bytes.fromhex(hex_key)) \
+        if hex_key else None
+
+def make_routes(
+    relay_pk: X25519PublicKey,
+    val_sk: Optional[ed25519.Ed25519PrivateKey]
+) -> Tuple[Route, Optional[Route]]:
+    """Peer route (DH pubkey) + optional validation route (ed pubkey)."""
+    peer_rt = Route(relay_pk)
+    val_rt = Route(val_sk.public_key()) if val_sk else None
+    return peer_rt, val_rt
+
 def make_maps():
     """Empty lookup maps: peers and addresses."""
     return
 
 
+def _resolve_default_seed_ips(node: "Node", default_seed: Optional[str]) -> Set[str]:
+    if default_seed is None:
+        return set()
+    try:
+        host, port = address_str_to_host_and_port(default_seed)
+    except Exception as exc:
+        node.logger.warning("Invalid default seed %s: %s", default_seed, exc)
+        return set()
+    try:
+        infos = socket.getaddrinfo(host, port, type=socket.SOCK_DGRAM)
+    except Exception as exc:
+        node.logger.warning("Failed resolving default seed %s:%s: %s", host, port, exc)
+        return set()
+    resolved = {info[4][0] for info in infos if info[4]}
+    if resolved:
+        resolved_list = ", ".join(sorted(resolved))
+        node.logger.info("Default seed resolved to %s", resolved_list)
+    else:
+        node.logger.warning("No IPs resolved for default seed %s:%s", host, port)
+    return resolved
 
-    # key loading
-    node.relay_secret_key = load_x25519(config.get('relay_secret_key'))
-    node.validation_secret_key = load_ed25519(config.get('validation_secret_key'))
 
+def advertise_cold_storage(node: "Node") -> None:
+    """Advertise all cold storage atom ids to the closest known peer."""
+    node_logger = node.logger
+    cold_path = node.config.get("cold_storage_path")
+    if not cold_path:
+        node_logger.debug("Cold storage disabled; skipping cold atom advertisement")
+        return
+    advertise_limit = node.config.get("cold_storage_advertise_limit", 1000)
+    if advertise_limit == 0:
+        node_logger.debug(
+            "Cold storage advertisement disabled; skipping cold atom advertisement"
         )
+        return
+
+    directory = Path(cold_path)
+    if not directory.exists():
+        node_logger.warning("Cold storage path %s missing; cannot advertise atoms", directory)
+        return
+    if not directory.is_dir():
+        node_logger.warning("Cold storage path %s is not a directory; skipping", directory)
+        return
+
+    advertised = 0
+    skipped = 0
+    if advertise_limit < 0:
+        for file_path in directory.glob("*.bin"):
+            if not file_path.is_file():
+                skipped += 1
+                continue
+            atom_hex = file_path.stem
+            if len(atom_hex) != 64:
+                skipped += 1
+                continue
+            try:
+                atom_id = bytes.fromhex(atom_hex)
+            except ValueError:
+                skipped += 1
+                continue
+            if len(atom_id) != 32:
+                skipped += 1
+                continue
+            node._network_set(atom_id)
+            advertised += 1
+    else:
+        heap = []
+        for entry in os.scandir(directory):
+            name = entry.name
+            if not name.endswith(".bin"):
+                continue
+            if not entry.is_file():
+                skipped += 1
+                continue
+            atom_hex = name[:-4]
+            if len(atom_hex) != 64:
+                skipped += 1
+                continue
+            try:
+                atom_id = bytes.fromhex(atom_hex)
+            except ValueError:
+                skipped += 1
+                continue
+            if len(atom_id) != 32:
+                skipped += 1
+                continue
+            try:
+                mtime = entry.stat().st_mtime
+            except OSError:
+                skipped += 1
+                continue
+            if len(heap) < advertise_limit:
+                heapq.heappush(heap, (mtime, atom_id))
+            else:
+                if mtime > heap[0][0]:
+                    heapq.heapreplace(heap, (mtime, atom_id))
+        for _, atom_id in sorted(heap, key=lambda item: item[0], reverse=True):
+            node._network_set(atom_id)
+            advertised += 1
+
+    node_logger.info(
+        "Cold storage advertisement complete (advertised=%s, skipped=%s)",
+        advertised,
+        skipped,
     )
 
-    # connection state & atom request tracking
-    node.is_connected = False
-    node.atom_requests = set()
-    node.atom_requests_lock = threading.RLock()
 
+def manage_storage_index(node: "Node") -> None:
+    interval = node.config.get("storage_index_interval", 0)
+    if not interval:
+        node.logger.info("Storage index advertiser disabled")
+        return
+    node.logger.info("Storage index advertiser started (interval=%ss)", interval)
+    stop = getattr(node, "communication_stop_event", None)
+    while stop is None or not stop.is_set():
+        if stop is not None and stop.wait(interval):
+            break
+        try:
+            advertise_cold_storage(node)
+        except Exception:
+            node.logger.exception("Storage index advertisement failed")
+    node.logger.info("Storage index advertiser stopped")
+
+
+def communication_setup(node: "Node", config: dict):
+    node.logger.info("Setting up node communication")
+    node.use_ipv6 = config.get('use_ipv6', False)
+    node.peers_lock = threading.RLock()
+    node.communication_stop_event = threading.Event()
+    default_seed = config.get("default_seed")
+    node.default_seed_ips = _resolve_default_seed_ips(node, default_seed)
+
+    # key loading
+    node.relay_secret_key = load_x25519(config.get('relay_secret_key'))
+    node.validation_secret_key = load_ed25519(config.get('validation_secret_key'))
+
+    # derive pubs + routes
+    node.relay_public_key = node.relay_secret_key.public_key()
+    node.relay_public_key_bytes = node.relay_public_key.public_bytes(
+        encoding=serialization.Encoding.Raw,
+        format=serialization.PublicFormat.Raw,
+    )
+    node.validation_public_key = (
+        node.validation_secret_key.public_key().public_bytes(
+            encoding=serialization.Encoding.Raw,
+            format=serialization.PublicFormat.Raw,
+        )
+        if node.validation_secret_key
+        else None
+    )
+    node.peer_route, node.validation_route = make_routes(
+        node.relay_public_key,
+        node.validation_secret_key
+    )
+
+    # connection state & atom request tracking
+    node.is_connected = False
+    node.atom_requests = {}
+    node.atom_requests_lock = threading.RLock()
+
+    # sockets + queues + threads
+    incoming_port = config.get("incoming_port")
+    if incoming_port is None:
+        raise ValueError("incoming_port must be configured before communication setup")
     fam = socket.AF_INET6 if node.use_ipv6 else socket.AF_INET
     node.incoming_socket = socket.socket(fam, socket.SOCK_DGRAM)
     if node.use_ipv6:
         node.incoming_socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
+    node.incoming_socket.bind(("::" if node.use_ipv6 else "0.0.0.0", incoming_port))
+    bound_port = node.incoming_socket.getsockname()[1]
+    if incoming_port != 0 and bound_port != incoming_port:
+        raise OSError(
+            f"incoming_port mismatch: requested {incoming_port}, got {bound_port}"
+        )
+    node.config["incoming_port"] = bound_port if incoming_port == 0 else incoming_port
+    node.incoming_socket.settimeout(0.5)
     node.logger.info(
         "Incoming UDP socket bound to %s:%s",
         "::" if node.use_ipv6 else "0.0.0.0",
-        node.incoming_port,
+        node.config["incoming_port"],
     )
     node.incoming_queue = Queue()
+    node.incoming_queue_size = 0
+    node.incoming_queue_size_lock = threading.RLock()
+    node.incoming_queue_size_limit = node.config.get("incoming_queue_size_limit", 0)
+    node.incoming_queue_timeout = node.config.get("incoming_queue_timeout", 0)
     node.incoming_populate_thread = threading.Thread(
         target=populate_incoming_messages,
         args=(node,),
         daemon=True,
     )
-    node.incoming_process_thread = threading.Thread(
-        target=process_incoming_messages,
-        args=(node,),
-        daemon=True,
-    )
-    node.incoming_populate_thread.start()
-    node.incoming_process_thread.start()
-
-    node.outgoing_socket = socket.socket(
-        socket.AF_INET6 if node.use_ipv6 else socket.AF_INET,
-        socket.SOCK_DGRAM,
-    )
+    node.incoming_process_thread = threading.Thread(
+        target=process_incoming_messages,
+        args=(node,),
+        daemon=True,
+    )
+    node.incoming_populate_thread.start()
+    node.incoming_process_thread.start()
+
+    node.outgoing_socket = socket.socket(
+        socket.AF_INET6 if node.use_ipv6 else socket.AF_INET,
+        socket.SOCK_DGRAM,
+    )
+    node.outgoing_socket.settimeout(0.5)
     node.outgoing_queue = Queue()
+    node.outgoing_queue_size = 0
+    node.outgoing_queue_size_lock = threading.RLock()
+    node.outgoing_queue_size_limit = node.config.get("outgoing_queue_size_limit", 0)
+    node.outgoing_queue_timeout = node.config.get("outgoing_queue_timeout", 0)
 
     node.outgoing_thread = threading.Thread(
         target=process_outgoing_messages,
         args=(node,),
         daemon=True,
     )
-    node.outgoing_thread.start()
-
-    # other workers & maps
-    # track atom requests we initiated; guarded by atom_requests_lock on the node
-    node.peer_manager_thread = threading.Thread(
-        target=manage_peer,
-        args=(node,),
-        daemon=True
+    node.outgoing_thread.start()
+
+    # other workers & maps
+    # track atom requests we initiated; guarded by atom_requests_lock on the node
+    node.peer_manager_thread = threading.Thread(
+        target=manage_peer,
+        args=(node,),
+        daemon=True
+    )
+    node.peer_manager_thread.start()
+
+    with node.peers_lock:
+        node.peers = {}  # Dict[bytes,Peer]
+
+    latest_block_hex = config.get("latest_block_hash")
+    if latest_block_hex:
+        try:
+            node.latest_block_hash = hex_to_bytes(latest_block_hex, expected_length=32)
+        except Exception as exc:
+            node.logger.warning("Invalid latest_block_hash in config: %s", exc)
+            node.latest_block_hash = None
+    else:
+        node.latest_block_hash = None
+
+    node.logger.info(
+        "Communication ready (incoming_port=%s, outgoing_socket_initialized=%s, bootstrap_count=%s)",
+        node.config["incoming_port"],
+        node.outgoing_socket is not None,
+        len(node.bootstrap_peers),
     )
-
-    with node.peers_lock:
-        node.peers = {}  # Dict[bytes,Peer]
-
-    latest_block_hex = config.get("latest_block_hash")
-    if latest_block_hex:
-        try:
-            node.latest_block_hash = hex_to_bytes(latest_block_hex, expected_length=32)
-        except Exception as exc:
-            node.logger.warning("Invalid latest_block_hash in config: %s", exc)
-            node.latest_block_hash = None
-    else:
-        node.latest_block_hash = None
+    node.is_connected = True
 
-    # bootstrap pings
-    for addr in bootstrap_peers:
+    # bootstrap pings (requires connected state for enqueue_outgoing)
+    for addr in node.bootstrap_peers:
         try:
             host, port = address_str_to_host_and_port(addr)  # type: ignore[arg-type]
         except Exception as exc:
@@ -156,13 +309,19 @@ def communication_setup(node: "Node", config: dict):
             sender=node.relay_public_key,
             content=int(node.config["incoming_port"]).to_bytes(2, "big", signed=False),
         )
+        enqueue_outgoing(
+            node,
+            (host, port),
+            message=handshake_message,
+            difficulty=1,
+        )
         node.logger.info("Sent bootstrap handshake to %s:%s", host, port)
+    if node.bootstrap_peers:
+        node._bootstrap_last_attempt = time.time()
+    advertise_cold_storage(node)
+    node.storage_index_thread = threading.Thread(
+        target=manage_storage_index,
+        args=(node,),
+        daemon=True,
     )
+    node.storage_index_thread.start()
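communication_setup now reads considerably more configuration than it did in 0.3.16. The sketch below collects the keys referenced in this file and in the peer manager above into one dict; the keys come from the diff, but every value is an illustrative placeholder (including the seed address and port), and keys consumed elsewhere in the package are omitted.

    config = {
        "use_ipv6": False,
        "incoming_port": 0,                       # 0 lets the OS pick; the bound port is written back
        "relay_secret_key": None,                 # hex X25519 key, or None to generate one
        "validation_secret_key": None,            # hex Ed25519 key, optional
        "default_seed": "seed.example.org:7373",  # hypothetical bootstrap address
        "latest_block_hash": None,                # optional 32-byte hash, hex encoded
        "incoming_queue_size_limit": 0,
        "incoming_queue_timeout": 0,
        "outgoing_queue_size_limit": 0,
        "outgoing_queue_timeout": 0,
        "cold_storage_path": None,                # directory of <64-hex-char>.bin atom files
        "cold_storage_advertise_limit": 1000,     # negative advertises everything, 0 disables
        "storage_index_interval": 0,              # 0 disables the storage index advertiser thread
        "peer_timeout": 300,
        "peer_timeout_interval": 60,
        "bootstrap_retry_interval": 120,
    }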
astreum/communication/util.py
CHANGED
@@ -1,5 +1,9 @@
 from typing import Tuple
 
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+    from .. import Node
+
 
 def address_str_to_host_and_port(address: str) -> Tuple[str, int]:
     """Parse `host:port` (or `[ipv6]:port`) into a tuple."""
@@ -47,3 +51,13 @@ def xor_distance(a: bytes, b: bytes) -> int:
     if len(a) != len(b):
         raise ValueError("xor distance requires operands of equal length")
     return int.from_bytes(bytes(x ^ y for x, y in zip(a, b)), "big", signed=False)
+
+
+def get_bootstrap_peers(node: "Node") -> list[str]:
+    default_seed = node.config["default_seed"]
+    additional_seeds = node.config["additional_seeds"]
+    peers = []
+    if default_seed is not None:
+        peers.append(default_seed)
+    peers.extend(additional_seeds)
+    return peers
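get_bootstrap_peers simply concatenates default_seed and additional_seeds from the node config, while xor_distance (unchanged above) is presumably the metric the surrounding routing code uses to rank peers by closeness, Kademlia-style; advertise_cold_storage's docstring speaks of "the closest known peer". A self-contained sketch of that ranking, using random 32-byte stand-ins for peer keys:

    import os

    def xor_distance(a: bytes, b: bytes) -> int:
        # Same definition as in astreum/communication/util.py.
        if len(a) != len(b):
            raise ValueError("xor distance requires operands of equal length")
        return int.from_bytes(bytes(x ^ y for x, y in zip(a, b)), "big", signed=False)

    target = os.urandom(32)                         # e.g. an atom id being requested
    peer_keys = [os.urandom(32) for _ in range(5)]  # stand-ins for known peer keys

    # Order peers nearest-first, the way a routing lookup would.
    nearest_first = sorted(peer_keys, key=lambda key: xor_distance(key, target))
    print(nearest_first[0].hex())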