astreum 0.3.16__py3-none-any.whl → 0.3.48__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- astreum/__init__.py +1 -2
- astreum/communication/__init__.py +15 -11
- astreum/communication/difficulty.py +39 -0
- astreum/communication/disconnect.py +57 -0
- astreum/communication/handlers/handshake.py +105 -62
- astreum/communication/handlers/object_request.py +226 -138
- astreum/communication/handlers/object_response.py +118 -10
- astreum/communication/handlers/ping.py +9 -0
- astreum/communication/handlers/route_request.py +7 -1
- astreum/communication/handlers/route_response.py +7 -1
- astreum/communication/incoming_queue.py +96 -0
- astreum/communication/message_pow.py +36 -0
- astreum/communication/models/peer.py +4 -0
- astreum/communication/models/ping.py +27 -6
- astreum/communication/models/route.py +4 -0
- astreum/communication/{start.py → node.py} +10 -11
- astreum/communication/outgoing_queue.py +108 -0
- astreum/communication/processors/incoming.py +110 -37
- astreum/communication/processors/outgoing.py +35 -2
- astreum/communication/processors/peer.py +133 -58
- astreum/communication/setup.py +272 -113
- astreum/communication/util.py +14 -0
- astreum/machine/evaluations/low_evaluation.py +5 -5
- astreum/machine/models/expression.py +5 -5
- astreum/node.py +96 -87
- astreum/storage/actions/get.py +285 -183
- astreum/storage/actions/set.py +171 -156
- astreum/storage/models/atom.py +0 -14
- astreum/storage/models/trie.py +2 -2
- astreum/storage/providers.py +24 -0
- astreum/storage/requests.py +13 -10
- astreum/storage/setup.py +20 -15
- astreum/utils/config.py +260 -43
- astreum/utils/logging.py +1 -1
- astreum/{consensus → validation}/__init__.py +0 -4
- astreum/validation/constants.py +2 -0
- astreum/{consensus → validation}/genesis.py +4 -6
- astreum/{consensus → validation}/models/account.py +1 -1
- astreum/validation/models/block.py +544 -0
- astreum/validation/models/fork.py +511 -0
- astreum/{consensus → validation}/models/receipt.py +18 -5
- astreum/{consensus → validation}/models/transaction.py +50 -8
- astreum/validation/node.py +190 -0
- astreum/{consensus → validation}/validator.py +1 -1
- astreum/validation/workers/__init__.py +8 -0
- astreum/{consensus → validation}/workers/validation.py +360 -333
- astreum/verification/__init__.py +4 -0
- astreum/{consensus/workers/discovery.py → verification/discover.py} +1 -1
- astreum/verification/node.py +61 -0
- astreum/verification/worker.py +183 -0
- {astreum-0.3.16.dist-info → astreum-0.3.48.dist-info}/METADATA +45 -9
- astreum-0.3.48.dist-info/RECORD +79 -0
- astreum/consensus/models/block.py +0 -364
- astreum/consensus/models/chain.py +0 -66
- astreum/consensus/models/fork.py +0 -100
- astreum/consensus/setup.py +0 -83
- astreum/consensus/start.py +0 -67
- astreum/consensus/workers/__init__.py +0 -9
- astreum/consensus/workers/verify.py +0 -90
- astreum-0.3.16.dist-info/RECORD +0 -72
- /astreum/{consensus → validation}/models/__init__.py +0 -0
- /astreum/{consensus → validation}/models/accounts.py +0 -0
- {astreum-0.3.16.dist-info → astreum-0.3.48.dist-info}/WHEEL +0 -0
- {astreum-0.3.16.dist-info → astreum-0.3.48.dist-info}/licenses/LICENSE +0 -0
- {astreum-0.3.16.dist-info → astreum-0.3.48.dist-info}/top_level.txt +0 -0
astreum/communication/incoming_queue.py (new file)

```diff
@@ -0,0 +1,96 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Tuple
+
+from blake3 import blake3
+
+from .difficulty import message_difficulty
+from .message_pow import NONCE_SIZE, _leading_zero_bits
+
+if TYPE_CHECKING:
+    from .. import Node
+
+
+INCOMING_QUEUE_ITEM_OVERHEAD_BYTES = 6
+
+
+def enqueue_incoming(
+    node: "Node",
+    address: Tuple[str, int],
+    payload: bytes,
+) -> bool:
+    """Enqueue an incoming UDP payload while tracking queued bytes.
+    Increments `node.incoming_queue_size` by `len(payload) + 6` and enforces
+    `node.incoming_queue_size_limit` (bytes) as a soft cap by dropping enqueues that
+    would exceed the limit. If `node.incoming_queue_timeout` is > 0, it waits up to
+    that many seconds (using `communication_stop_event.wait`) for space before dropping.
+    """
+    required_difficulty = message_difficulty(node)
+    if len(payload) <= NONCE_SIZE:
+        node.logger.warning(
+            "Incoming payload too short for difficulty check (len=%s, required=%s)",
+            len(payload),
+            required_difficulty,
+        )
+        return False
+
+    nonce_bytes = payload[:NONCE_SIZE]
+    message_bytes = payload[NONCE_SIZE:]
+    digest = blake3(message_bytes + nonce_bytes).digest()
+    zeros = _leading_zero_bits(digest)
+    if zeros < required_difficulty:
+        node.logger.warning(
+            "Incoming payload failed difficulty check (zeros=%s required=%s bytes=%s)",
+            zeros,
+            required_difficulty,
+            len(payload),
+        )
+        return False
+
+    accounted_size = len(payload) + INCOMING_QUEUE_ITEM_OVERHEAD_BYTES
+    timeout = float(node.incoming_queue_timeout or 0)
+
+    with node.incoming_queue_size_lock:
+        current_size = int(node.incoming_queue_size)
+        limit = int(node.incoming_queue_size_limit)
+        projected_size = current_size + accounted_size
+        if projected_size > limit:
+            if timeout <= 0:
+                node.logger.warning(
+                    "Incoming queue size limit reached (%s > %s); dropping inbound payload (bytes=%s)",
+                    projected_size,
+                    limit,
+                    len(payload),
+                )
+                return False
+            wait_for_space = True
+        else:
+            node.incoming_queue_size = projected_size
+            wait_for_space = False
+
+    if wait_for_space:
+        if node.communication_stop_event.wait(timeout):
+            return False
+        with node.incoming_queue_size_lock:
+            current_size = int(node.incoming_queue_size)
+            limit = int(node.incoming_queue_size_limit)
+            projected_size = current_size + accounted_size
+            if projected_size > limit:
+                node.logger.warning(
+                    "Incoming queue still full after waiting %ss (%s > %s); dropping inbound payload (bytes=%s)",
+                    timeout,
+                    projected_size,
+                    limit,
+                    len(payload),
+                )
+                return False
+            node.incoming_queue_size = projected_size
+
+    try:
+        node.incoming_queue.put((message_bytes, address, accounted_size))
+    except Exception:
+        with node.incoming_queue_size_lock:
+            node.incoming_queue_size = max(0, int(node.incoming_queue_size) - accounted_size)
+        raise
+
+    return True
```
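The size accounting in `enqueue_incoming` (and in `enqueue_outgoing` below) follows a common byte-budget pattern: charge `len(payload) + 6` against a shared counter under a lock, drop when the soft cap would be exceeded, optionally wait on the stop event for space, and let the consumer refund the accounted bytes once an item is processed. A minimal standalone sketch of that pattern; the class and names here are illustrative only and not part of the package:

```python
# Standalone sketch of the byte-budget queue pattern used above (illustrative names).
import queue
import threading

ITEM_OVERHEAD_BYTES = 6  # same per-item overhead the diff accounts for


class ByteBudgetQueue:
    def __init__(self, limit_bytes: int, timeout: float = 0.0):
        self._queue = queue.Queue()
        self._size = 0
        self._limit = limit_bytes
        self._timeout = timeout
        self._lock = threading.Lock()
        self._stop = threading.Event()

    def put(self, payload: bytes, address) -> bool:
        accounted = len(payload) + ITEM_OVERHEAD_BYTES
        with self._lock:
            if self._size + accounted > self._limit:
                if self._timeout <= 0:
                    return False  # soft cap: drop instead of blocking
                wait = True
            else:
                self._size += accounted
                wait = False
        if wait:
            # Wait for the consumer to drain some bytes (or for shutdown).
            if self._stop.wait(self._timeout):
                return False
            with self._lock:
                if self._size + accounted > self._limit:
                    return False  # still full after the grace period
                self._size += accounted
        self._queue.put((payload, address, accounted))
        return True

    def task_done(self, accounted: int) -> None:
        # The consumer refunds the accounted bytes once the item is handled.
        with self._lock:
            self._size = max(0, self._size - accounted)
```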
astreum/communication/message_pow.py (new file)

```diff
@@ -0,0 +1,36 @@
+from __future__ import annotations
+
+from blake3 import blake3
+
+NONCE_SIZE = 8
+MAX_MESSAGE_NONCE = (1 << (NONCE_SIZE * 8)) - 1
+
+
+def _leading_zero_bits(buf: bytes) -> int:
+    """Return the number of leading zero bits in the provided buffer."""
+    zeros = 0
+    for byte in buf:
+        if byte == 0:
+            zeros += 8
+            continue
+        zeros += 8 - int(byte).bit_length()
+        break
+    return zeros
+
+
+def calculate_message_nonce(message_bytes: bytes, difficulty: int) -> int:
+    """Find a nonce such that blake3(message_bytes + nonce_bytes) meets difficulty.
+
+    message_bytes should exclude any nonce prefix that will be added on the wire.
+    """
+    target = max(1, int(difficulty))
+    nonce = 0
+    message_bytes = bytes(message_bytes)
+    while True:
+        if nonce > MAX_MESSAGE_NONCE:
+            raise ValueError("nonce search exhausted")
+        nonce_bytes = int(nonce).to_bytes(NONCE_SIZE, "big", signed=False)
+        digest = blake3(message_bytes + nonce_bytes).digest()
+        if _leading_zero_bits(digest) >= target:
+            return nonce
+        nonce += 1
```
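To see how the two new modules fit together: the sender searches for a nonce and prefixes it to the payload (8 bytes, big-endian), and the receiver splits the prefix off and re-checks the blake3 digest, which is exactly the check `enqueue_incoming` performs. A hedged round-trip sketch, assuming the wheel's module path `astreum.communication.message_pow` and the `blake3` package:

```python
# Illustrative sender/receiver round trip of the message proof-of-work above.
from blake3 import blake3

from astreum.communication.message_pow import (
    NONCE_SIZE,
    _leading_zero_bits,
    calculate_message_nonce,
)

message = b"hello astreum"
difficulty = 8  # require 8 leading zero bits (~1 in 256 digests)

# Sender: find a nonce and prefix it to the wire payload, as enqueue_outgoing does.
nonce = calculate_message_nonce(message, difficulty)
wire_payload = nonce.to_bytes(NONCE_SIZE, "big") + message

# Receiver: split the nonce prefix and verify the difficulty, as enqueue_incoming does.
nonce_bytes, message_bytes = wire_payload[:NONCE_SIZE], wire_payload[NONCE_SIZE:]
digest = blake3(message_bytes + nonce_bytes).digest()
assert _leading_zero_bits(digest) >= difficulty
```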
astreum/communication/models/peer.py

```diff
@@ -13,11 +13,15 @@ class Peer:
         peer_public_key: X25519PublicKey,
         latest_block: Optional[bytes] = None,
         address: Optional[Tuple[str, int]] = None,
+        is_default_seed: bool = False,
+        difficulty: int = 1,
     ):
         self.shared_key_bytes = node_secret_key.exchange(peer_public_key)
         self.timestamp = datetime.now(timezone.utc)
         self.latest_block = latest_block
+        self.difficulty = max(1, int(difficulty or 1))
         self.address = address
+        self.is_default_seed = bool(is_default_seed)
         self.public_key_bytes = peer_public_key.public_bytes(
             encoding=serialization.Encoding.Raw,
             format=serialization.PublicFormat.Raw,
```
astreum/communication/models/ping.py

```diff
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 from dataclasses import dataclass
+from typing import Optional
 
 
 class PingFormatError(ValueError):
@@ -10,24 +11,44 @@ class PingFormatError(ValueError):
 @dataclass
 class Ping:
     is_validator: bool
-
+    difficulty: int
+    latest_block: Optional[bytes]
 
-    PAYLOAD_SIZE =
+    PAYLOAD_SIZE = 34
+    ZERO_BLOCK = b"\x00" * 32
 
     def __post_init__(self) -> None:
-
+        self.difficulty = int(self.difficulty)
+        if self.difficulty < 1 or self.difficulty > 255:
+            raise ValueError("difficulty must be between 1 and 255")
+        if self.latest_block is None:
+            return
+        lb = bytes(self.latest_block)
         if len(lb) != 32:
             raise ValueError("latest_block must be exactly 32 bytes")
         self.latest_block = lb
 
     def to_bytes(self) -> bytes:
-
+        flag = b"\x01" if self.is_validator else b"\x00"
+        difficulty = bytes([self.difficulty])
+        latest_block = self.latest_block if self.latest_block is not None else self.ZERO_BLOCK
+        return flag + difficulty + latest_block
 
     @classmethod
     def from_bytes(cls, data: bytes) -> "Ping":
         if len(data) != cls.PAYLOAD_SIZE:
-            raise PingFormatError("ping payload must be
+            raise PingFormatError("ping payload must be 34 bytes")
         flag = data[0]
         if flag not in (0, 1):
             raise PingFormatError("ping validator flag must be 0 or 1")
-
+        difficulty = data[1]
+        if difficulty < 1:
+            raise PingFormatError("ping difficulty must be >= 1")
+        latest_block = data[2:]
+        if latest_block == cls.ZERO_BLOCK:
+            latest_block = None
+        return cls(
+            is_validator=bool(flag),
+            difficulty=difficulty,
+            latest_block=latest_block,
+        )
```
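The new 34-byte ping payload is laid out as 1 flag byte + 1 difficulty byte + a 32-byte latest-block hash, with an all-zero hash standing in for `None`. A small round-trip sketch, assuming the wheel exposes the class at `astreum.communication.models.ping`:

```python
# Round trip of the 34-byte ping wire format defined above.
from astreum.communication.models.ping import Ping

ping = Ping(is_validator=True, difficulty=4, latest_block=None)
payload = ping.to_bytes()
assert len(payload) == Ping.PAYLOAD_SIZE == 34
assert payload[:2] == b"\x01\x04"          # flag byte, difficulty byte
assert payload[2:] == b"\x00" * 32          # zero block encodes "no latest block"

decoded = Ping.from_bytes(payload)
assert decoded.is_validator and decoded.difficulty == 4 and decoded.latest_block is None
```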
astreum/communication/models/route.py

```diff
@@ -42,6 +42,8 @@ class Route:
 
     def add_peer(self, peer_public_key: PeerKey, peer: Optional[Peer] = None):
         peer_public_key_bytes = self._normalize_peer_key(peer_public_key)
+        if peer_public_key_bytes == self.relay_public_key_bytes:
+            return
         bucket_idx = self._matching_leading_bits(self.relay_public_key_bytes, peer_public_key_bytes)
         if len(self.buckets[bucket_idx]) < self.bucket_size:
             bucket = self.buckets[bucket_idx]
@@ -52,6 +54,8 @@ class Route:
 
     def remove_peer(self, peer_public_key: PeerKey):
         peer_public_key_bytes = self._normalize_peer_key(peer_public_key)
+        if peer_public_key_bytes == self.relay_public_key_bytes:
+            return
         bucket_idx = self._matching_leading_bits(self.relay_public_key_bytes, peer_public_key_bytes)
         bucket = self.buckets.get(bucket_idx)
         if not bucket:
```
astreum/communication/node.py (renamed from start.py)

```diff
@@ -1,19 +1,18 @@
-
+
+def connect_node(self):
     """Initialize communication and consensus components, then load latest block state."""
+    if self.is_connected:
+        self.logger.debug("Node already connected; skipping communication setup")
+        return
+
     self.logger.info("Starting communication and consensus setup")
     try:
         from astreum.communication import communication_setup  # type: ignore
         communication_setup(node=self, config=self.config)
         self.logger.info("Communication setup completed")
-    except Exception:
-        self.logger.exception("Communication setup failed")
-
-    try:
-        from astreum.consensus import consensus_setup  # type: ignore
-        consensus_setup(node=self, config=self.config)
-        self.logger.info("Consensus setup completed")
-    except Exception:
-        self.logger.exception("Consensus setup failed")
+    except Exception as exc:
+        self.logger.exception("Communication setup failed: %s", exc)
+        return exc
 
     # Load latest_block_hash from config
     self.latest_block_hash = getattr(self, "latest_block_hash", None)
@@ -30,7 +29,7 @@ def connect_to_network_and_verify(self):
 
     if self.latest_block_hash and self.latest_block is None:
         try:
-            from astreum.
+            from astreum.validation.models.block import Block
             self.latest_block = Block.from_atom(self, self.latest_block_hash)
             self.logger.info("Loaded latest block %s from storage", self.latest_block_hash.hex())
         except Exception as exc:
```
astreum/communication/outgoing_queue.py (new file)

```diff
@@ -0,0 +1,108 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Optional, Tuple
+
+from .message_pow import NONCE_SIZE, calculate_message_nonce
+
+if TYPE_CHECKING:
+    from .models.message import Message
+    from .. import Node
+
+
+OUTGOING_QUEUE_ITEM_OVERHEAD_BYTES = 6
+def enqueue_outgoing(
+    node: "Node",
+    address: Tuple[str, int],
+    message: Optional["Message"] = None,
+    message_bytes: Optional[bytes] = None,
+    difficulty: int = 1,
+) -> bool:
+    """Enqueue an outgoing UDP payload while tracking queued bytes.
+    When used, it increments `node.outgoing_queue_size` by `len(payload) + 6` and enforces
+    `node.outgoing_queue_size_limit` (bytes) as a soft cap by dropping enqueues that
+    would exceed the limit. If `node.outgoing_queue_timeout` is > 0, it waits up to
+    that many seconds (using `communication_stop_event.wait`) for space before dropping.
+    """
+    if not node.is_connected:
+        raise RuntimeError("node is not connected; call node.connect() (communication_setup) first")
+
+    if message is not None and message_bytes is not None:
+        raise ValueError("Specify only one of message or message_bytes")
+
+    if message_bytes is not None:
+        payload = message_bytes
+    elif message is not None:
+        payload = message.to_bytes()
+    else:
+        raise ValueError("Either message or message_bytes must be provided")
+
+    try:
+        difficulty_value = int(difficulty)
+    except Exception:
+        difficulty_value = 1
+    if difficulty_value < 1:
+        difficulty_value = 1
+
+    try:
+        nonce = calculate_message_nonce(payload, difficulty_value)
+    except Exception as exc:
+        node.logger.warning(
+            "Failed generating message nonce (difficulty=%s bytes=%s): %s",
+            difficulty_value,
+            len(payload),
+            exc,
+        )
+        return False
+
+    payload = int(nonce).to_bytes(NONCE_SIZE, "big", signed=False) + payload
+
+    accounted_size = len(payload) + OUTGOING_QUEUE_ITEM_OVERHEAD_BYTES
+
+    timeout = float(node.outgoing_queue_timeout or 0)
+
+    with node.outgoing_queue_size_lock:
+        current_size = int(node.outgoing_queue_size)
+        limit = int(node.outgoing_queue_size_limit)
+        projected_size = current_size + accounted_size
+        if projected_size > limit:
+            if timeout <= 0:
+                node.logger.warning(
+                    "Outgoing queue size limit reached (%s > %s); dropping outbound payload (bytes=%s)",
+                    projected_size,
+                    limit,
+                    len(payload),
+                )
+                return False
+            wait_for_space = True
+        else:
+            node.outgoing_queue_size = projected_size
+            wait_for_space = False
+
+    if wait_for_space:
+        if node.communication_stop_event.wait(timeout):
+            return False
+        if not node.is_connected:
+            return False
+        with node.outgoing_queue_size_lock:
+            current_size = int(node.outgoing_queue_size)
+            limit = int(node.outgoing_queue_size_limit)
+            projected_size = current_size + accounted_size
+            if limit and projected_size > limit:
+                node.logger.warning(
+                    "Outgoing queue still full after waiting %ss (%s > %s); dropping outbound payload (bytes=%s)",
+                    timeout,
+                    projected_size,
+                    limit,
+                    len(payload),
+                )
+                return False
+            node.outgoing_queue_size = projected_size
+
+    try:
+        node.outgoing_queue.put((payload, address, accounted_size))
+    except Exception:
+        with node.outgoing_queue_size_lock:
+            node.outgoing_queue_size = max(0, int(node.outgoing_queue_size) - accounted_size)
+        raise
+
+    return True
```
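A usage sketch for `enqueue_outgoing` with a stand-in node object. The stub only carries the attributes the function above reads (in the package these are populated by `communication_setup`), and the address and payload are invented for illustration:

```python
# Hypothetical caller-side sketch; the stub node mirrors the attributes used above.
import logging
import queue
import threading
from types import SimpleNamespace

from astreum.communication.outgoing_queue import enqueue_outgoing

node = SimpleNamespace(
    is_connected=True,
    logger=logging.getLogger("astreum.demo"),
    outgoing_queue=queue.Queue(),
    outgoing_queue_size=0,
    outgoing_queue_size_limit=1_000_000,  # bytes
    outgoing_queue_timeout=0,             # drop immediately when full
    outgoing_queue_size_lock=threading.Lock(),
    communication_stop_event=threading.Event(),
)

ok = enqueue_outgoing(node, ("203.0.113.5", 7373), message_bytes=b"ping", difficulty=1)
print(ok, node.outgoing_queue_size)  # True, 4-byte message + 8-byte nonce + 6 overhead = 18
```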
astreum/communication/processors/incoming.py

```diff
@@ -1,30 +1,56 @@
-from __future__ import annotations
-
-
-
-from
+from __future__ import annotations
+
+import socket
+from queue import Empty
+from typing import TYPE_CHECKING
+
+from ..handlers.handshake import handle_handshake
 from ..handlers.object_request import handle_object_request
 from ..handlers.object_response import handle_object_response
 from ..handlers.ping import handle_ping
 from ..handlers.route_request import handle_route_request
 from ..handlers.route_response import handle_route_response
+from ..incoming_queue import enqueue_incoming
 from ..models.message import Message, MessageTopic
 from ..models.peer import Peer
+from ..outgoing_queue import enqueue_outgoing
 from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PublicKey
 
 if TYPE_CHECKING:
-    from .. import Node
-
-
+    from .. import Node
+
+
 def process_incoming_messages(node: "Node") -> None:
     """Process incoming messages (placeholder)."""
-
+    stop = getattr(node, "communication_stop_event", None)
+    while stop is None or not stop.is_set():
         try:
-
-        except
+            item = node.incoming_queue.get(timeout=0.5)
+        except Empty:
+            continue
+        except Exception:
             node.logger.exception("Error taking from incoming queue")
             continue
 
+        data = None
+        addr = None
+        accounted_size = None
+
+        if isinstance(item, tuple) and len(item) == 3:
+            data, addr, accounted_size = item
+        else:
+            node.logger.warning("Incoming queue item has unexpected shape: %r", item)
+            continue
+
+        if stop is not None and stop.is_set():
+            if accounted_size is not None:
+                try:
+                    with node.incoming_queue_size_lock:
+                        node.incoming_queue_size = max(0, node.incoming_queue_size - int(accounted_size))
+                except Exception:
+                    node.logger.exception("Failed updating incoming_queue_size on shutdown")
+            break
+
         try:
             message = Message.from_bytes(data)
         except Exception as exc:
@@ -34,7 +60,7 @@ def process_incoming_messages(node: "Node") -> None:
         if message.handshake:
             if handle_handshake(node, addr, message):
                 continue
-
+
         peer = None
         try:
             peer = node.get_peer(message.sender_bytes)
@@ -44,10 +70,13 @@ def process_incoming_messages(node: "Node") -> None:
             try:
                 peer_key = X25519PublicKey.from_public_bytes(message.sender_bytes)
                 host, port = addr[0], int(addr[1])
+                default_seed_ips = getattr(node, "default_seed_ips", None)
+                is_default_seed = bool(default_seed_ips) and host in default_seed_ips
                 peer = Peer(
                     node_secret_key=node.relay_secret_key,
                     peer_public_key=peer_key,
                     address=(host, port),
+                    is_default_seed=is_default_seed,
                 )
             except Exception:
                 peer = None
@@ -60,39 +89,83 @@ def process_incoming_messages(node: "Node") -> None:
         try:
             message.decrypt(peer.shared_key_bytes)
         except Exception as exc:
-            node.logger.warning(
+            node.logger.warning(
+                "Error decrypting message from %s (len=%s, enc_len=%s, exc=%s)",
+                peer.address,
+                len(data),
+                len(message.encrypted) if message.encrypted is not None else None,
+                exc,
+            )
+            try:
+                host, port = addr[0], int(addr[1])
+                handshake_message = Message(
+                    handshake=True,
+                    sender=node.relay_public_key,
+                    content=int(node.config["incoming_port"]).to_bytes(2, "big", signed=False),
+                )
+                enqueue_outgoing(
+                    node,
+                    (host, port),
+                    message=handshake_message,
+                    difficulty=1,
+                )
+            except Exception as handshake_exc:
+                node.logger.debug(
+                    "Failed queueing rekey handshake to %s: %s",
+                    addr,
+                    handshake_exc,
+                )
             continue
 
-
-
-
-
-            case MessageTopic.OBJECT_REQUEST:
-                handle_object_request(node, peer, message)
-
-            case MessageTopic.OBJECT_RESPONSE:
-                handle_object_response(node, peer, message)
+        try:
+            match message.topic:
+                case MessageTopic.PING:
+                    handle_ping(node, peer, message.content)
 
-
-
+                case MessageTopic.OBJECT_REQUEST:
+                    handle_object_request(node, peer, message)
 
-
-
+                case MessageTopic.OBJECT_RESPONSE:
+                    handle_object_response(node, peer, message)
 
-
-
-                    continue
-                node._validation_transaction_queue.put(message.content)
+                case MessageTopic.ROUTE_REQUEST:
+                    handle_route_request(node, peer, message)
 
-
-
+                case MessageTopic.ROUTE_RESPONSE:
+                    handle_route_response(node, peer, message)
 
+                case MessageTopic.TRANSACTION:
+                    if node.validation_secret_key is None:
+                        continue
+                    node._validation_transaction_queue.put(message.content)
 
-
+                case _:
+                    continue
+        finally:
+            if accounted_size is not None:
+                try:
+                    with node.incoming_queue_size_lock:
+                        node.incoming_queue_size = max(0, node.incoming_queue_size - int(accounted_size))
+                except Exception:
+                    node.logger.exception("Failed updating incoming_queue_size")
+
+    node.logger.info("Incoming message processor stopped")
+
+
+def populate_incoming_messages(node: "Node") -> None:
     """Receive UDP packets and feed the incoming queue."""
-
+    stop = getattr(node, "communication_stop_event", None)
+    while stop is None or not stop.is_set():
         try:
             data, addr = node.incoming_socket.recvfrom(4096)
-            node
-        except
-
+            enqueue_incoming(node, addr, payload=data)
+        except socket.timeout:
+            continue
+        except OSError:
+            if stop is not None and stop.is_set():
+                break
+            node.logger.warning("Error populating incoming queue: socket closed")
+        except Exception as exc:
+            node.logger.warning("Error populating incoming queue: %s", exc)
+
+    node.logger.info("Incoming message populator stopped")
```
astreum/communication/processors/outgoing.py

```diff
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+from queue import Empty
 from typing import TYPE_CHECKING, Tuple
 
 if TYPE_CHECKING:
@@ -7,14 +8,46 @@ if TYPE_CHECKING:
 
 def process_outgoing_messages(node: "Node") -> None:
     """Send queued outbound packets."""
-
+    stop = getattr(node, "communication_stop_event", None)
+    while stop is None or not stop.is_set():
         try:
-
+            item = node.outgoing_queue.get(timeout=0.5)
+        except Empty:
+            continue
         except Exception:
             node.logger.exception("Error taking from outgoing queue")
             continue
 
+        payload = None
+        addr = None
+        accounted_size = None
+        if isinstance(item, tuple) and len(item) == 3:
+            payload, addr, accounted_size = item
+        elif isinstance(item, tuple) and len(item) == 2:
+            payload, addr = item
+        else:
+            node.logger.warning("Outgoing queue item has unexpected shape: %r", item)
+            continue
+
+        if stop is not None and stop.is_set():
+            if accounted_size is not None:
+                try:
+                    with node.outgoing_queue_size_lock:
+                        node.outgoing_queue_size = max(0, node.outgoing_queue_size - int(accounted_size))
+                except Exception:
+                    node.logger.exception("Failed updating outgoing_queue_size on shutdown")
+            break
+
         try:
             node.outgoing_socket.sendto(payload, addr)
         except Exception as exc:
             node.logger.warning("Error sending message to %s: %s", addr, exc)
+        finally:
+            if accounted_size is not None:
+                try:
+                    with node.outgoing_queue_size_lock:
+                        node.outgoing_queue_size = max(0, node.outgoing_queue_size - int(accounted_size))
+                except Exception:
+                    node.logger.exception("Failed updating outgoing_queue_size")
+
+    node.logger.info("Outgoing message processor stopped")
```