ReticulumTelemetryHub 0.1.0__py3-none-any.whl → 0.143.0__py3-none-any.whl
- reticulum_telemetry_hub/api/__init__.py +23 -0
- reticulum_telemetry_hub/api/models.py +323 -0
- reticulum_telemetry_hub/api/service.py +836 -0
- reticulum_telemetry_hub/api/storage.py +528 -0
- reticulum_telemetry_hub/api/storage_base.py +156 -0
- reticulum_telemetry_hub/api/storage_models.py +118 -0
- reticulum_telemetry_hub/atak_cot/__init__.py +49 -0
- reticulum_telemetry_hub/atak_cot/base.py +277 -0
- reticulum_telemetry_hub/atak_cot/chat.py +506 -0
- reticulum_telemetry_hub/atak_cot/detail.py +235 -0
- reticulum_telemetry_hub/atak_cot/event.py +181 -0
- reticulum_telemetry_hub/atak_cot/pytak_client.py +569 -0
- reticulum_telemetry_hub/atak_cot/tak_connector.py +848 -0
- reticulum_telemetry_hub/config/__init__.py +25 -0
- reticulum_telemetry_hub/config/constants.py +7 -0
- reticulum_telemetry_hub/config/manager.py +515 -0
- reticulum_telemetry_hub/config/models.py +215 -0
- reticulum_telemetry_hub/embedded_lxmd/__init__.py +5 -0
- reticulum_telemetry_hub/embedded_lxmd/embedded.py +418 -0
- reticulum_telemetry_hub/internal_api/__init__.py +21 -0
- reticulum_telemetry_hub/internal_api/bus.py +344 -0
- reticulum_telemetry_hub/internal_api/core.py +690 -0
- reticulum_telemetry_hub/internal_api/v1/__init__.py +74 -0
- reticulum_telemetry_hub/internal_api/v1/enums.py +109 -0
- reticulum_telemetry_hub/internal_api/v1/manifest.json +8 -0
- reticulum_telemetry_hub/internal_api/v1/schemas.py +478 -0
- reticulum_telemetry_hub/internal_api/versioning.py +63 -0
- reticulum_telemetry_hub/lxmf_daemon/Handlers.py +122 -0
- reticulum_telemetry_hub/lxmf_daemon/LXMF.py +252 -0
- reticulum_telemetry_hub/lxmf_daemon/LXMPeer.py +898 -0
- reticulum_telemetry_hub/lxmf_daemon/LXMRouter.py +4227 -0
- reticulum_telemetry_hub/lxmf_daemon/LXMessage.py +1006 -0
- reticulum_telemetry_hub/lxmf_daemon/LXStamper.py +490 -0
- reticulum_telemetry_hub/lxmf_daemon/__init__.py +10 -0
- reticulum_telemetry_hub/lxmf_daemon/_version.py +1 -0
- reticulum_telemetry_hub/lxmf_daemon/lxmd.py +1655 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/fields/field_telemetry_stream.py +6 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/__init__.py +3 -0
- {lxmf_telemetry → reticulum_telemetry_hub/lxmf_telemetry}/model/persistance/appearance.py +19 -19
- {lxmf_telemetry → reticulum_telemetry_hub/lxmf_telemetry}/model/persistance/peer.py +17 -13
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/__init__.py +65 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/acceleration.py +68 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/ambient_light.py +37 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/angular_velocity.py +68 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/battery.py +68 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/connection_map.py +258 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/generic.py +841 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/gravity.py +68 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/humidity.py +37 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/information.py +42 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/location.py +110 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/lxmf_propagation.py +429 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/magnetic_field.py +68 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/physical_link.py +53 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/pressure.py +37 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/proximity.py +37 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/received.py +75 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/rns_transport.py +209 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/sensor.py +65 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/sensor_enum.py +27 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/sensor_mapping.py +58 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/temperature.py +37 -0
- {lxmf_telemetry → reticulum_telemetry_hub/lxmf_telemetry}/model/persistance/sensors/time.py +36 -32
- {lxmf_telemetry → reticulum_telemetry_hub/lxmf_telemetry}/model/persistance/telemeter.py +26 -23
- reticulum_telemetry_hub/lxmf_telemetry/sampler.py +229 -0
- reticulum_telemetry_hub/lxmf_telemetry/telemeter_manager.py +409 -0
- reticulum_telemetry_hub/lxmf_telemetry/telemetry_controller.py +804 -0
- reticulum_telemetry_hub/northbound/__init__.py +5 -0
- reticulum_telemetry_hub/northbound/app.py +195 -0
- reticulum_telemetry_hub/northbound/auth.py +119 -0
- reticulum_telemetry_hub/northbound/gateway.py +310 -0
- reticulum_telemetry_hub/northbound/internal_adapter.py +302 -0
- reticulum_telemetry_hub/northbound/models.py +213 -0
- reticulum_telemetry_hub/northbound/routes_chat.py +123 -0
- reticulum_telemetry_hub/northbound/routes_files.py +119 -0
- reticulum_telemetry_hub/northbound/routes_rest.py +345 -0
- reticulum_telemetry_hub/northbound/routes_subscribers.py +150 -0
- reticulum_telemetry_hub/northbound/routes_topics.py +178 -0
- reticulum_telemetry_hub/northbound/routes_ws.py +107 -0
- reticulum_telemetry_hub/northbound/serializers.py +72 -0
- reticulum_telemetry_hub/northbound/services.py +373 -0
- reticulum_telemetry_hub/northbound/websocket.py +855 -0
- reticulum_telemetry_hub/reticulum_server/__main__.py +2237 -0
- reticulum_telemetry_hub/reticulum_server/command_manager.py +1268 -0
- reticulum_telemetry_hub/reticulum_server/command_text.py +399 -0
- reticulum_telemetry_hub/reticulum_server/constants.py +1 -0
- reticulum_telemetry_hub/reticulum_server/event_log.py +357 -0
- reticulum_telemetry_hub/reticulum_server/internal_adapter.py +358 -0
- reticulum_telemetry_hub/reticulum_server/outbound_queue.py +312 -0
- reticulum_telemetry_hub/reticulum_server/services.py +422 -0
- reticulumtelemetryhub-0.143.0.dist-info/METADATA +181 -0
- reticulumtelemetryhub-0.143.0.dist-info/RECORD +97 -0
- {reticulumtelemetryhub-0.1.0.dist-info → reticulumtelemetryhub-0.143.0.dist-info}/WHEEL +1 -1
- reticulumtelemetryhub-0.143.0.dist-info/licenses/LICENSE +277 -0
- lxmf_telemetry/model/fields/field_telemetry_stream.py +0 -7
- lxmf_telemetry/model/persistance/__init__.py +0 -3
- lxmf_telemetry/model/persistance/sensors/location.py +0 -69
- lxmf_telemetry/model/persistance/sensors/magnetic_field.py +0 -36
- lxmf_telemetry/model/persistance/sensors/sensor.py +0 -44
- lxmf_telemetry/model/persistance/sensors/sensor_enum.py +0 -24
- lxmf_telemetry/model/persistance/sensors/sensor_mapping.py +0 -9
- lxmf_telemetry/telemetry_controller.py +0 -124
- reticulum_server/main.py +0 -182
- reticulumtelemetryhub-0.1.0.dist-info/METADATA +0 -15
- reticulumtelemetryhub-0.1.0.dist-info/RECORD +0 -19
- {lxmf_telemetry → reticulum_telemetry_hub}/__init__.py +0 -0
- {lxmf_telemetry/model/persistance/sensors → reticulum_telemetry_hub/lxmf_telemetry}/__init__.py +0 -0
- {reticulum_server → reticulum_telemetry_hub/reticulum_server}/__init__.py +0 -0
|
@@ -0,0 +1,4227 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import sys
|
|
3
|
+
import time
|
|
4
|
+
import math
|
|
5
|
+
import random
|
|
6
|
+
import base64
|
|
7
|
+
import atexit
|
|
8
|
+
import signal
|
|
9
|
+
import threading
|
|
10
|
+
|
|
11
|
+
from collections import deque
|
|
12
|
+
|
|
13
|
+
import RNS
|
|
14
|
+
import RNS.vendor.umsgpack as msgpack
|
|
15
|
+
|
|
16
|
+
from .LXMF import APP_NAME
|
|
17
|
+
from .LXMF import FIELD_TICKET
|
|
18
|
+
from .LXMF import PN_META_NAME
|
|
19
|
+
from .LXMF import pn_announce_data_is_valid
|
|
20
|
+
|
|
21
|
+
from .LXMPeer import LXMPeer
|
|
22
|
+
from .LXMessage import LXMessage
|
|
23
|
+
from .Handlers import LXMFDeliveryAnnounceHandler
|
|
24
|
+
from .Handlers import LXMFPropagationAnnounceHandler
|
|
25
|
+
|
|
26
|
+
from . import LXStamper
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class LXMRouter:
|
|
30
|
+
MAX_DELIVERY_ATTEMPTS = 5
|
|
31
|
+
PROCESSING_INTERVAL = 4
|
|
32
|
+
DELIVERY_RETRY_WAIT = 10
|
|
33
|
+
PATH_REQUEST_WAIT = 7
|
|
34
|
+
MAX_PATHLESS_TRIES = 1
|
|
35
|
+
LINK_MAX_INACTIVITY = 10 * 60
|
|
36
|
+
P_LINK_MAX_INACTIVITY = 3 * 60
|
|
37
|
+
|
|
38
|
+
MESSAGE_EXPIRY = 30 * 24 * 60 * 60
|
|
39
|
+
STAMP_COST_EXPIRY = 45 * 24 * 60 * 60
|
|
40
|
+
|
|
41
|
+
NODE_ANNOUNCE_DELAY = 20
|
|
42
|
+
|
|
43
|
+
MAX_PEERS = 20
|
|
44
|
+
AUTOPEER = True
|
|
45
|
+
AUTOPEER_MAXDEPTH = 4
|
|
46
|
+
FASTEST_N_RANDOM_POOL = 2
|
|
47
|
+
ROTATION_HEADROOM_PCT = 10
|
|
48
|
+
ROTATION_AR_MAX = 0.5
|
|
49
|
+
|
|
50
|
+
PEERING_COST = 18
|
|
51
|
+
MAX_PEERING_COST = 26
|
|
52
|
+
PROPAGATION_COST_MIN = 13
|
|
53
|
+
PROPAGATION_COST_FLEX = 3
|
|
54
|
+
PROPAGATION_COST = 16
|
|
55
|
+
PROPAGATION_LIMIT = 256
|
|
56
|
+
SYNC_LIMIT = PROPAGATION_LIMIT * 40
|
|
57
|
+
DELIVERY_LIMIT = 1000
|
|
58
|
+
|
|
59
|
+
PR_PATH_TIMEOUT = 10
|
|
60
|
+
PN_STAMP_THROTTLE = 180
|
|
61
|
+
|
|
62
|
+
PR_IDLE = 0x00
|
|
63
|
+
PR_PATH_REQUESTED = 0x01
|
|
64
|
+
PR_LINK_ESTABLISHING = 0x02
|
|
65
|
+
PR_LINK_ESTABLISHED = 0x03
|
|
66
|
+
PR_REQUEST_SENT = 0x04
|
|
67
|
+
PR_RECEIVING = 0x05
|
|
68
|
+
PR_RESPONSE_RECEIVED = 0x06
|
|
69
|
+
PR_COMPLETE = 0x07
|
|
70
|
+
PR_NO_PATH = 0xF0
|
|
71
|
+
PR_LINK_FAILED = 0xF1
|
|
72
|
+
PR_TRANSFER_FAILED = 0xF2
|
|
73
|
+
PR_NO_IDENTITY_RCVD = 0xF3
|
|
74
|
+
PR_NO_ACCESS = 0xF4
|
|
75
|
+
PR_FAILED = 0xFE
|
|
76
|
+
|
|
77
|
+
PR_ALL_MESSAGES = 0x00
|
|
78
|
+
|
|
79
|
+
DUPLICATE_SIGNAL = "lxmf_duplicate"
|
|
80
|
+
|
|
81
|
+
STATS_GET_PATH = "/pn/get/stats"
|
|
82
|
+
SYNC_REQUEST_PATH = "/pn/peer/sync"
|
|
83
|
+
UNPEER_REQUEST_PATH = "/pn/peer/unpeer"
|
|
84
|
+
|
|
85
|
+
### Developer-facing API ##############################
|
|
86
|
+
#######################################################
|
|
87
|
+
|
|
88
|
+
def __init__(
|
|
89
|
+
self,
|
|
90
|
+
identity=None,
|
|
91
|
+
storagepath=None,
|
|
92
|
+
autopeer=AUTOPEER,
|
|
93
|
+
autopeer_maxdepth=None,
|
|
94
|
+
propagation_limit=PROPAGATION_LIMIT,
|
|
95
|
+
delivery_limit=DELIVERY_LIMIT,
|
|
96
|
+
sync_limit=SYNC_LIMIT,
|
|
97
|
+
enforce_ratchets=False,
|
|
98
|
+
enforce_stamps=False,
|
|
99
|
+
static_peers=[],
|
|
100
|
+
max_peers=None,
|
|
101
|
+
from_static_only=False,
|
|
102
|
+
sync_strategy=LXMPeer.STRATEGY_PERSISTENT,
|
|
103
|
+
propagation_cost=PROPAGATION_COST,
|
|
104
|
+
propagation_cost_flexibility=PROPAGATION_COST_FLEX,
|
|
105
|
+
peering_cost=PEERING_COST,
|
|
106
|
+
max_peering_cost=MAX_PEERING_COST,
|
|
107
|
+
name=None,
|
|
108
|
+
):
|
|
109
|
+
|
|
110
|
+
random.seed(os.urandom(10))
|
|
111
|
+
|
|
112
|
+
self.pending_inbound = []
|
|
113
|
+
self.pending_outbound = []
|
|
114
|
+
self.failed_outbound = []
|
|
115
|
+
self.direct_links = {}
|
|
116
|
+
self.backchannel_links = {}
|
|
117
|
+
self.delivery_destinations = {}
|
|
118
|
+
|
|
119
|
+
self.prioritised_list = []
|
|
120
|
+
self.ignored_list = []
|
|
121
|
+
self.allowed_list = []
|
|
122
|
+
self.control_allowed_list = []
|
|
123
|
+
self.auth_required = False
|
|
124
|
+
self.retain_synced_on_node = False
|
|
125
|
+
|
|
126
|
+
self.default_sync_strategy = sync_strategy
|
|
127
|
+
self.processing_outbound = False
|
|
128
|
+
self.processing_inbound = False
|
|
129
|
+
self.processing_count = 0
|
|
130
|
+
self.name = name
|
|
131
|
+
|
|
132
|
+
self.propagation_node = False
|
|
133
|
+
self.propagation_node_start_time = None
|
|
134
|
+
|
|
135
|
+
if storagepath == None:
|
|
136
|
+
raise ValueError("LXMF cannot be initialised without a storage path")
|
|
137
|
+
else:
|
|
138
|
+
self.storagepath = storagepath + "/lxmf"
|
|
139
|
+
self.ratchetpath = self.storagepath + "/ratchets"
|
|
140
|
+
|
|
141
|
+
self.outbound_propagation_node = None
|
|
142
|
+
self.outbound_propagation_link = None
|
|
143
|
+
|
|
144
|
+
if delivery_limit == None:
|
|
145
|
+
delivery_limit = LXMRouter.DELIVERY_LIMIT
|
|
146
|
+
if propagation_cost < LXMRouter.PROPAGATION_COST_MIN:
|
|
147
|
+
propagation_cost = LXMRouter.PROPAGATION_COST_MIN
|
|
148
|
+
|
|
149
|
+
self._message_storage_limit = None
|
|
150
|
+
self._information_storage_limit = None
|
|
151
|
+
self.propagation_per_transfer_limit = propagation_limit
|
|
152
|
+
self.propagation_per_sync_limit = sync_limit
|
|
153
|
+
self.delivery_per_transfer_limit = delivery_limit
|
|
154
|
+
self.propagation_stamp_cost = propagation_cost
|
|
155
|
+
self.propagation_stamp_cost_flexibility = propagation_cost_flexibility
|
|
156
|
+
self.peering_cost = peering_cost
|
|
157
|
+
self.max_peering_cost = max_peering_cost
|
|
158
|
+
self.enforce_ratchets = enforce_ratchets
|
|
159
|
+
self._enforce_stamps = enforce_stamps
|
|
160
|
+
self.pending_deferred_stamps = {}
|
|
161
|
+
self.throttled_peers = {}
|
|
162
|
+
|
|
163
|
+
if (
|
|
164
|
+
sync_limit == None
|
|
165
|
+
or self.propagation_per_sync_limit < self.propagation_per_transfer_limit
|
|
166
|
+
):
|
|
167
|
+
self.propagation_per_sync_limit = self.propagation_per_transfer_limit
|
|
168
|
+
|
|
169
|
+
self.wants_download_on_path_available_from = None
|
|
170
|
+
self.wants_download_on_path_available_to = None
|
|
171
|
+
self.propagation_transfer_state = LXMRouter.PR_IDLE
|
|
172
|
+
self.propagation_transfer_progress = 0.0
|
|
173
|
+
self.propagation_transfer_last_result = None
|
|
174
|
+
self.propagation_transfer_last_duplicates = None
|
|
175
|
+
self.propagation_transfer_max_messages = None
|
|
176
|
+
self.prioritise_rotating_unreachable_peers = False
|
|
177
|
+
self.active_propagation_links = []
|
|
178
|
+
self.validated_peer_links = {}
|
|
179
|
+
self.locally_delivered_transient_ids = {}
|
|
180
|
+
self.locally_processed_transient_ids = {}
|
|
181
|
+
self.outbound_stamp_costs = {}
|
|
182
|
+
self.available_tickets = {"outbound": {}, "inbound": {}, "last_deliveries": {}}
|
|
183
|
+
|
|
184
|
+
self.cost_file_lock = threading.Lock()
|
|
185
|
+
self.ticket_file_lock = threading.Lock()
|
|
186
|
+
self.stamp_gen_lock = threading.Lock()
|
|
187
|
+
self.exit_handler_running = False
|
|
188
|
+
|
|
189
|
+
if identity == None:
|
|
190
|
+
identity = RNS.Identity()
|
|
191
|
+
|
|
192
|
+
self.identity = identity
|
|
193
|
+
self.propagation_destination = RNS.Destination(
|
|
194
|
+
self.identity,
|
|
195
|
+
RNS.Destination.IN,
|
|
196
|
+
RNS.Destination.SINGLE,
|
|
197
|
+
APP_NAME,
|
|
198
|
+
"propagation",
|
|
199
|
+
)
|
|
200
|
+
self.propagation_destination.set_default_app_data(
|
|
201
|
+
self.get_propagation_node_app_data
|
|
202
|
+
)
|
|
203
|
+
self.control_destination = None
|
|
204
|
+
self.client_propagation_messages_received = 0
|
|
205
|
+
self.client_propagation_messages_served = 0
|
|
206
|
+
self.unpeered_propagation_incoming = 0
|
|
207
|
+
self.unpeered_propagation_rx_bytes = 0
|
|
208
|
+
|
|
209
|
+
if autopeer != None:
|
|
210
|
+
self.autopeer = autopeer
|
|
211
|
+
else:
|
|
212
|
+
self.autopeer = LXMRouter.AUTOPEER
|
|
213
|
+
|
|
214
|
+
if autopeer_maxdepth != None:
|
|
215
|
+
self.autopeer_maxdepth = autopeer_maxdepth
|
|
216
|
+
else:
|
|
217
|
+
self.autopeer_maxdepth = LXMRouter.AUTOPEER_MAXDEPTH
|
|
218
|
+
|
|
219
|
+
if max_peers == None:
|
|
220
|
+
self.max_peers = LXMRouter.MAX_PEERS
|
|
221
|
+
else:
|
|
222
|
+
if type(max_peers) == int and max_peers >= 0:
|
|
223
|
+
self.max_peers = max_peers
|
|
224
|
+
else:
|
|
225
|
+
raise ValueError(f"Invalid value for max_peers: {max_peers}")
|
|
226
|
+
|
|
227
|
+
self.from_static_only = from_static_only
|
|
228
|
+
if type(static_peers) != list:
|
|
229
|
+
raise ValueError(
|
|
230
|
+
f"Invalid type supplied for static peer list: {type(static_peers)}"
|
|
231
|
+
)
|
|
232
|
+
else:
|
|
233
|
+
for static_peer in static_peers:
|
|
234
|
+
if type(static_peer) != bytes:
|
|
235
|
+
raise ValueError(
|
|
236
|
+
f"Invalid static peer destination hash: {static_peer}"
|
|
237
|
+
)
|
|
238
|
+
else:
|
|
239
|
+
if len(static_peer) != RNS.Reticulum.TRUNCATED_HASHLENGTH // 8:
|
|
240
|
+
raise ValueError(
|
|
241
|
+
f"Invalid static peer destination hash: {static_peer}"
|
|
242
|
+
)
|
|
243
|
+
|
|
244
|
+
self.static_peers = static_peers
|
|
245
|
+
|
|
246
|
+
self.peers = {}
|
|
247
|
+
self.propagation_entries = {}
|
|
248
|
+
|
|
249
|
+
self.peer_distribution_queue = deque()
|
|
250
|
+
|
|
251
|
+
RNS.Transport.register_announce_handler(LXMFDeliveryAnnounceHandler(self))
|
|
252
|
+
RNS.Transport.register_announce_handler(LXMFPropagationAnnounceHandler(self))
|
|
253
|
+
|
|
254
|
+
self.__delivery_callback = None
|
|
255
|
+
|
|
256
|
+
try:
|
|
257
|
+
if os.path.isfile(self.storagepath + "/local_deliveries"):
|
|
258
|
+
locally_delivered_file = open(
|
|
259
|
+
self.storagepath + "/local_deliveries", "rb"
|
|
260
|
+
)
|
|
261
|
+
data = locally_delivered_file.read()
|
|
262
|
+
locally_delivered_file.close()
|
|
263
|
+
self.locally_delivered_transient_ids = msgpack.unpackb(data)
|
|
264
|
+
if not type(self.locally_delivered_transient_ids) == dict:
|
|
265
|
+
RNS.log(
|
|
266
|
+
"Invalid data format for loaded locally delivered transient IDs, recreating...",
|
|
267
|
+
RNS.LOG_ERROR,
|
|
268
|
+
)
|
|
269
|
+
self.locally_delivered_transient_ids = {}
|
|
270
|
+
|
|
271
|
+
except Exception as e:
|
|
272
|
+
RNS.log(
|
|
273
|
+
"Could not load locally delivered message ID cache from storage. The contained exception was: "
|
|
274
|
+
+ str(e),
|
|
275
|
+
RNS.LOG_ERROR,
|
|
276
|
+
)
|
|
277
|
+
self.locally_delivered_transient_ids = {}
|
|
278
|
+
|
|
279
|
+
try:
|
|
280
|
+
if os.path.isfile(self.storagepath + "/locally_processed"):
|
|
281
|
+
locally_processed_file = open(
|
|
282
|
+
self.storagepath + "/locally_processed", "rb"
|
|
283
|
+
)
|
|
284
|
+
data = locally_processed_file.read()
|
|
285
|
+
locally_processed_file.close()
|
|
286
|
+
self.locally_processed_transient_ids = msgpack.unpackb(data)
|
|
287
|
+
if not type(self.locally_processed_transient_ids) == dict:
|
|
288
|
+
RNS.log(
|
|
289
|
+
"Invalid data format for loaded locally processed transient IDs, recreating...",
|
|
290
|
+
RNS.LOG_ERROR,
|
|
291
|
+
)
|
|
292
|
+
self.locally_processed_transient_ids = {}
|
|
293
|
+
|
|
294
|
+
except Exception as e:
|
|
295
|
+
RNS.log(
|
|
296
|
+
"Could not load locally processed message ID cache from storage. The contained exception was: "
|
|
297
|
+
+ str(e),
|
|
298
|
+
RNS.LOG_ERROR,
|
|
299
|
+
)
|
|
300
|
+
self.locally_processed_transient_ids = {}
|
|
301
|
+
|
|
302
|
+
try:
|
|
303
|
+
self.clean_transient_id_caches()
|
|
304
|
+
|
|
305
|
+
except Exception as e:
|
|
306
|
+
RNS.log(
|
|
307
|
+
"Could not clean transient ID caches. The contained exception was : "
|
|
308
|
+
+ str(e),
|
|
309
|
+
RNS.LOG_ERROR,
|
|
310
|
+
)
|
|
311
|
+
self.locally_delivered_transient_ids = {}
|
|
312
|
+
self.locally_processed_transient_ids = {}
|
|
313
|
+
|
|
314
|
+
try:
|
|
315
|
+
if os.path.isfile(self.storagepath + "/outbound_stamp_costs"):
|
|
316
|
+
with self.cost_file_lock:
|
|
317
|
+
with open(
|
|
318
|
+
self.storagepath + "/outbound_stamp_costs", "rb"
|
|
319
|
+
) as outbound_stamp_cost_file:
|
|
320
|
+
data = outbound_stamp_cost_file.read()
|
|
321
|
+
self.outbound_stamp_costs = msgpack.unpackb(data)
|
|
322
|
+
if not type(self.outbound_stamp_costs) == dict:
|
|
323
|
+
RNS.log(
|
|
324
|
+
"Invalid data format for loaded outbound stamp costs, recreating...",
|
|
325
|
+
RNS.LOG_ERROR,
|
|
326
|
+
)
|
|
327
|
+
self.outbound_stamp_costs = {}
|
|
328
|
+
|
|
329
|
+
self.clean_outbound_stamp_costs()
|
|
330
|
+
self.save_outbound_stamp_costs()
|
|
331
|
+
|
|
332
|
+
except Exception as e:
|
|
333
|
+
RNS.log(
|
|
334
|
+
"Could not load outbound stamp costs from storage. The contained exception was: "
|
|
335
|
+
+ str(e),
|
|
336
|
+
RNS.LOG_ERROR,
|
|
337
|
+
)
|
|
338
|
+
|
|
339
|
+
try:
|
|
340
|
+
if os.path.isfile(self.storagepath + "/available_tickets"):
|
|
341
|
+
with self.ticket_file_lock:
|
|
342
|
+
with open(
|
|
343
|
+
self.storagepath + "/available_tickets", "rb"
|
|
344
|
+
) as available_tickets_file:
|
|
345
|
+
data = available_tickets_file.read()
|
|
346
|
+
self.available_tickets = msgpack.unpackb(data)
|
|
347
|
+
if not type(self.available_tickets) == dict:
|
|
348
|
+
RNS.log(
|
|
349
|
+
"Invalid data format for loaded available tickets, recreating...",
|
|
350
|
+
RNS.LOG_ERROR,
|
|
351
|
+
)
|
|
352
|
+
self.available_tickets = {
|
|
353
|
+
"outbound": {},
|
|
354
|
+
"inbound": {},
|
|
355
|
+
"last_deliveries": {},
|
|
356
|
+
}
|
|
357
|
+
if not "outbound" in self.available_tickets:
|
|
358
|
+
RNS.log(
|
|
359
|
+
"Missing outbound entry in loaded available tickets, recreating...",
|
|
360
|
+
RNS.LOG_ERROR,
|
|
361
|
+
)
|
|
362
|
+
self.available_tickets["outbound"] = {}
|
|
363
|
+
if not "inbound" in self.available_tickets:
|
|
364
|
+
RNS.log(
|
|
365
|
+
"Missing inbound entry in loaded available tickets, recreating...",
|
|
366
|
+
RNS.LOG_ERROR,
|
|
367
|
+
)
|
|
368
|
+
self.available_tickets["inbound"] = {}
|
|
369
|
+
if not "last_deliveries" in self.available_tickets:
|
|
370
|
+
RNS.log(
|
|
371
|
+
"Missing local_deliveries entry in loaded available tickets, recreating...",
|
|
372
|
+
RNS.LOG_ERROR,
|
|
373
|
+
)
|
|
374
|
+
self.available_tickets["last_deliveries"] = {}
|
|
375
|
+
|
|
376
|
+
self.clean_available_tickets()
|
|
377
|
+
self.save_available_tickets()
|
|
378
|
+
|
|
379
|
+
except Exception as e:
|
|
380
|
+
RNS.log(
|
|
381
|
+
"Could not load outbound stamp costs from storage. The contained exception was: "
|
|
382
|
+
+ str(e),
|
|
383
|
+
RNS.LOG_ERROR,
|
|
384
|
+
)
|
|
385
|
+
|
|
386
|
+
atexit.register(self.exit_handler)
|
|
387
|
+
signal.signal(signal.SIGINT, self.sigint_handler)
|
|
388
|
+
signal.signal(signal.SIGTERM, self.sigterm_handler)
|
|
389
|
+
|
|
390
|
+
job_thread = threading.Thread(target=self.jobloop)
|
|
391
|
+
job_thread.setDaemon(True)
|
|
392
|
+
job_thread.start()
|
|
393
|
+
|
|
394
|
+
def announce(self, destination_hash, attached_interface=None):
|
|
395
|
+
if destination_hash in self.delivery_destinations:
|
|
396
|
+
self.delivery_destinations[destination_hash].announce(
|
|
397
|
+
app_data=self.get_announce_app_data(destination_hash),
|
|
398
|
+
attached_interface=attached_interface,
|
|
399
|
+
)
|
|
400
|
+
|
|
401
|
+
def get_propagation_node_announce_metadata(self):
|
|
402
|
+
metadata = {}
|
|
403
|
+
if self.name:
|
|
404
|
+
metadata[PN_META_NAME] = str(self.name).encode("utf-8")
|
|
405
|
+
return metadata
|
|
406
|
+
|
|
407
|
+
def get_propagation_node_app_data(self):
|
|
408
|
+
metadata = self.get_propagation_node_announce_metadata()
|
|
409
|
+
node_state = self.propagation_node and not self.from_static_only
|
|
410
|
+
stamp_cost = [
|
|
411
|
+
self.propagation_stamp_cost,
|
|
412
|
+
self.propagation_stamp_cost_flexibility,
|
|
413
|
+
self.peering_cost,
|
|
414
|
+
]
|
|
415
|
+
announce_data = [
|
|
416
|
+
False, # 0: Legacy LXMF PN support
|
|
417
|
+
int(time.time()), # 1: Current node timebase
|
|
418
|
+
node_state, # 2: Boolean flag signalling propagation node state
|
|
419
|
+
self.propagation_per_transfer_limit, # 3: Per-transfer limit for message propagation in kilobytes
|
|
420
|
+
self.propagation_per_sync_limit, # 4: Limit for incoming propagation node syncs
|
|
421
|
+
stamp_cost, # 5: Propagation stamp cost for this node
|
|
422
|
+
metadata,
|
|
423
|
+
] # 6: Node metadata
|
|
424
|
+
|
|
425
|
+
return msgpack.packb(announce_data)
|
|
426
|
+
|
|
427
|
+
def announce_propagation_node(self):
|
|
428
|
+
def delayed_announce():
|
|
429
|
+
time.sleep(LXMRouter.NODE_ANNOUNCE_DELAY)
|
|
430
|
+
self.propagation_destination.announce(
|
|
431
|
+
app_data=self.get_propagation_node_app_data()
|
|
432
|
+
)
|
|
433
|
+
|
|
434
|
+
da_thread = threading.Thread(target=delayed_announce)
|
|
435
|
+
da_thread.setDaemon(True)
|
|
436
|
+
da_thread.start()
|
|
437
|
+
|
|
438
|
+
def register_delivery_identity(self, identity, display_name=None, stamp_cost=None):
|
|
439
|
+
if len(self.delivery_destinations) != 0:
|
|
440
|
+
RNS.log(
|
|
441
|
+
"Currently only one delivery identity is supported per LXMF router instance",
|
|
442
|
+
RNS.LOG_ERROR,
|
|
443
|
+
)
|
|
444
|
+
return None
|
|
445
|
+
|
|
446
|
+
if not os.path.isdir(self.ratchetpath):
|
|
447
|
+
os.makedirs(self.ratchetpath)
|
|
448
|
+
|
|
449
|
+
delivery_destination = RNS.Destination(
|
|
450
|
+
identity, RNS.Destination.IN, RNS.Destination.SINGLE, APP_NAME, "delivery"
|
|
451
|
+
)
|
|
452
|
+
delivery_destination.enable_ratchets(
|
|
453
|
+
f"{self.ratchetpath}/{RNS.hexrep(delivery_destination.hash, delimit=False)}.ratchets"
|
|
454
|
+
)
|
|
455
|
+
delivery_destination.set_packet_callback(self.delivery_packet)
|
|
456
|
+
delivery_destination.set_link_established_callback(
|
|
457
|
+
self.delivery_link_established
|
|
458
|
+
)
|
|
459
|
+
delivery_destination.display_name = display_name
|
|
460
|
+
|
|
461
|
+
if self.enforce_ratchets:
|
|
462
|
+
delivery_destination.enforce_ratchets()
|
|
463
|
+
|
|
464
|
+
if display_name != None:
|
|
465
|
+
|
|
466
|
+
def get_app_data():
|
|
467
|
+
return self.get_announce_app_data(delivery_destination.hash)
|
|
468
|
+
|
|
469
|
+
delivery_destination.set_default_app_data(get_app_data)
|
|
470
|
+
|
|
471
|
+
self.delivery_destinations[delivery_destination.hash] = delivery_destination
|
|
472
|
+
self.set_inbound_stamp_cost(delivery_destination.hash, stamp_cost)
|
|
473
|
+
|
|
474
|
+
return delivery_destination
|
|
475
|
+
|
|
476
|
+
def register_delivery_callback(self, callback):
|
|
477
|
+
self.__delivery_callback = callback
|
|
478
|
+
|
|
479
|
+
def set_inbound_stamp_cost(self, destination_hash, stamp_cost):
|
|
480
|
+
if destination_hash in self.delivery_destinations:
|
|
481
|
+
delivery_destination = self.delivery_destinations[destination_hash]
|
|
482
|
+
if stamp_cost == None:
|
|
483
|
+
delivery_destination.stamp_cost = None
|
|
484
|
+
return True
|
|
485
|
+
elif type(stamp_cost) == int:
|
|
486
|
+
if stamp_cost < 1:
|
|
487
|
+
delivery_destination.stamp_cost = None
|
|
488
|
+
elif stamp_cost < 255:
|
|
489
|
+
delivery_destination.stamp_cost = stamp_cost
|
|
490
|
+
else:
|
|
491
|
+
return False
|
|
492
|
+
|
|
493
|
+
return True
|
|
494
|
+
|
|
495
|
+
return False
|
|
496
|
+
|
|
497
|
+
def get_outbound_stamp_cost(self, destination_hash):
|
|
498
|
+
if destination_hash in self.outbound_stamp_costs:
|
|
499
|
+
stamp_cost = self.outbound_stamp_costs[destination_hash][1]
|
|
500
|
+
return stamp_cost
|
|
501
|
+
else:
|
|
502
|
+
return None
|
|
503
|
+
|
|
504
|
+
def set_active_propagation_node(self, destination_hash):
|
|
505
|
+
self.set_outbound_propagation_node(destination_hash)
|
|
506
|
+
# self.set_inbound_propagation_node(destination_hash)
|
|
507
|
+
|
|
508
|
+
def set_outbound_propagation_node(self, destination_hash):
|
|
509
|
+
if (
|
|
510
|
+
len(destination_hash) != RNS.Identity.TRUNCATED_HASHLENGTH // 8
|
|
511
|
+
or type(destination_hash) != bytes
|
|
512
|
+
):
|
|
513
|
+
raise ValueError("Invalid destination hash for outbound propagation node")
|
|
514
|
+
else:
|
|
515
|
+
if self.outbound_propagation_node != destination_hash:
|
|
516
|
+
self.outbound_propagation_node = destination_hash
|
|
517
|
+
if self.outbound_propagation_link != None:
|
|
518
|
+
if (
|
|
519
|
+
self.outbound_propagation_link.destination.hash
|
|
520
|
+
!= destination_hash
|
|
521
|
+
):
|
|
522
|
+
self.outbound_propagation_link.teardown()
|
|
523
|
+
self.outbound_propagation_link = None
|
|
524
|
+
|
|
525
|
+
def get_outbound_propagation_node(self):
|
|
526
|
+
return self.outbound_propagation_node
|
|
527
|
+
|
|
528
|
+
def get_outbound_propagation_cost(self):
|
|
529
|
+
target_propagation_cost = None
|
|
530
|
+
pn_destination_hash = self.get_outbound_propagation_node()
|
|
531
|
+
pn_app_data = RNS.Identity.recall_app_data(pn_destination_hash)
|
|
532
|
+
if pn_announce_data_is_valid(pn_app_data):
|
|
533
|
+
pn_config = msgpack.unpackb(pn_app_data)
|
|
534
|
+
target_propagation_cost = pn_config[5][0]
|
|
535
|
+
|
|
536
|
+
if not target_propagation_cost:
|
|
537
|
+
RNS.log(
|
|
538
|
+
f"Could not retrieve cached propagation node config. Requesting path to propagation node to get target propagation cost...",
|
|
539
|
+
RNS.LOG_DEBUG,
|
|
540
|
+
)
|
|
541
|
+
RNS.Transport.request_path(pn_destination_hash)
|
|
542
|
+
timeout = time.time() + LXMRouter.PATH_REQUEST_WAIT
|
|
543
|
+
while (
|
|
544
|
+
not RNS.Identity.recall_app_data(pn_destination_hash)
|
|
545
|
+
and time.time() < timeout
|
|
546
|
+
):
|
|
547
|
+
time.sleep(0.5)
|
|
548
|
+
|
|
549
|
+
pn_app_data = RNS.Identity.recall_app_data(pn_destination_hash)
|
|
550
|
+
if pn_announce_data_is_valid(pn_app_data):
|
|
551
|
+
pn_config = msgpack.unpackb(pn_app_data)
|
|
552
|
+
target_propagation_cost = pn_config[5][0]
|
|
553
|
+
|
|
554
|
+
if not target_propagation_cost:
|
|
555
|
+
RNS.log(
|
|
556
|
+
"Propagation node stamp cost still unavailable after path request",
|
|
557
|
+
RNS.LOG_ERROR,
|
|
558
|
+
)
|
|
559
|
+
return target_propagation_cost
|
|
560
|
+
|
|
561
|
+
def set_inbound_propagation_node(self, destination_hash):
|
|
562
|
+
# TODO: Implement
|
|
563
|
+
raise NotImplementedError(
|
|
564
|
+
"Inbound/outbound propagation node differentiation is currently not implemented"
|
|
565
|
+
)
|
|
566
|
+
|
|
567
|
+
def get_inbound_propagation_node(self):
|
|
568
|
+
return self.get_outbound_propagation_node()
|
|
569
|
+
|
|
570
|
+
def set_retain_node_lxms(self, retain):
|
|
571
|
+
if retain == True:
|
|
572
|
+
self.retain_synced_on_node = True
|
|
573
|
+
else:
|
|
574
|
+
self.retain_synced_on_node = False
|
|
575
|
+
|
|
576
|
+
def set_authentication(self, required=None):
|
|
577
|
+
if required != None:
|
|
578
|
+
self.auth_required = required
|
|
579
|
+
|
|
580
|
+
def requires_authentication(self):
|
|
581
|
+
return self.auth_required
|
|
582
|
+
|
|
583
|
+
def allow(self, identity_hash=None):
|
|
584
|
+
if (
|
|
585
|
+
isinstance(identity_hash, bytes)
|
|
586
|
+
and len(identity_hash) == RNS.Identity.TRUNCATED_HASHLENGTH // 8
|
|
587
|
+
):
|
|
588
|
+
if not identity_hash in self.allowed_list:
|
|
589
|
+
self.allowed_list.append(identity_hash)
|
|
590
|
+
else:
|
|
591
|
+
raise ValueError(
|
|
592
|
+
"Allowed identity hash must be "
|
|
593
|
+
+ str(RNS.Identity.TRUNCATED_HASHLENGTH // 8)
|
|
594
|
+
+ " bytes"
|
|
595
|
+
)
|
|
596
|
+
|
|
597
|
+
def disallow(self, identity_hash=None):
|
|
598
|
+
if (
|
|
599
|
+
isinstance(identity_hash, bytes)
|
|
600
|
+
and len(identity_hash) == RNS.Identity.TRUNCATED_HASHLENGTH // 8
|
|
601
|
+
):
|
|
602
|
+
if identity_hash in self.allowed_list:
|
|
603
|
+
self.allowed_list.pop(identity_hash)
|
|
604
|
+
else:
|
|
605
|
+
raise ValueError(
|
|
606
|
+
"Disallowed identity hash must be "
|
|
607
|
+
+ str(RNS.Identity.TRUNCATED_HASHLENGTH // 8)
|
|
608
|
+
+ " bytes"
|
|
609
|
+
)
|
|
610
|
+
|
|
611
|
+
def allow_control(self, identity_hash=None):
|
|
612
|
+
if (
|
|
613
|
+
isinstance(identity_hash, bytes)
|
|
614
|
+
and len(identity_hash) == RNS.Identity.TRUNCATED_HASHLENGTH // 8
|
|
615
|
+
):
|
|
616
|
+
if not identity_hash in self.control_allowed_list:
|
|
617
|
+
self.control_allowed_list.append(identity_hash)
|
|
618
|
+
else:
|
|
619
|
+
raise ValueError(
|
|
620
|
+
"Allowed identity hash must be "
|
|
621
|
+
+ str(RNS.Identity.TRUNCATED_HASHLENGTH // 8)
|
|
622
|
+
+ " bytes"
|
|
623
|
+
)
|
|
624
|
+
|
|
625
|
+
def disallow_control(self, identity_hash=None):
|
|
626
|
+
if (
|
|
627
|
+
isinstance(identity_hash, bytes)
|
|
628
|
+
and len(identity_hash) == RNS.Identity.TRUNCATED_HASHLENGTH // 8
|
|
629
|
+
):
|
|
630
|
+
if identity_hash in self.control_allowed_list:
|
|
631
|
+
self.control_allowed_list.pop(identity_hash)
|
|
632
|
+
else:
|
|
633
|
+
raise ValueError(
|
|
634
|
+
"Disallowed identity hash must be "
|
|
635
|
+
+ str(RNS.Identity.TRUNCATED_HASHLENGTH // 8)
|
|
636
|
+
+ " bytes"
|
|
637
|
+
)
|
|
638
|
+
|
|
639
|
+
def prioritise(self, destination_hash=None):
|
|
640
|
+
if (
|
|
641
|
+
isinstance(destination_hash, bytes)
|
|
642
|
+
and len(destination_hash) == RNS.Reticulum.TRUNCATED_HASHLENGTH // 8
|
|
643
|
+
):
|
|
644
|
+
if not destination_hash in self.prioritised_list:
|
|
645
|
+
self.prioritised_list.append(destination_hash)
|
|
646
|
+
else:
|
|
647
|
+
raise ValueError(
|
|
648
|
+
"Prioritised destination hash must be "
|
|
649
|
+
+ str(RNS.Reticulum.TRUNCATED_HASHLENGTH // 8)
|
|
650
|
+
+ " bytes"
|
|
651
|
+
)
|
|
652
|
+
|
|
653
|
+
def unprioritise(self, destination_hash=None):
|
|
654
|
+
if (
|
|
655
|
+
isinstance(destination_hash, bytes)
|
|
656
|
+
and len(destination_hash) == RNS.Reticulum.TRUNCATED_HASHLENGTH // 8
|
|
657
|
+
):
|
|
658
|
+
if destination_hash in self.prioritised_list:
|
|
659
|
+
self.prioritised_list.pop(destination_hash)
|
|
660
|
+
else:
|
|
661
|
+
raise ValueError(
|
|
662
|
+
"Prioritised destination hash must be "
|
|
663
|
+
+ str(RNS.Reticulum.TRUNCATED_HASHLENGTH // 8)
|
|
664
|
+
+ " bytes"
|
|
665
|
+
)
|
|
666
|
+
|
|
667
|
+
def request_messages_from_propagation_node(
|
|
668
|
+
self, identity, max_messages=PR_ALL_MESSAGES
|
|
669
|
+
):
|
|
670
|
+
if max_messages == None:
|
|
671
|
+
max_messages = LXMRouter.PR_ALL_MESSAGES
|
|
672
|
+
|
|
673
|
+
self.propagation_transfer_progress = 0.0
|
|
674
|
+
self.propagation_transfer_max_messages = max_messages
|
|
675
|
+
if self.outbound_propagation_node != None:
|
|
676
|
+
if (
|
|
677
|
+
self.outbound_propagation_link != None
|
|
678
|
+
and self.outbound_propagation_link.status == RNS.Link.ACTIVE
|
|
679
|
+
):
|
|
680
|
+
self.propagation_transfer_state = LXMRouter.PR_LINK_ESTABLISHED
|
|
681
|
+
RNS.log("Requesting message list from propagation node", RNS.LOG_DEBUG)
|
|
682
|
+
self.outbound_propagation_link.identify(identity)
|
|
683
|
+
self.outbound_propagation_link.request(
|
|
684
|
+
LXMPeer.MESSAGE_GET_PATH,
|
|
685
|
+
[
|
|
686
|
+
None,
|
|
687
|
+
None,
|
|
688
|
+
], # Set both want and have fields to None to get message list
|
|
689
|
+
response_callback=self.message_list_response,
|
|
690
|
+
failed_callback=self.message_get_failed,
|
|
691
|
+
)
|
|
692
|
+
self.propagation_transfer_state = LXMRouter.PR_REQUEST_SENT
|
|
693
|
+
else:
|
|
694
|
+
if self.outbound_propagation_link == None:
|
|
695
|
+
if RNS.Transport.has_path(self.outbound_propagation_node):
|
|
696
|
+
self.wants_download_on_path_available_from = None
|
|
697
|
+
self.propagation_transfer_state = LXMRouter.PR_LINK_ESTABLISHING
|
|
698
|
+
RNS.log(
|
|
699
|
+
"Establishing link to "
|
|
700
|
+
+ RNS.prettyhexrep(self.outbound_propagation_node)
|
|
701
|
+
+ " for message download",
|
|
702
|
+
RNS.LOG_DEBUG,
|
|
703
|
+
)
|
|
704
|
+
propagation_node_identity = RNS.Identity.recall(
|
|
705
|
+
self.outbound_propagation_node
|
|
706
|
+
)
|
|
707
|
+
propagation_node_destination = RNS.Destination(
|
|
708
|
+
propagation_node_identity,
|
|
709
|
+
RNS.Destination.OUT,
|
|
710
|
+
RNS.Destination.SINGLE,
|
|
711
|
+
APP_NAME,
|
|
712
|
+
"propagation",
|
|
713
|
+
)
|
|
714
|
+
|
|
715
|
+
def msg_request_established_callback(link):
|
|
716
|
+
self.request_messages_from_propagation_node(
|
|
717
|
+
identity, self.propagation_transfer_max_messages
|
|
718
|
+
)
|
|
719
|
+
|
|
720
|
+
self.outbound_propagation_link = RNS.Link(
|
|
721
|
+
propagation_node_destination,
|
|
722
|
+
established_callback=msg_request_established_callback,
|
|
723
|
+
)
|
|
724
|
+
else:
|
|
725
|
+
RNS.log(
|
|
726
|
+
"No path known for message download from propagation node "
|
|
727
|
+
+ RNS.prettyhexrep(self.outbound_propagation_node)
|
|
728
|
+
+ ". Requesting path...",
|
|
729
|
+
RNS.LOG_DEBUG,
|
|
730
|
+
)
|
|
731
|
+
RNS.Transport.request_path(self.outbound_propagation_node)
|
|
732
|
+
self.wants_download_on_path_available_from = (
|
|
733
|
+
self.outbound_propagation_node
|
|
734
|
+
)
|
|
735
|
+
self.wants_download_on_path_available_to = identity
|
|
736
|
+
self.wants_download_on_path_available_timeout = (
|
|
737
|
+
time.time() + LXMRouter.PR_PATH_TIMEOUT
|
|
738
|
+
)
|
|
739
|
+
self.propagation_transfer_state = LXMRouter.PR_PATH_REQUESTED
|
|
740
|
+
self.request_messages_path_job()
|
|
741
|
+
else:
|
|
742
|
+
RNS.log(
|
|
743
|
+
"Waiting for propagation node link to become active",
|
|
744
|
+
RNS.LOG_EXTREME,
|
|
745
|
+
)
|
|
746
|
+
else:
|
|
747
|
+
RNS.log(
|
|
748
|
+
"Cannot request LXMF propagation node sync, no default propagation node configured",
|
|
749
|
+
RNS.LOG_WARNING,
|
|
750
|
+
)
|
|
751
|
+
|
|
752
|
+
def cancel_propagation_node_requests(self):
|
|
753
|
+
if self.outbound_propagation_link != None:
|
|
754
|
+
self.outbound_propagation_link.teardown()
|
|
755
|
+
self.outbound_propagation_link = None
|
|
756
|
+
|
|
757
|
+
self.acknowledge_sync_completion(reset_state=True)
|
|
758
|
+
|
|
759
|
+
def enable_propagation(self):
|
|
760
|
+
try:
|
|
761
|
+
self.messagepath = self.storagepath + "/messagestore"
|
|
762
|
+
|
|
763
|
+
if not os.path.isdir(self.storagepath):
|
|
764
|
+
os.makedirs(self.storagepath)
|
|
765
|
+
|
|
766
|
+
if not os.path.isdir(self.messagepath):
|
|
767
|
+
os.makedirs(self.messagepath)
|
|
768
|
+
|
|
769
|
+
self.propagation_entries = {}
|
|
770
|
+
|
|
771
|
+
st = time.time()
|
|
772
|
+
RNS.log("Indexing messagestore...", RNS.LOG_NOTICE)
|
|
773
|
+
for filename in os.listdir(self.messagepath):
|
|
774
|
+
components = filename.split("_")
|
|
775
|
+
if len(components) >= 3:
|
|
776
|
+
if float(components[1]) > 0:
|
|
777
|
+
if len(components[0]) == RNS.Identity.HASHLENGTH // 8 * 2:
|
|
778
|
+
try:
|
|
779
|
+
transient_id = bytes.fromhex(components[0])
|
|
780
|
+
received = float(components[1])
|
|
781
|
+
stamp_value = int(components[2])
|
|
782
|
+
filepath = self.messagepath + "/" + filename
|
|
783
|
+
msg_size = os.path.getsize(filepath)
|
|
784
|
+
file = open(filepath, "rb")
|
|
785
|
+
destination_hash = file.read(
|
|
786
|
+
LXMessage.DESTINATION_LENGTH
|
|
787
|
+
)
|
|
788
|
+
file.close()
|
|
789
|
+
|
|
790
|
+
self.propagation_entries[transient_id] = [
|
|
791
|
+
destination_hash, # 0: Destination hash
|
|
792
|
+
filepath, # 1: Storage location
|
|
793
|
+
received, # 2: Receive timestamp
|
|
794
|
+
msg_size, # 3: Message size
|
|
795
|
+
[], # 4: Handled peers
|
|
796
|
+
[], # 5: Unhandled peers
|
|
797
|
+
stamp_value, # 6: Stamp value
|
|
798
|
+
]
|
|
799
|
+
|
|
800
|
+
except Exception as e:
|
|
801
|
+
RNS.log(
|
|
802
|
+
"Could not read LXM from message store. The contained exception was: "
|
|
803
|
+
+ str(e),
|
|
804
|
+
RNS.LOG_ERROR,
|
|
805
|
+
)
|
|
806
|
+
|
|
807
|
+
et = time.time()
|
|
808
|
+
mps = (
|
|
809
|
+
0
|
|
810
|
+
if et - st == 0
|
|
811
|
+
else math.floor(len(self.propagation_entries) / (et - st))
|
|
812
|
+
)
|
|
813
|
+
RNS.log(
|
|
814
|
+
f"Indexed {len(self.propagation_entries)} messages in {RNS.prettytime(et-st)}, {mps} msgs/s",
|
|
815
|
+
RNS.LOG_NOTICE,
|
|
816
|
+
)
|
|
817
|
+
RNS.log("Rebuilding peer synchronisation states...", RNS.LOG_NOTICE)
|
|
818
|
+
st = time.time()
|
|
819
|
+
|
|
820
|
+
if os.path.isfile(self.storagepath + "/peers"):
|
|
821
|
+
peers_file = open(self.storagepath + "/peers", "rb")
|
|
822
|
+
peers_data = peers_file.read()
|
|
823
|
+
peers_file.close()
|
|
824
|
+
|
|
825
|
+
if len(peers_data) > 0:
|
|
826
|
+
serialised_peers = msgpack.unpackb(peers_data)
|
|
827
|
+
del peers_data
|
|
828
|
+
|
|
829
|
+
while len(serialised_peers) > 0:
|
|
830
|
+
serialised_peer = serialised_peers.pop()
|
|
831
|
+
peer = LXMPeer.from_bytes(serialised_peer, self)
|
|
832
|
+
del serialised_peer
|
|
833
|
+
if (
|
|
834
|
+
peer.destination_hash in self.static_peers
|
|
835
|
+
and peer.last_heard == 0
|
|
836
|
+
):
|
|
837
|
+
RNS.Transport.request_path(peer.destination_hash)
|
|
838
|
+
if peer.identity != None:
|
|
839
|
+
self.peers[peer.destination_hash] = peer
|
|
840
|
+
lim_str = ", no transfer limit"
|
|
841
|
+
if peer.propagation_transfer_limit != None:
|
|
842
|
+
lim_str = (
|
|
843
|
+
", "
|
|
844
|
+
+ RNS.prettysize(
|
|
845
|
+
peer.propagation_transfer_limit * 1000
|
|
846
|
+
)
|
|
847
|
+
+ " transfer limit"
|
|
848
|
+
)
|
|
849
|
+
RNS.log(
|
|
850
|
+
"Rebuilt peer "
|
|
851
|
+
+ RNS.prettyhexrep(peer.destination_hash)
|
|
852
|
+
+ " with "
|
|
853
|
+
+ str(peer.unhandled_message_count)
|
|
854
|
+
+ " unhandled messages"
|
|
855
|
+
+ lim_str,
|
|
856
|
+
RNS.LOG_DEBUG,
|
|
857
|
+
)
|
|
858
|
+
else:
|
|
859
|
+
RNS.log(
|
|
860
|
+
"Peer "
|
|
861
|
+
+ RNS.prettyhexrep(peer.destination_hash)
|
|
862
|
+
+ " could not be loaded, because its identity could not be recalled. Dropping peer.",
|
|
863
|
+
RNS.LOG_DEBUG,
|
|
864
|
+
)
|
|
865
|
+
del peer
|
|
866
|
+
|
|
867
|
+
del serialised_peers
|
|
868
|
+
|
|
869
|
+
if len(self.static_peers) > 0:
|
|
870
|
+
for static_peer in self.static_peers:
|
|
871
|
+
if not static_peer in self.peers:
|
|
872
|
+
RNS.log(
|
|
873
|
+
f"Activating static peering with {RNS.prettyhexrep(static_peer)}",
|
|
874
|
+
RNS.LOG_NOTICE,
|
|
875
|
+
)
|
|
876
|
+
self.peers[static_peer] = LXMPeer(
|
|
877
|
+
self, static_peer, sync_strategy=self.default_sync_strategy
|
|
878
|
+
)
|
|
879
|
+
if self.peers[static_peer].last_heard == 0:
|
|
880
|
+
# TODO: Allow path request responses through announce handler
|
|
881
|
+
# momentarily here, so peering config can be updated even if
|
|
882
|
+
# the static peer is not available to directly send an announce.
|
|
883
|
+
RNS.Transport.request_path(static_peer)
|
|
884
|
+
|
|
885
|
+
RNS.log(
|
|
886
|
+
f"Rebuilt synchronisation state for {len(self.peers)} peers in {RNS.prettytime(time.time()-st)}",
|
|
887
|
+
RNS.LOG_NOTICE,
|
|
888
|
+
)
|
|
889
|
+
|
|
890
|
+
try:
|
|
891
|
+
if os.path.isfile(self.storagepath + "/node_stats"):
|
|
892
|
+
node_stats_file = open(self.storagepath + "/node_stats", "rb")
|
|
893
|
+
data = node_stats_file.read()
|
|
894
|
+
node_stats_file.close()
|
|
895
|
+
node_stats = msgpack.unpackb(data)
|
|
896
|
+
|
|
897
|
+
if not type(node_stats) == dict:
|
|
898
|
+
RNS.log(
|
|
899
|
+
"Invalid data format for loaded local node stats, node stats will be reset",
|
|
900
|
+
RNS.LOG_ERROR,
|
|
901
|
+
)
|
|
902
|
+
else:
|
|
903
|
+
self.client_propagation_messages_received = node_stats[
|
|
904
|
+
"client_propagation_messages_received"
|
|
905
|
+
]
|
|
906
|
+
self.client_propagation_messages_served = node_stats[
|
|
907
|
+
"client_propagation_messages_served"
|
|
908
|
+
]
|
|
909
|
+
self.unpeered_propagation_incoming = node_stats[
|
|
910
|
+
"unpeered_propagation_incoming"
|
|
911
|
+
]
|
|
912
|
+
self.unpeered_propagation_rx_bytes = node_stats[
|
|
913
|
+
"unpeered_propagation_rx_bytes"
|
|
914
|
+
]
|
|
915
|
+
|
|
916
|
+
except Exception as e:
|
|
917
|
+
RNS.log(
|
|
918
|
+
"Could not load local node stats. The contained exception was: "
|
|
919
|
+
+ str(e),
|
|
920
|
+
RNS.LOG_ERROR,
|
|
921
|
+
)
|
|
922
|
+
|
|
923
|
+
self.propagation_node = True
|
|
924
|
+
self.propagation_node_start_time = time.time()
|
|
925
|
+
self.propagation_destination.set_link_established_callback(
|
|
926
|
+
self.propagation_link_established
|
|
927
|
+
)
|
|
928
|
+
self.propagation_destination.set_packet_callback(self.propagation_packet)
|
|
929
|
+
|
|
930
|
+
self.propagation_destination.register_request_handler(
|
|
931
|
+
LXMPeer.OFFER_REQUEST_PATH,
|
|
932
|
+
self.offer_request,
|
|
933
|
+
allow=RNS.Destination.ALLOW_ALL,
|
|
934
|
+
)
|
|
935
|
+
self.propagation_destination.register_request_handler(
|
|
936
|
+
LXMPeer.MESSAGE_GET_PATH,
|
|
937
|
+
self.message_get_request,
|
|
938
|
+
allow=RNS.Destination.ALLOW_ALL,
|
|
939
|
+
)
|
|
940
|
+
|
|
941
|
+
self.control_allowed_list = [self.identity.hash]
|
|
942
|
+
self.control_destination = RNS.Destination(
|
|
943
|
+
self.identity,
|
|
944
|
+
RNS.Destination.IN,
|
|
945
|
+
RNS.Destination.SINGLE,
|
|
946
|
+
APP_NAME,
|
|
947
|
+
"propagation",
|
|
948
|
+
"control",
|
|
949
|
+
)
|
|
950
|
+
self.control_destination.register_request_handler(
|
|
951
|
+
LXMRouter.STATS_GET_PATH,
|
|
952
|
+
self.stats_get_request,
|
|
953
|
+
allow=RNS.Destination.ALLOW_LIST,
|
|
954
|
+
allowed_list=self.control_allowed_list,
|
|
955
|
+
)
|
|
956
|
+
self.control_destination.register_request_handler(
|
|
957
|
+
LXMRouter.SYNC_REQUEST_PATH,
|
|
958
|
+
self.peer_sync_request,
|
|
959
|
+
allow=RNS.Destination.ALLOW_LIST,
|
|
960
|
+
allowed_list=self.control_allowed_list,
|
|
961
|
+
)
|
|
962
|
+
self.control_destination.register_request_handler(
|
|
963
|
+
LXMRouter.UNPEER_REQUEST_PATH,
|
|
964
|
+
self.peer_unpeer_request,
|
|
965
|
+
allow=RNS.Destination.ALLOW_LIST,
|
|
966
|
+
allowed_list=self.control_allowed_list,
|
|
967
|
+
)
|
|
968
|
+
|
|
969
|
+
if self.message_storage_limit != None:
|
|
970
|
+
limit_str = ", limit is " + RNS.prettysize(self.message_storage_limit)
|
|
971
|
+
else:
|
|
972
|
+
limit_str = ""
|
|
973
|
+
|
|
974
|
+
RNS.log(
|
|
975
|
+
"LXMF Propagation Node message store size is "
|
|
976
|
+
+ RNS.prettysize(self.message_storage_size())
|
|
977
|
+
+ limit_str,
|
|
978
|
+
RNS.LOG_DEBUG,
|
|
979
|
+
)
|
|
980
|
+
|
|
981
|
+
self.announce_propagation_node()
|
|
982
|
+
|
|
983
|
+
except Exception as e:
|
|
984
|
+
RNS.log(
|
|
985
|
+
"Could not enable propagation node. The contained exception was: "
|
|
986
|
+
+ str(e),
|
|
987
|
+
RNS.LOG_ERROR,
|
|
988
|
+
)
|
|
989
|
+
raise e
|
|
990
|
+
RNS.panic()
|
|
991
|
+
|
|
992
|
+
def disable_propagation(self):
|
|
993
|
+
self.propagation_node = False
|
|
994
|
+
self.announce_propagation_node()
|
|
995
|
+
|
|
996
|
+
def enforce_stamps(self):
|
|
997
|
+
self._enforce_stamps = True
|
|
998
|
+
|
|
999
|
+
def ignore_stamps(self):
|
|
1000
|
+
self._enforce_stamps = False
|
|
1001
|
+
|
|
1002
|
+
def ignore_destination(self, destination_hash):
|
|
1003
|
+
if not destination_hash in self.ignored_list:
|
|
1004
|
+
self.ignored_list.append(destination_hash)
|
|
1005
|
+
|
|
1006
|
+
def unignore_destination(self, destination_hash):
|
|
1007
|
+
if destination_hash in self.ignored_list:
|
|
1008
|
+
self.ignored_list.remove(destination_hash)
|
|
1009
|
+
|
|
1010
|
+
def set_message_storage_limit(self, kilobytes=None, megabytes=None, gigabytes=None):
|
|
1011
|
+
limit_bytes = 0
|
|
1012
|
+
|
|
1013
|
+
if kilobytes != None:
|
|
1014
|
+
limit_bytes += kilobytes * 1000
|
|
1015
|
+
|
|
1016
|
+
if megabytes != None:
|
|
1017
|
+
limit_bytes += megabytes * 1000 * 1000
|
|
1018
|
+
|
|
1019
|
+
if gigabytes != None:
|
|
1020
|
+
limit_bytes += gigabytes * 1000 * 1000 * 1000
|
|
1021
|
+
|
|
1022
|
+
if limit_bytes == 0:
|
|
1023
|
+
limit_bytes = None
|
|
1024
|
+
|
|
1025
|
+
try:
|
|
1026
|
+
if limit_bytes == None or int(limit_bytes) > 0:
|
|
1027
|
+
self._message_storage_limit = int(limit_bytes)
|
|
1028
|
+
else:
|
|
1029
|
+
raise ValueError(
|
|
1030
|
+
"Cannot set LXMF information storage limit to " + str(limit_bytes)
|
|
1031
|
+
)
|
|
1032
|
+
|
|
1033
|
+
except Exception as e:
|
|
1034
|
+
raise ValueError(
|
|
1035
|
+
"Cannot set LXMF information storage limit to " + str(limit_bytes)
|
|
1036
|
+
)
|
|
1037
|
+
|
|
1038
|
+
@property
|
|
1039
|
+
def message_storage_limit(self):
|
|
1040
|
+
return self._message_storage_limit
|
|
1041
|
+
|
|
1042
|
+
def message_storage_size(self):
|
|
1043
|
+
if self.propagation_node:
|
|
1044
|
+
return sum(self.propagation_entries[f][3] for f in self.propagation_entries)
|
|
1045
|
+
else:
|
|
1046
|
+
return None
|
|
1047
|
+
|
|
1048
|
+
def set_information_storage_limit(
|
|
1049
|
+
self, kilobytes=None, megabytes=None, gigabytes=None
|
|
1050
|
+
):
|
|
1051
|
+
limit_bytes = 0
|
|
1052
|
+
if kilobytes != None:
|
|
1053
|
+
limit_bytes += kilobytes * 1000
|
|
1054
|
+
if megabytes != None:
|
|
1055
|
+
limit_bytes += megabytes * 1000 * 1000
|
|
1056
|
+
if gigabytes != None:
|
|
1057
|
+
limit_bytes += gigabytes * 1000 * 1000 * 1000
|
|
1058
|
+
if limit_bytes == 0:
|
|
1059
|
+
limit_bytes = None
|
|
1060
|
+
|
|
1061
|
+
try:
|
|
1062
|
+
if limit_bytes == None or int(limit_bytes) > 0:
|
|
1063
|
+
self._information_storage_limit = int(limit_bytes)
|
|
1064
|
+
else:
|
|
1065
|
+
raise ValueError(
|
|
1066
|
+
"Cannot set LXMF information storage limit to " + str(limit_bytes)
|
|
1067
|
+
)
|
|
1068
|
+
except Exception as e:
|
|
1069
|
+
raise ValueError(
|
|
1070
|
+
"Cannot set LXMF information storage limit to " + str(limit_bytes)
|
|
1071
|
+
)
|
|
1072
|
+
|
|
1073
|
+
@property
|
|
1074
|
+
def information_storage_limit(self):
|
|
1075
|
+
return self._information_storage_limit
|
|
1076
|
+
|
|
1077
|
+
def information_storage_size(self):
|
|
1078
|
+
pass
|
|
1079
|
+
|
|
1080
|
+
def delivery_link_available(self, destination_hash):
|
|
1081
|
+
if (
|
|
1082
|
+
destination_hash in self.direct_links
|
|
1083
|
+
or destination_hash in self.backchannel_links
|
|
1084
|
+
):
|
|
1085
|
+
return True
|
|
1086
|
+
else:
|
|
1087
|
+
return False
|
|
1088
|
+
|
|
1089
|
+
### Propagation Node Control ##########################
|
|
1090
|
+
#######################################################
|
|
1091
|
+
|
|
1092
|
+
def compile_stats(self):
|
|
1093
|
+
if not self.propagation_node:
|
|
1094
|
+
return None
|
|
1095
|
+
else:
|
|
1096
|
+
peer_stats = {}
|
|
1097
|
+
for peer_id in self.peers.copy():
|
|
1098
|
+
peer = self.peers[peer_id]
|
|
1099
|
+
peer_stats[peer_id] = {
|
|
1100
|
+
"type": "static" if peer_id in self.static_peers else "discovered",
|
|
1101
|
+
"state": peer.state,
|
|
1102
|
+
"alive": peer.alive,
|
|
1103
|
+
"name": peer.name,
|
|
1104
|
+
"last_heard": int(peer.last_heard),
|
|
1105
|
+
"next_sync_attempt": peer.next_sync_attempt,
|
|
1106
|
+
"last_sync_attempt": peer.last_sync_attempt,
|
|
1107
|
+
"sync_backoff": peer.sync_backoff,
|
|
1108
|
+
"peering_timebase": peer.peering_timebase,
|
|
1109
|
+
"ler": int(peer.link_establishment_rate),
|
|
1110
|
+
"str": int(peer.sync_transfer_rate),
|
|
1111
|
+
"transfer_limit": peer.propagation_transfer_limit,
|
|
1112
|
+
"sync_limit": peer.propagation_sync_limit,
|
|
1113
|
+
"target_stamp_cost": peer.propagation_stamp_cost,
|
|
1114
|
+
"stamp_cost_flexibility": peer.propagation_stamp_cost_flexibility,
|
|
1115
|
+
"peering_cost": peer.peering_cost,
|
|
1116
|
+
"peering_key": peer.peering_key_value(),
|
|
1117
|
+
"network_distance": RNS.Transport.hops_to(peer_id),
|
|
1118
|
+
"rx_bytes": peer.rx_bytes,
|
|
1119
|
+
"tx_bytes": peer.tx_bytes,
|
|
1120
|
+
"acceptance_rate": peer.acceptance_rate,
|
|
1121
|
+
"messages": {
|
|
1122
|
+
"offered": peer.offered,
|
|
1123
|
+
"outgoing": peer.outgoing,
|
|
1124
|
+
"incoming": peer.incoming,
|
|
1125
|
+
"unhandled": peer.unhandled_message_count,
|
|
1126
|
+
},
|
|
1127
|
+
}
|
|
1128
|
+
|
|
1129
|
+
node_stats = {
|
|
1130
|
+
"identity_hash": self.identity.hash,
|
|
1131
|
+
"destination_hash": self.propagation_destination.hash,
|
|
1132
|
+
"uptime": time.time() - self.propagation_node_start_time,
|
|
1133
|
+
"delivery_limit": self.delivery_per_transfer_limit,
|
|
1134
|
+
"propagation_limit": self.propagation_per_transfer_limit,
|
|
1135
|
+
"sync_limit": self.propagation_per_sync_limit,
|
|
1136
|
+
"target_stamp_cost": self.propagation_stamp_cost,
|
|
1137
|
+
"stamp_cost_flexibility": self.propagation_stamp_cost_flexibility,
|
|
1138
|
+
"peering_cost": self.peering_cost,
|
|
1139
|
+
"max_peering_cost": self.max_peering_cost,
|
|
1140
|
+
"autopeer_maxdepth": self.autopeer_maxdepth,
|
|
1141
|
+
"from_static_only": self.from_static_only,
|
|
1142
|
+
"messagestore": {
|
|
1143
|
+
"count": len(self.propagation_entries),
|
|
1144
|
+
"bytes": self.message_storage_size(),
|
|
1145
|
+
"limit": self.message_storage_limit,
|
|
1146
|
+
},
|
|
1147
|
+
"clients": {
|
|
1148
|
+
"client_propagation_messages_received": self.client_propagation_messages_received,
|
|
1149
|
+
"client_propagation_messages_served": self.client_propagation_messages_served,
|
|
1150
|
+
},
|
|
1151
|
+
"unpeered_propagation_incoming": self.unpeered_propagation_incoming,
|
|
1152
|
+
"unpeered_propagation_rx_bytes": self.unpeered_propagation_rx_bytes,
|
|
1153
|
+
"static_peers": len(self.static_peers),
|
|
1154
|
+
"discovered_peers": len(self.peers) - len(self.static_peers),
|
|
1155
|
+
"total_peers": len(self.peers),
|
|
1156
|
+
"max_peers": self.max_peers,
|
|
1157
|
+
"peers": peer_stats,
|
|
1158
|
+
}
|
|
1159
|
+
|
|
1160
|
+
return node_stats
|
|
1161
|
+
|
|
1162
|
+
def stats_get_request(self, path, data, request_id, remote_identity, requested_at):
|
|
1163
|
+
if remote_identity == None:
|
|
1164
|
+
return LXMPeer.ERROR_NO_IDENTITY
|
|
1165
|
+
elif remote_identity.hash not in self.control_allowed_list:
|
|
1166
|
+
return LXMPeer.ERROR_NO_ACCESS
|
|
1167
|
+
else:
|
|
1168
|
+
return self.compile_stats()
|
|
1169
|
+
|
|
1170
|
+
def peer_sync_request(self, path, data, request_id, remote_identity, requested_at):
|
|
1171
|
+
if remote_identity == None:
|
|
1172
|
+
return LXMPeer.ERROR_NO_IDENTITY
|
|
1173
|
+
elif remote_identity.hash not in self.control_allowed_list:
|
|
1174
|
+
return LXMPeer.ERROR_NO_ACCESS
|
|
1175
|
+
else:
|
|
1176
|
+
if type(data) != bytes:
|
|
1177
|
+
return LXMPeer.ERROR_INVALID_DATA
|
|
1178
|
+
elif len(data) != RNS.Identity.TRUNCATED_HASHLENGTH // 8:
|
|
1179
|
+
return LXMPeer.ERROR_INVALID_DATA
|
|
1180
|
+
else:
|
|
1181
|
+
if not data in self.peers:
|
|
1182
|
+
return LXMPeer.ERROR_NOT_FOUND
|
|
1183
|
+
else:
|
|
1184
|
+
self.peers[data].sync()
|
|
1185
|
+
return True
|
|
1186
|
+
|
|
1187
|
+
def peer_unpeer_request(
|
|
1188
|
+
self, path, data, request_id, remote_identity, requested_at
|
|
1189
|
+
):
|
|
1190
|
+
if remote_identity == None:
|
|
1191
|
+
return LXMPeer.ERROR_NO_IDENTITY
|
|
1192
|
+
elif remote_identity.hash not in self.control_allowed_list:
|
|
1193
|
+
return LXMPeer.ERROR_NO_ACCESS
|
|
1194
|
+
else:
|
|
1195
|
+
if type(data) != bytes:
|
|
1196
|
+
return LXMPeer.ERROR_INVALID_DATA
|
|
1197
|
+
elif len(data) != RNS.Identity.TRUNCATED_HASHLENGTH // 8:
|
|
1198
|
+
return LXMPeer.ERROR_INVALID_DATA
|
|
1199
|
+
else:
|
|
1200
|
+
if not data in self.peers:
|
|
1201
|
+
return LXMPeer.ERROR_NOT_FOUND
|
|
1202
|
+
else:
|
|
1203
|
+
self.unpeer(data)
|
|
1204
|
+
return True
|
|
1205
|
+
|
|
1206
|
+
### Utility & Maintenance #############################
|
|
1207
|
+
#######################################################
|
|
1208
|
+
|
|
1209
|
+
JOB_OUTBOUND_INTERVAL = 1
|
|
1210
|
+
JOB_STAMPS_INTERVAL = 1
|
|
1211
|
+
JOB_LINKS_INTERVAL = 1
|
|
1212
|
+
JOB_TRANSIENT_INTERVAL = 60
|
|
1213
|
+
JOB_STORE_INTERVAL = 120
|
|
1214
|
+
JOB_PEERSYNC_INTERVAL = 6
|
|
1215
|
+
JOB_PEERINGEST_INTERVAL = JOB_PEERSYNC_INTERVAL
|
|
1216
|
+
JOB_ROTATE_INTERVAL = 56 * JOB_PEERINGEST_INTERVAL
|
|
1217
|
+
|
|
1218
|
+
def jobs(self):
|
|
1219
|
+
if not self.exit_handler_running:
|
|
1220
|
+
self.processing_count += 1
|
|
1221
|
+
|
|
1222
|
+
if self.processing_count % LXMRouter.JOB_OUTBOUND_INTERVAL == 0:
|
|
1223
|
+
self.process_outbound()
|
|
1224
|
+
|
|
1225
|
+
if self.processing_count % LXMRouter.JOB_STAMPS_INTERVAL == 0:
|
|
1226
|
+
threading.Thread(
|
|
1227
|
+
target=self.process_deferred_stamps, daemon=True
|
|
1228
|
+
).start()
|
|
1229
|
+
|
|
1230
|
+
if self.processing_count % LXMRouter.JOB_LINKS_INTERVAL == 0:
|
|
1231
|
+
self.clean_links()
|
|
1232
|
+
|
|
1233
|
+
if self.processing_count % LXMRouter.JOB_TRANSIENT_INTERVAL == 0:
|
|
1234
|
+
self.clean_transient_id_caches()
|
|
1235
|
+
|
|
1236
|
+
if self.processing_count % LXMRouter.JOB_STORE_INTERVAL == 0:
|
|
1237
|
+
if self.propagation_node == True:
|
|
1238
|
+
self.clean_message_store()
|
|
1239
|
+
|
|
1240
|
+
if self.processing_count % LXMRouter.JOB_PEERINGEST_INTERVAL == 0:
|
|
1241
|
+
if self.propagation_node == True:
|
|
1242
|
+
self.flush_queues()
|
|
1243
|
+
|
|
1244
|
+
if self.processing_count % LXMRouter.JOB_ROTATE_INTERVAL == 0:
|
|
1245
|
+
if self.propagation_node == True:
|
|
1246
|
+
self.rotate_peers()
|
|
1247
|
+
|
|
1248
|
+
if self.processing_count % LXMRouter.JOB_PEERSYNC_INTERVAL == 0:
|
|
1249
|
+
if self.propagation_node == True:
|
|
1250
|
+
self.sync_peers()
|
|
1251
|
+
self.clean_throttled_peers()
|
|
1252
|
+
|
|
1253
|
+
def jobloop(self):
|
|
1254
|
+
while True:
|
|
1255
|
+
# TODO: Improve this to scheduling, so manual
|
|
1256
|
+
# triggers can delay next run
|
|
1257
|
+
try:
|
|
1258
|
+
self.jobs()
|
|
1259
|
+
except Exception as e:
|
|
1260
|
+
RNS.log(
|
|
1261
|
+
"An error ocurred while running LXMF Router jobs.", RNS.LOG_ERROR
|
|
1262
|
+
)
|
|
1263
|
+
RNS.log("The contained exception was: " + str(e), RNS.LOG_ERROR)
|
|
1264
|
+
RNS.trace_exception(e)
|
|
1265
|
+
time.sleep(LXMRouter.PROCESSING_INTERVAL)
|
|
1266
|
+
|
|
1267
|
+
def flush_queues(self):
|
|
1268
|
+
if len(self.peers) > 0:
|
|
1269
|
+
self.flush_peer_distribution_queue()
|
|
1270
|
+
RNS.log("Calculating peer distribution queue mappings...", RNS.LOG_DEBUG)
|
|
1271
|
+
st = time.time()
|
|
1272
|
+
for peer_id in self.peers.copy():
|
|
1273
|
+
if peer_id in self.peers:
|
|
1274
|
+
peer = self.peers[peer_id]
|
|
1275
|
+
if peer.queued_items():
|
|
1276
|
+
peer.process_queues()
|
|
1277
|
+
|
|
1278
|
+
RNS.log(
|
|
1279
|
+
f"Distribution queue mapping completed in {RNS.prettytime(time.time()-st)}",
|
|
1280
|
+
RNS.LOG_DEBUG,
|
|
1281
|
+
)
|
|
1282
|
+
|
|
1283
|
+
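# clean_links() tears down direct delivery links and inbound propagation links
# that have been idle beyond LINK_MAX_INACTIVITY / P_LINK_MAX_INACTIVITY, and
# resets a closed outbound propagation link. If the outbound link closed before
# the sync finished, the transfer state is mapped to a failure state: below
# PR_LINK_ESTABLISHED -> PR_LINK_FAILED, between link establishment and
# PR_COMPLETE -> PR_TRANSFER_FAILED; a completed sync is simply acknowledged.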
def clean_links(self):
|
|
1284
|
+
closed_links = []
|
|
1285
|
+
for link_hash in self.direct_links:
|
|
1286
|
+
link = self.direct_links[link_hash]
|
|
1287
|
+
inactive_time = link.no_data_for()
|
|
1288
|
+
|
|
1289
|
+
if inactive_time > LXMRouter.LINK_MAX_INACTIVITY:
|
|
1290
|
+
link.teardown()
|
|
1291
|
+
closed_links.append(link_hash)
|
|
1292
|
+
if link.link_id in self.validated_peer_links:
|
|
1293
|
+
self.validated_peer_links.pop(link.link_id)
|
|
1294
|
+
|
|
1295
|
+
for link_hash in closed_links:
|
|
1296
|
+
cleaned_link = self.direct_links.pop(link_hash)
|
|
1297
|
+
RNS.log("Cleaned link " + str(cleaned_link), RNS.LOG_DEBUG)
|
|
1298
|
+
|
|
1299
|
+
try:
|
|
1300
|
+
inactive_links = []
|
|
1301
|
+
for link in self.active_propagation_links:
|
|
1302
|
+
if link.no_data_for() > LXMRouter.P_LINK_MAX_INACTIVITY:
|
|
1303
|
+
inactive_links.append(link)
|
|
1304
|
+
|
|
1305
|
+
for link in inactive_links:
|
|
1306
|
+
self.active_propagation_links.remove(link)
|
|
1307
|
+
link.teardown()
|
|
1308
|
+
|
|
1309
|
+
except Exception as e:
|
|
1310
|
+
RNS.log(
|
|
1311
|
+
"An error occurred while cleaning inbound propagation links. The contained exception was: "
|
|
1312
|
+
+ str(e),
|
|
1313
|
+
RNS.LOG_ERROR,
|
|
1314
|
+
)
|
|
1315
|
+
|
|
1316
|
+
if (
|
|
1317
|
+
self.outbound_propagation_link != None
|
|
1318
|
+
and self.outbound_propagation_link.status == RNS.Link.CLOSED
|
|
1319
|
+
):
|
|
1320
|
+
self.outbound_propagation_link = None
|
|
1321
|
+
if self.propagation_transfer_state == LXMRouter.PR_COMPLETE:
|
|
1322
|
+
self.acknowledge_sync_completion()
|
|
1323
|
+
elif self.propagation_transfer_state < LXMRouter.PR_LINK_ESTABLISHED:
|
|
1324
|
+
self.acknowledge_sync_completion(failure_state=LXMRouter.PR_LINK_FAILED)
|
|
1325
|
+
elif (
|
|
1326
|
+
self.propagation_transfer_state >= LXMRouter.PR_LINK_ESTABLISHED
|
|
1327
|
+
and self.propagation_transfer_state < LXMRouter.PR_COMPLETE
|
|
1328
|
+
):
|
|
1329
|
+
self.acknowledge_sync_completion(
|
|
1330
|
+
failure_state=LXMRouter.PR_TRANSFER_FAILED
|
|
1331
|
+
)
|
|
1332
|
+
else:
|
|
1333
|
+
RNS.log(
|
|
1334
|
+
f"Unknown propagation transfer state on link cleaning: {self.propagation_transfer_state}",
|
|
1335
|
+
RNS.LOG_DEBUG,
|
|
1336
|
+
)
|
|
1337
|
+
self.acknowledge_sync_completion()
|
|
1338
|
+
|
|
1339
|
+
RNS.log("Cleaned outbound propagation link", RNS.LOG_DEBUG)
|
|
1340
|
+
|
|
1341
|
+
def clean_transient_id_caches(self):
|
|
1342
|
+
now = time.time()
|
|
1343
|
+
removed_entries = []
|
|
1344
|
+
for transient_id in self.locally_delivered_transient_ids:
|
|
1345
|
+
timestamp = self.locally_delivered_transient_ids[transient_id]
|
|
1346
|
+
if now > timestamp + LXMRouter.MESSAGE_EXPIRY * 6.0:
|
|
1347
|
+
removed_entries.append(transient_id)
|
|
1348
|
+
|
|
1349
|
+
for transient_id in removed_entries:
|
|
1350
|
+
self.locally_delivered_transient_ids.pop(transient_id)
|
|
1351
|
+
RNS.log(
|
|
1352
|
+
"Cleaned "
|
|
1353
|
+
+ RNS.prettyhexrep(transient_id)
|
|
1354
|
+
+ " from local delivery cache",
|
|
1355
|
+
RNS.LOG_DEBUG,
|
|
1356
|
+
)
|
|
1357
|
+
|
|
1358
|
+
removed_entries = []
|
|
1359
|
+
for transient_id in self.locally_processed_transient_ids:
|
|
1360
|
+
timestamp = self.locally_processed_transient_ids[transient_id]
|
|
1361
|
+
if now > timestamp + LXMRouter.MESSAGE_EXPIRY * 6.0:
|
|
1362
|
+
removed_entries.append(transient_id)
|
|
1363
|
+
|
|
1364
|
+
for transient_id in removed_entries:
|
|
1365
|
+
self.locally_processed_transient_ids.pop(transient_id)
|
|
1366
|
+
RNS.log(
|
|
1367
|
+
"Cleaned "
|
|
1368
|
+
+ RNS.prettyhexrep(transient_id)
|
|
1369
|
+
+ " from locally processed cache",
|
|
1370
|
+
RNS.LOG_DEBUG,
|
|
1371
|
+
)
|
|
1372
|
+
|
|
1373
|
+
def update_stamp_cost(self, destination_hash, stamp_cost):
|
|
1374
|
+
RNS.log(
|
|
1375
|
+
f"Updating outbound stamp cost for {RNS.prettyhexrep(destination_hash)} to {stamp_cost}",
|
|
1376
|
+
RNS.LOG_DEBUG,
|
|
1377
|
+
)
|
|
1378
|
+
self.outbound_stamp_costs[destination_hash] = [time.time(), stamp_cost]
|
|
1379
|
+
|
|
1380
|
+
def job():
|
|
1381
|
+
self.save_outbound_stamp_costs()
|
|
1382
|
+
|
|
1383
|
+
threading.Thread(target=job, daemon=True).start()
|
|
1384
|
+
|
|
1385
|
+
def get_announce_app_data(self, destination_hash):
|
|
1386
|
+
if destination_hash in self.delivery_destinations:
|
|
1387
|
+
delivery_destination = self.delivery_destinations[destination_hash]
|
|
1388
|
+
|
|
1389
|
+
display_name = None
|
|
1390
|
+
if delivery_destination.display_name != None:
|
|
1391
|
+
display_name = delivery_destination.display_name.encode("utf-8")
|
|
1392
|
+
|
|
1393
|
+
stamp_cost = None
|
|
1394
|
+
if (
|
|
1395
|
+
delivery_destination.stamp_cost != None
|
|
1396
|
+
and type(delivery_destination.stamp_cost) == int
|
|
1397
|
+
):
|
|
1398
|
+
if (
|
|
1399
|
+
delivery_destination.stamp_cost > 0
|
|
1400
|
+
and delivery_destination.stamp_cost < 255
|
|
1401
|
+
):
|
|
1402
|
+
stamp_cost = delivery_destination.stamp_cost
|
|
1403
|
+
|
|
1404
|
+
peer_data = [display_name, stamp_cost]
|
|
1405
|
+
|
|
1406
|
+
return msgpack.packb(peer_data)
|
|
1407
|
+
|
|
1408
|
+
def get_size(self, transient_id):
|
|
1409
|
+
lxm_size = self.propagation_entries[transient_id][3]
|
|
1410
|
+
return lxm_size
|
|
1411
|
+
|
|
1412
|
+
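# get_weight() scores a stored message for culling in clean_message_store():
# larger and older messages weigh more and are removed first, while messages
# for destinations in self.prioritised_list get a 0.1 multiplier so they are
# kept longer. A rough worked example: a 10 KB message received 8 days ago has
# age_weight = 8 / 4 = 2, so weight = 1.0 * 2 * 10240 = 20480; the same message
# for a prioritised destination scores 0.1 * 2 * 10240 = 2048.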
def get_weight(self, transient_id):
|
|
1413
|
+
dst_hash = self.propagation_entries[transient_id][0]
|
|
1414
|
+
lxm_rcvd = self.propagation_entries[transient_id][2]
|
|
1415
|
+
lxm_size = self.propagation_entries[transient_id][3]
|
|
1416
|
+
|
|
1417
|
+
now = time.time()
|
|
1418
|
+
age_weight = max(1, (now - lxm_rcvd) / 60 / 60 / 24 / 4)
|
|
1419
|
+
|
|
1420
|
+
if dst_hash in self.prioritised_list:
|
|
1421
|
+
priority_weight = 0.1
|
|
1422
|
+
else:
|
|
1423
|
+
priority_weight = 1.0
|
|
1424
|
+
|
|
1425
|
+
return priority_weight * age_weight * lxm_size
|
|
1426
|
+
|
|
1427
|
+
def get_stamp_value(self, transient_id):
|
|
1428
|
+
if not transient_id in self.propagation_entries:
|
|
1429
|
+
return None
|
|
1430
|
+
else:
|
|
1431
|
+
return self.propagation_entries[transient_id][6]
|
|
1432
|
+
|
|
1433
|
+
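# Ticket bookkeeping: self.available_tickets appears to be a dict of the form
# {"outbound": {dest_hash: [expires, ticket]},
#  "inbound":  {dest_hash: {ticket: [expires]}},
#  "last_deliveries": {dest_hash: timestamp}}
# (see reload_available_tickets() below). generate_ticket() re-uses an inbound
# ticket with more than TICKET_RENEW validity left, otherwise mints a new
# random ticket of TICKET_LENGTH bytes; remember_ticket() stores tickets
# received from peers as outbound tickets, and get_outbound_ticket() returns a
# still-valid one so outgoing messages can skip stamp generation.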
def generate_ticket(self, destination_hash, expiry=LXMessage.TICKET_EXPIRY):
|
|
1434
|
+
now = time.time()
|
|
1435
|
+
ticket = None
|
|
1436
|
+
if destination_hash in self.available_tickets["last_deliveries"]:
|
|
1437
|
+
last_delivery = self.available_tickets["last_deliveries"][destination_hash]
|
|
1438
|
+
elapsed = now - last_delivery
|
|
1439
|
+
if elapsed < LXMessage.TICKET_INTERVAL:
|
|
1440
|
+
RNS.log(
|
|
1441
|
+
f"A ticket for {RNS.prettyhexrep(destination_hash)} was already delivered {RNS.prettytime(elapsed)} ago, not including another ticket yet",
|
|
1442
|
+
RNS.LOG_DEBUG,
|
|
1443
|
+
)
|
|
1444
|
+
return None
|
|
1445
|
+
|
|
1446
|
+
if destination_hash in self.available_tickets["inbound"]:
|
|
1447
|
+
for ticket in self.available_tickets["inbound"][destination_hash]:
|
|
1448
|
+
ticket_entry = self.available_tickets["inbound"][destination_hash][
|
|
1449
|
+
ticket
|
|
1450
|
+
]
|
|
1451
|
+
expires = ticket_entry[0]
|
|
1452
|
+
validity_left = expires - now
|
|
1453
|
+
if validity_left > LXMessage.TICKET_RENEW:
|
|
1454
|
+
RNS.log(
|
|
1455
|
+
f"Found generated ticket for {RNS.prettyhexrep(destination_hash)} with {RNS.prettytime(validity_left)} of validity left, re-using this one",
|
|
1456
|
+
RNS.LOG_DEBUG,
|
|
1457
|
+
)
|
|
1458
|
+
return [expires, ticket]
|
|
1459
|
+
|
|
1460
|
+
else:
|
|
1461
|
+
self.available_tickets["inbound"][destination_hash] = {}
|
|
1462
|
+
|
|
1463
|
+
RNS.log(
|
|
1464
|
+
f"No generated tickets for {RNS.prettyhexrep(destination_hash)} with enough validity found, generating a new one",
|
|
1465
|
+
RNS.LOG_DEBUG,
|
|
1466
|
+
)
|
|
1467
|
+
expires = now + expiry
|
|
1468
|
+
ticket = os.urandom(LXMessage.TICKET_LENGTH)
|
|
1469
|
+
self.available_tickets["inbound"][destination_hash][ticket] = [expires]
|
|
1470
|
+
self.save_available_tickets()
|
|
1471
|
+
|
|
1472
|
+
return [expires, ticket]
|
|
1473
|
+
|
|
1474
|
+
def remember_ticket(self, destination_hash, ticket_entry):
|
|
1475
|
+
expires = ticket_entry[0] - time.time()
|
|
1476
|
+
RNS.log(
|
|
1477
|
+
f"Remembering ticket for {RNS.prettyhexrep(destination_hash)}, expires in {RNS.prettytime(expires)}",
|
|
1478
|
+
RNS.LOG_DEBUG,
|
|
1479
|
+
)
|
|
1480
|
+
self.available_tickets["outbound"][destination_hash] = [
|
|
1481
|
+
ticket_entry[0],
|
|
1482
|
+
ticket_entry[1],
|
|
1483
|
+
]
|
|
1484
|
+
|
|
1485
|
+
def get_outbound_ticket(self, destination_hash):
|
|
1486
|
+
if destination_hash in self.available_tickets["outbound"]:
|
|
1487
|
+
entry = self.available_tickets["outbound"][destination_hash]
|
|
1488
|
+
if entry[0] > time.time():
|
|
1489
|
+
return entry[1]
|
|
1490
|
+
|
|
1491
|
+
return None
|
|
1492
|
+
|
|
1493
|
+
def get_outbound_ticket_expiry(self, destination_hash):
|
|
1494
|
+
if destination_hash in self.available_tickets["outbound"]:
|
|
1495
|
+
entry = self.available_tickets["outbound"][destination_hash]
|
|
1496
|
+
if entry[0] > time.time():
|
|
1497
|
+
return entry[0]
|
|
1498
|
+
|
|
1499
|
+
return None
|
|
1500
|
+
|
|
1501
|
+
def get_inbound_tickets(self, destination_hash):
|
|
1502
|
+
now = time.time()
|
|
1503
|
+
available_tickets = []
|
|
1504
|
+
if destination_hash in self.available_tickets["inbound"]:
|
|
1505
|
+
for inbound_ticket in self.available_tickets["inbound"][destination_hash]:
|
|
1506
|
+
if (
|
|
1507
|
+
now
|
|
1508
|
+
< self.available_tickets["inbound"][destination_hash][
|
|
1509
|
+
inbound_ticket
|
|
1510
|
+
][0]
|
|
1511
|
+
):
|
|
1512
|
+
available_tickets.append(inbound_ticket)
|
|
1513
|
+
|
|
1514
|
+
if len(available_tickets) == 0:
|
|
1515
|
+
return None
|
|
1516
|
+
else:
|
|
1517
|
+
return available_tickets
|
|
1518
|
+
|
|
1519
|
+
def clean_throttled_peers(self):
|
|
1520
|
+
expired_entries = []
|
|
1521
|
+
now = time.time()
|
|
1522
|
+
for peer_hash in self.throttled_peers:
|
|
1523
|
+
if now > self.throttled_peers[peer_hash]:
|
|
1524
|
+
expired_entries.append(peer_hash)
|
|
1525
|
+
|
|
1526
|
+
for peer_hash in expired_entries:
|
|
1527
|
+
self.throttled_peers.pop(peer_hash)
|
|
1528
|
+
|
|
1529
|
+
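# Message store file names are expected to follow the pattern
# <destination_hash_hex>_<received_timestamp>_<stamp_value>: the checks below
# split the name on "_" and verify three components, a positive float
# timestamp, a hex destination hash of the full HASHLENGTH, and a stamp value
# matching the propagation entry. Anything that does not match is treated as an
# invalid file path and purged along with expired messages.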
def clean_message_store(self):
|
|
1530
|
+
RNS.log("Cleaning message store", RNS.LOG_VERBOSE)
|
|
1531
|
+
# Check and remove expired messages
|
|
1532
|
+
now = time.time()
|
|
1533
|
+
removed_entries = {}
|
|
1534
|
+
for transient_id in self.propagation_entries.copy():
|
|
1535
|
+
entry = self.propagation_entries[transient_id]
|
|
1536
|
+
filepath = entry[1]
|
|
1537
|
+
stamp_value = entry[6]
|
|
1538
|
+
filename = os.path.split(filepath)[-1]
|
|
1539
|
+
components = filename.split("_")
|
|
1540
|
+
|
|
1541
|
+
if (
|
|
1542
|
+
len(components) == 3
|
|
1543
|
+
and float(components[1]) > 0
|
|
1544
|
+
and len(os.path.split(components[0])[1])
|
|
1545
|
+
== (RNS.Identity.HASHLENGTH // 8) * 2
|
|
1546
|
+
and int(components[2]) == stamp_value
|
|
1547
|
+
):
|
|
1548
|
+
timestamp = float(components[1])
|
|
1549
|
+
if now > timestamp + LXMRouter.MESSAGE_EXPIRY:
|
|
1550
|
+
RNS.log(
|
|
1551
|
+
"Purging message "
|
|
1552
|
+
+ RNS.prettyhexrep(transient_id)
|
|
1553
|
+
+ " due to expiry",
|
|
1554
|
+
RNS.LOG_EXTREME,
|
|
1555
|
+
)
|
|
1556
|
+
removed_entries[transient_id] = filepath
|
|
1557
|
+
else:
|
|
1558
|
+
RNS.log(
|
|
1559
|
+
"Purging message "
|
|
1560
|
+
+ RNS.prettyhexrep(transient_id)
|
|
1561
|
+
+ " due to invalid file path",
|
|
1562
|
+
RNS.LOG_WARNING,
|
|
1563
|
+
)
|
|
1564
|
+
removed_entries[transient_id] = filepath
|
|
1565
|
+
|
|
1566
|
+
removed_count = 0
|
|
1567
|
+
for transient_id in removed_entries:
|
|
1568
|
+
try:
|
|
1569
|
+
filepath = removed_entries[transient_id]
|
|
1570
|
+
self.propagation_entries.pop(transient_id)
|
|
1571
|
+
if os.path.isfile(filepath):
|
|
1572
|
+
os.unlink(filepath)
|
|
1573
|
+
removed_count += 1
|
|
1574
|
+
except Exception as e:
|
|
1575
|
+
RNS.log(
|
|
1576
|
+
"Could not remove "
|
|
1577
|
+
+ RNS.prettyhexrep(transient_id)
|
|
1578
|
+
+ " from message store. The contained exception was: "
|
|
1579
|
+
+ str(e),
|
|
1580
|
+
RNS.LOG_ERROR,
|
|
1581
|
+
)
|
|
1582
|
+
|
|
1583
|
+
if removed_count > 0:
|
|
1584
|
+
RNS.log(
|
|
1585
|
+
"Cleaned " + str(removed_count) + " entries from the message store",
|
|
1586
|
+
RNS.LOG_VERBOSE,
|
|
1587
|
+
)
|
|
1588
|
+
|
|
1589
|
+
# Check size of message store and cull if needed
|
|
1590
|
+
try:
|
|
1591
|
+
message_storage_size = self.message_storage_size()
|
|
1592
|
+
if message_storage_size != None:
|
|
1593
|
+
if (
|
|
1594
|
+
self.message_storage_limit != None
|
|
1595
|
+
and message_storage_size > self.message_storage_limit
|
|
1596
|
+
):
|
|
1597
|
+
# Clean the message storage according to priorities
|
|
1598
|
+
bytes_needed = message_storage_size - self.message_storage_limit
|
|
1599
|
+
bytes_cleaned = 0
|
|
1600
|
+
|
|
1601
|
+
weighted_entries = []
|
|
1602
|
+
for transient_id in self.propagation_entries.copy():
|
|
1603
|
+
weighted_entries.append(
|
|
1604
|
+
[
|
|
1605
|
+
self.propagation_entries[transient_id],
|
|
1606
|
+
self.get_weight(transient_id),
|
|
1607
|
+
transient_id,
|
|
1608
|
+
]
|
|
1609
|
+
)
|
|
1610
|
+
|
|
1611
|
+
weighted_entries.sort(key=lambda we: we[1], reverse=True)
|
|
1612
|
+
|
|
1613
|
+
i = 0
|
|
1614
|
+
while i < len(weighted_entries) and bytes_cleaned < bytes_needed:
|
|
1615
|
+
try:
|
|
1616
|
+
w = weighted_entries[i]
|
|
1617
|
+
entry = w[0]
|
|
1618
|
+
transient_id = w[2]
|
|
1619
|
+
filepath = entry[1]
|
|
1620
|
+
|
|
1621
|
+
if os.path.isfile(filepath):
|
|
1622
|
+
os.unlink(filepath)
|
|
1623
|
+
|
|
1624
|
+
self.propagation_entries.pop(transient_id)
|
|
1625
|
+
bytes_cleaned += entry[3]
|
|
1626
|
+
|
|
1627
|
+
RNS.log(
|
|
1628
|
+
"Removed "
|
|
1629
|
+
+ RNS.prettyhexrep(transient_id)
|
|
1630
|
+
+ " with weight "
|
|
1631
|
+
+ str(w[1])
|
|
1632
|
+
+ " to clear up "
|
|
1633
|
+
+ RNS.prettysize(entry[3])
|
|
1634
|
+
+ ", now cleaned "
|
|
1635
|
+
+ RNS.prettysize(bytes_cleaned)
|
|
1636
|
+
+ " out of "
|
|
1637
|
+
+ RNS.prettysize(bytes_needed)
|
|
1638
|
+
+ " needed",
|
|
1639
|
+
RNS.LOG_EXTREME,
|
|
1640
|
+
)
|
|
1641
|
+
|
|
1642
|
+
except Exception as e:
|
|
1643
|
+
RNS.log(
|
|
1644
|
+
"Error while cleaning LXMF message from message store. The contained exception was: "
|
|
1645
|
+
+ str(e),
|
|
1646
|
+
RNS.LOG_ERROR,
|
|
1647
|
+
)
|
|
1648
|
+
|
|
1649
|
+
finally:
|
|
1650
|
+
i += 1
|
|
1651
|
+
|
|
1652
|
+
RNS.log(
|
|
1653
|
+
"LXMF message store size is now "
|
|
1654
|
+
+ RNS.prettysize(self.message_storage_size())
|
|
1655
|
+
+ " for "
|
|
1656
|
+
+ str(len(self.propagation_entries))
|
|
1657
|
+
+ " items",
|
|
1658
|
+
RNS.LOG_EXTREME,
|
|
1659
|
+
)
|
|
1660
|
+
|
|
1661
|
+
except Exception as e:
|
|
1662
|
+
RNS.log(
|
|
1663
|
+
"Could not clean the LXMF message store. The contained exception was: "
|
|
1664
|
+
+ str(e),
|
|
1665
|
+
RNS.LOG_ERROR,
|
|
1666
|
+
)
|
|
1667
|
+
|
|
1668
|
+
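# The persistence helpers below all follow the same pattern: serialise an
# in-memory dict with msgpack and write it to a flat file under
# self.storagepath. A minimal sketch of reading one of these caches back,
# assuming the same storagepath layout:
#
#     with open(storagepath + "/local_deliveries", "rb") as f:
#         locally_delivered = msgpack.unpackb(f.read())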
def save_locally_delivered_transient_ids(self):
|
|
1669
|
+
try:
|
|
1670
|
+
if len(self.locally_delivered_transient_ids) > 0:
|
|
1671
|
+
if not os.path.isdir(self.storagepath):
|
|
1672
|
+
os.makedirs(self.storagepath)
|
|
1673
|
+
|
|
1674
|
+
with open(
|
|
1675
|
+
self.storagepath + "/local_deliveries", "wb"
|
|
1676
|
+
) as locally_delivered_file:
|
|
1677
|
+
locally_delivered_file.write(
|
|
1678
|
+
msgpack.packb(self.locally_delivered_transient_ids)
|
|
1679
|
+
)
|
|
1680
|
+
|
|
1681
|
+
except Exception as e:
|
|
1682
|
+
RNS.log(
|
|
1683
|
+
"Could not save locally delivered message ID cache to storage. The contained exception was: "
|
|
1684
|
+
+ str(e),
|
|
1685
|
+
RNS.LOG_ERROR,
|
|
1686
|
+
)
|
|
1687
|
+
|
|
1688
|
+
def save_locally_processed_transient_ids(self):
|
|
1689
|
+
try:
|
|
1690
|
+
if len(self.locally_processed_transient_ids) > 0:
|
|
1691
|
+
if not os.path.isdir(self.storagepath):
|
|
1692
|
+
os.makedirs(self.storagepath)
|
|
1693
|
+
|
|
1694
|
+
with open(
|
|
1695
|
+
self.storagepath + "/locally_processed", "wb"
|
|
1696
|
+
) as locally_processed_file:
|
|
1697
|
+
locally_processed_file.write(
|
|
1698
|
+
msgpack.packb(self.locally_processed_transient_ids)
|
|
1699
|
+
)
|
|
1700
|
+
|
|
1701
|
+
except Exception as e:
|
|
1702
|
+
RNS.log(
|
|
1703
|
+
"Could not save locally processed transient ID cache to storage. The contained exception was: "
|
|
1704
|
+
+ str(e),
|
|
1705
|
+
RNS.LOG_ERROR,
|
|
1706
|
+
)
|
|
1707
|
+
|
|
1708
|
+
def save_node_stats(self):
|
|
1709
|
+
try:
|
|
1710
|
+
if not os.path.isdir(self.storagepath):
|
|
1711
|
+
os.makedirs(self.storagepath)
|
|
1712
|
+
|
|
1713
|
+
with open(self.storagepath + "/node_stats", "wb") as stats_file:
|
|
1714
|
+
node_stats = {
|
|
1715
|
+
"client_propagation_messages_received": self.client_propagation_messages_received,
|
|
1716
|
+
"client_propagation_messages_served": self.client_propagation_messages_served,
|
|
1717
|
+
"unpeered_propagation_incoming": self.unpeered_propagation_incoming,
|
|
1718
|
+
"unpeered_propagation_rx_bytes": self.unpeered_propagation_rx_bytes,
|
|
1719
|
+
}
|
|
1720
|
+
stats_file.write(msgpack.packb(node_stats))
|
|
1721
|
+
|
|
1722
|
+
except Exception as e:
|
|
1723
|
+
RNS.log(
|
|
1724
|
+
"Could not save local node stats to storage. The contained exception was: "
|
|
1725
|
+
+ str(e),
|
|
1726
|
+
RNS.LOG_ERROR,
|
|
1727
|
+
)
|
|
1728
|
+
|
|
1729
|
+
def clean_outbound_stamp_costs(self):
|
|
1730
|
+
try:
|
|
1731
|
+
expired = []
|
|
1732
|
+
for destination_hash in self.outbound_stamp_costs:
|
|
1733
|
+
entry = self.outbound_stamp_costs[destination_hash]
|
|
1734
|
+
if time.time() > entry[0] + LXMRouter.STAMP_COST_EXPIRY:
|
|
1735
|
+
expired.append(destination_hash)
|
|
1736
|
+
|
|
1737
|
+
for destination_hash in expired:
|
|
1738
|
+
self.outbound_stamp_costs.pop(destination_hash)
|
|
1739
|
+
|
|
1740
|
+
except Exception as e:
|
|
1741
|
+
RNS.log(
|
|
1742
|
+
f"Error while cleaning outbound stamp costs. The contained exception was: {e}",
|
|
1743
|
+
RNS.LOG_ERROR,
|
|
1744
|
+
)
|
|
1745
|
+
RNS.trace_exception(e)
|
|
1746
|
+
|
|
1747
|
+
def save_outbound_stamp_costs(self):
|
|
1748
|
+
with self.cost_file_lock:
|
|
1749
|
+
try:
|
|
1750
|
+
if not os.path.isdir(self.storagepath):
|
|
1751
|
+
os.makedirs(self.storagepath)
|
|
1752
|
+
|
|
1753
|
+
outbound_stamp_costs_file = open(
|
|
1754
|
+
self.storagepath + "/outbound_stamp_costs", "wb"
|
|
1755
|
+
)
|
|
1756
|
+
outbound_stamp_costs_file.write(
|
|
1757
|
+
msgpack.packb(self.outbound_stamp_costs)
|
|
1758
|
+
)
|
|
1759
|
+
outbound_stamp_costs_file.close()
|
|
1760
|
+
|
|
1761
|
+
except Exception as e:
|
|
1762
|
+
RNS.log(
|
|
1763
|
+
"Could not save outbound stamp costs to storage. The contained exception was: "
|
|
1764
|
+
+ str(e),
|
|
1765
|
+
RNS.LOG_ERROR,
|
|
1766
|
+
)
|
|
1767
|
+
|
|
1768
|
+
def clean_available_tickets(self):
|
|
1769
|
+
try:
|
|
1770
|
+
# Clean outbound tickets
|
|
1771
|
+
expired_outbound = []
|
|
1772
|
+
for destination_hash in self.available_tickets["outbound"]:
|
|
1773
|
+
entry = self.available_tickets["outbound"][destination_hash]
|
|
1774
|
+
if time.time() > entry[0]:
|
|
1775
|
+
expired_outbound.append(destination_hash)
|
|
1776
|
+
|
|
1777
|
+
for destination_hash in expired_outbound:
|
|
1778
|
+
self.available_tickets["outbound"].pop(destination_hash)
|
|
1779
|
+
|
|
1780
|
+
# Clean inbound tickets
|
|
1781
|
+
for destination_hash in self.available_tickets["inbound"]:
|
|
1782
|
+
expired_inbound = []
|
|
1783
|
+
for inbound_ticket in self.available_tickets["inbound"][
|
|
1784
|
+
destination_hash
|
|
1785
|
+
]:
|
|
1786
|
+
entry = self.available_tickets["inbound"][destination_hash][
|
|
1787
|
+
inbound_ticket
|
|
1788
|
+
]
|
|
1789
|
+
ticket_expiry = entry[0]
|
|
1790
|
+
if time.time() > ticket_expiry + LXMessage.TICKET_GRACE:
|
|
1791
|
+
expired_inbound.append(inbound_ticket)
|
|
1792
|
+
|
|
1793
|
+
for inbound_ticket in expired_inbound:
|
|
1794
|
+
self.available_tickets["inbound"][destination_hash].pop(
|
|
1795
|
+
inbound_ticket
|
|
1796
|
+
)
|
|
1797
|
+
|
|
1798
|
+
except Exception as e:
|
|
1799
|
+
RNS.log(
|
|
1800
|
+
f"Error while cleaning available tickets. The contained exception was: {e}",
|
|
1801
|
+
RNS.LOG_ERROR,
|
|
1802
|
+
)
|
|
1803
|
+
RNS.trace_exception(e)
|
|
1804
|
+
|
|
1805
|
+
def save_available_tickets(self):
|
|
1806
|
+
with self.ticket_file_lock:
|
|
1807
|
+
try:
|
|
1808
|
+
if not os.path.isdir(self.storagepath):
|
|
1809
|
+
os.makedirs(self.storagepath)
|
|
1810
|
+
|
|
1811
|
+
available_tickets_file = open(
|
|
1812
|
+
self.storagepath + "/available_tickets", "wb"
|
|
1813
|
+
)
|
|
1814
|
+
available_tickets_file.write(msgpack.packb(self.available_tickets))
|
|
1815
|
+
available_tickets_file.close()
|
|
1816
|
+
|
|
1817
|
+
except Exception as e:
|
|
1818
|
+
RNS.log(
|
|
1819
|
+
"Could not save available tickets to storage. The contained exception was: "
|
|
1820
|
+
+ str(e),
|
|
1821
|
+
RNS.LOG_ERROR,
|
|
1822
|
+
)
|
|
1823
|
+
|
|
1824
|
+
def reload_available_tickets(self):
|
|
1825
|
+
RNS.log("Reloading available tickets from storage", RNS.LOG_DEBUG)
|
|
1826
|
+
try:
|
|
1827
|
+
with self.ticket_file_lock:
|
|
1828
|
+
with open(
|
|
1829
|
+
self.storagepath + "/available_tickets", "rb"
|
|
1830
|
+
) as available_tickets_file:
|
|
1831
|
+
data = available_tickets_file.read()
|
|
1832
|
+
self.available_tickets = msgpack.unpackb(data)
|
|
1833
|
+
if not type(self.available_tickets) == dict:
|
|
1834
|
+
RNS.log(
|
|
1835
|
+
"Invalid data format for loaded available tickets, recreating...",
|
|
1836
|
+
RNS.LOG_ERROR,
|
|
1837
|
+
)
|
|
1838
|
+
self.available_tickets = {
|
|
1839
|
+
"outbound": {},
|
|
1840
|
+
"inbound": {},
|
|
1841
|
+
"last_deliveries": {},
|
|
1842
|
+
}
|
|
1843
|
+
if not "outbound" in self.available_tickets:
|
|
1844
|
+
RNS.log(
|
|
1845
|
+
"Missing outbound entry in loaded available tickets, recreating...",
|
|
1846
|
+
RNS.LOG_ERROR,
|
|
1847
|
+
)
|
|
1848
|
+
self.available_tickets["outbound"] = {}
|
|
1849
|
+
if not "inbound" in self.available_tickets:
|
|
1850
|
+
RNS.log(
|
|
1851
|
+
"Missing inbound entry in loaded available tickets, recreating...",
|
|
1852
|
+
RNS.LOG_ERROR,
|
|
1853
|
+
)
|
|
1854
|
+
self.available_tickets["inbound"] = {}
|
|
1855
|
+
if not "last_deliveries" in self.available_tickets:
|
|
1856
|
+
RNS.log(
|
|
1857
|
+
"Missing local_deliveries entry in loaded available tickets, recreating...",
|
|
1858
|
+
RNS.LOG_ERROR,
|
|
1859
|
+
)
|
|
1860
|
+
self.available_tickets["last_deliveries"] = {}
|
|
1861
|
+
|
|
1862
|
+
except Exception as e:
|
|
1863
|
+
RNS.log(
|
|
1864
|
+
f"An error occurred while reloading available tickets from storage: {e}",
|
|
1865
|
+
RNS.LOG_ERROR,
|
|
1866
|
+
)
|
|
1867
|
+
|
|
1868
|
+
def exit_handler(self):
|
|
1869
|
+
if self.exit_handler_running:
|
|
1870
|
+
return
|
|
1871
|
+
|
|
1872
|
+
self.exit_handler_running = True
|
|
1873
|
+
|
|
1874
|
+
RNS.log("Tearing down delivery destinations...", RNS.LOG_NOTICE)
|
|
1875
|
+
for destination_hash in self.delivery_destinations:
|
|
1876
|
+
delivery_destination = self.delivery_destinations[destination_hash]
|
|
1877
|
+
delivery_destination.set_packet_callback(None)
|
|
1878
|
+
delivery_destination.set_link_established_callback(None)
|
|
1879
|
+
for link in delivery_destination.links:
|
|
1880
|
+
try:
|
|
1881
|
+
if link.status == RNS.Link.ACTIVE:
|
|
1882
|
+
link.teardown()
|
|
1883
|
+
except Exception as e:
|
|
1884
|
+
RNS.log(
|
|
1885
|
+
"Error while tearing down propagation link: {e}", RNS.LOG_ERROR
|
|
1886
|
+
)
|
|
1887
|
+
|
|
1888
|
+
if self.propagation_node:
|
|
1889
|
+
RNS.log("Tearing down propagation node destination...", RNS.LOG_NOTICE)
|
|
1890
|
+
self.propagation_destination.set_link_established_callback(None)
|
|
1891
|
+
self.propagation_destination.set_packet_callback(None)
|
|
1892
|
+
self.propagation_destination.deregister_request_handler(
|
|
1893
|
+
LXMPeer.OFFER_REQUEST_PATH
|
|
1894
|
+
)
|
|
1895
|
+
self.propagation_destination.deregister_request_handler(
|
|
1896
|
+
LXMPeer.MESSAGE_GET_PATH
|
|
1897
|
+
)
|
|
1898
|
+
self.propagation_destination.deregister_request_handler(
|
|
1899
|
+
LXMRouter.STATS_GET_PATH
|
|
1900
|
+
)
|
|
1901
|
+
self.propagation_destination.deregister_request_handler(
|
|
1902
|
+
LXMRouter.SYNC_REQUEST_PATH
|
|
1903
|
+
)
|
|
1904
|
+
self.propagation_destination.deregister_request_handler(
|
|
1905
|
+
LXMRouter.UNPEER_REQUEST_PATH
|
|
1906
|
+
)
|
|
1907
|
+
for link in self.active_propagation_links:
|
|
1908
|
+
try:
|
|
1909
|
+
if link.status == RNS.Link.ACTIVE:
|
|
1910
|
+
link.teardown()
|
|
1911
|
+
except Exception as e:
|
|
1912
|
+
RNS.log(
|
|
1913
|
+
"Error while tearing down propagation link: {e}", RNS.LOG_ERROR
|
|
1914
|
+
)
|
|
1915
|
+
|
|
1916
|
+
RNS.log("Persisting LXMF state data to storage...", RNS.LOG_NOTICE)
|
|
1917
|
+
self.flush_queues()
|
|
1918
|
+
if self.propagation_node:
|
|
1919
|
+
try:
|
|
1920
|
+
st = time.time()
|
|
1921
|
+
RNS.log(
|
|
1922
|
+
f"Saving {len(self.peers)} peer synchronisation states to storage...",
|
|
1923
|
+
RNS.LOG_NOTICE,
|
|
1924
|
+
)
|
|
1925
|
+
serialised_peers = []
|
|
1926
|
+
peer_dict = self.peers.copy()
|
|
1927
|
+
for peer_id in peer_dict:
|
|
1928
|
+
peer = self.peers[peer_id]
|
|
1929
|
+
serialised_peers.append(peer.to_bytes())
|
|
1930
|
+
|
|
1931
|
+
peers_file = open(self.storagepath + "/peers", "wb")
|
|
1932
|
+
peers_file.write(msgpack.packb(serialised_peers))
|
|
1933
|
+
peers_file.close()
|
|
1934
|
+
|
|
1935
|
+
RNS.log(
|
|
1936
|
+
f"Saved {len(serialised_peers)} peers to storage in {RNS.prettyshorttime(time.time()-st)}",
|
|
1937
|
+
RNS.LOG_NOTICE,
|
|
1938
|
+
)
|
|
1939
|
+
|
|
1940
|
+
except Exception as e:
|
|
1941
|
+
RNS.log(
|
|
1942
|
+
"Could not save propagation node peers to storage. The contained exception was: "
|
|
1943
|
+
+ str(e),
|
|
1944
|
+
RNS.LOG_ERROR,
|
|
1945
|
+
)
|
|
1946
|
+
|
|
1947
|
+
self.save_locally_delivered_transient_ids()
|
|
1948
|
+
self.save_locally_processed_transient_ids()
|
|
1949
|
+
self.save_node_stats()
|
|
1950
|
+
|
|
1951
|
+
def sigint_handler(self, signal, frame):
|
|
1952
|
+
if not self.exit_handler_running:
|
|
1953
|
+
RNS.log("Received SIGINT, shutting down now!", RNS.LOG_WARNING)
|
|
1954
|
+
self.exit_handler()
|
|
1955
|
+
RNS.exit(0)
|
|
1956
|
+
else:
|
|
1957
|
+
RNS.log(
|
|
1958
|
+
"Received SIGINT, but exit handler is running, keeping process alive until storage persist is complete",
|
|
1959
|
+
RNS.LOG_WARNING,
|
|
1960
|
+
)
|
|
1961
|
+
|
|
1962
|
+
def sigterm_handler(self, signal, frame):
|
|
1963
|
+
if not self.exit_handler_running:
|
|
1964
|
+
RNS.log("Received SIGTERM, shutting down now!", RNS.LOG_WARNING)
|
|
1965
|
+
self.exit_handler()
|
|
1966
|
+
RNS.exit(0)
|
|
1967
|
+
else:
|
|
1968
|
+
RNS.log(
|
|
1969
|
+
"Received SIGTERM, but exit handler is running, keeping process alive until storage persist is complete",
|
|
1970
|
+
RNS.LOG_WARNING,
|
|
1971
|
+
)
|
|
1972
|
+
|
|
1973
|
+
def __str__(self):
|
|
1974
|
+
return "<LXMRouter " + RNS.hexrep(self.identity.hash, delimit=False) + ">"
|
|
1975
|
+
|
|
1976
|
+
### Message Download ##################################
|
|
1977
|
+
#######################################################
|
|
1978
|
+
|
|
1979
|
+
def request_messages_path_job(self):
|
|
1980
|
+
job_thread = threading.Thread(target=self.__request_messages_path_job)
|
|
1981
|
+
job_thread.daemon = True
|
|
1982
|
+
job_thread.start()
|
|
1983
|
+
|
|
1984
|
+
def __request_messages_path_job(self):
|
|
1985
|
+
path_timeout = self.wants_download_on_path_available_timeout
|
|
1986
|
+
while (
|
|
1987
|
+
not RNS.Transport.has_path(self.wants_download_on_path_available_from)
|
|
1988
|
+
and time.time() < path_timeout
|
|
1989
|
+
):
|
|
1990
|
+
time.sleep(0.1)
|
|
1991
|
+
|
|
1992
|
+
if RNS.Transport.has_path(self.wants_download_on_path_available_from):
|
|
1993
|
+
self.request_messages_from_propagation_node(
|
|
1994
|
+
self.wants_download_on_path_available_to,
|
|
1995
|
+
self.propagation_transfer_max_messages,
|
|
1996
|
+
)
|
|
1997
|
+
else:
|
|
1998
|
+
RNS.log("Propagation node path request timed out", RNS.LOG_DEBUG)
|
|
1999
|
+
self.acknowledge_sync_completion(failure_state=LXMRouter.PR_NO_PATH)
|
|
2000
|
+
|
|
2001
|
+
def identity_allowed(self, identity):
|
|
2002
|
+
if self.auth_required:
|
|
2003
|
+
if identity.hash in self.allowed_list:
|
|
2004
|
+
return True
|
|
2005
|
+
else:
|
|
2006
|
+
return False
|
|
2007
|
+
|
|
2008
|
+
else:
|
|
2009
|
+
return True
|
|
2010
|
+
|
|
2011
|
+
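# message_get_request() implements the propagation node side of a client sync.
# The request data is expected to be a list of [wants, haves, transfer_limit]:
# with both wants and haves unset it returns the transient IDs available for
# the requesting destination, smallest messages first; transient IDs listed in
# haves are deleted from the store; wanted messages are returned with the
# trailing stamp stripped, skipping any message that would push the response
# past the client's stated transfer limit.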
def message_get_request(
|
|
2012
|
+
self, path, data, request_id, remote_identity, requested_at
|
|
2013
|
+
):
|
|
2014
|
+
if remote_identity == None:
|
|
2015
|
+
return LXMPeer.ERROR_NO_IDENTITY
|
|
2016
|
+
elif not self.identity_allowed(remote_identity):
|
|
2017
|
+
return LXMPeer.ERROR_NO_ACCESS
|
|
2018
|
+
else:
|
|
2019
|
+
try:
|
|
2020
|
+
remote_destination = RNS.Destination(
|
|
2021
|
+
remote_identity,
|
|
2022
|
+
RNS.Destination.OUT,
|
|
2023
|
+
RNS.Destination.SINGLE,
|
|
2024
|
+
APP_NAME,
|
|
2025
|
+
"delivery",
|
|
2026
|
+
)
|
|
2027
|
+
|
|
2028
|
+
# If both want and have fields are empty, send a list of
|
|
2029
|
+
# available messages.
|
|
2030
|
+
if data[0] == None and data[1] == None:
|
|
2031
|
+
available_messages = []
|
|
2032
|
+
for transient_id in self.propagation_entries:
|
|
2033
|
+
message_entry = self.propagation_entries[transient_id]
|
|
2034
|
+
if message_entry[0] == remote_destination.hash:
|
|
2035
|
+
message_size = os.path.getsize(message_entry[1])
|
|
2036
|
+
available_entry = [transient_id, message_size]
|
|
2037
|
+
available_messages.append(available_entry)
|
|
2038
|
+
|
|
2039
|
+
available_messages.sort(key=lambda e: e[1], reverse=False)
|
|
2040
|
+
|
|
2041
|
+
transient_ids = []
|
|
2042
|
+
for available_entry in available_messages:
|
|
2043
|
+
transient_ids.append(available_entry[0])
|
|
2044
|
+
return transient_ids
|
|
2045
|
+
|
|
2046
|
+
else:
|
|
2047
|
+
# Process messages the client already has
|
|
2048
|
+
if data[1] != None and len(data[1]) > 0:
|
|
2049
|
+
for transient_id in data[1]:
|
|
2050
|
+
if (
|
|
2051
|
+
transient_id in self.propagation_entries
|
|
2052
|
+
and self.propagation_entries[transient_id][0]
|
|
2053
|
+
== remote_destination.hash
|
|
2054
|
+
):
|
|
2055
|
+
try:
|
|
2056
|
+
filepath = self.propagation_entries[transient_id][1]
|
|
2057
|
+
self.propagation_entries.pop(transient_id)
|
|
2058
|
+
os.unlink(filepath)
|
|
2059
|
+
# TODO: Remove debug
|
|
2060
|
+
# RNS.log("Client "+RNS.prettyhexrep(remote_destination.hash)+" purged message "+RNS.prettyhexrep(transient_id)+" at "+str(filepath), RNS.LOG_DEBUG)
|
|
2061
|
+
|
|
2062
|
+
except Exception as e:
|
|
2063
|
+
RNS.log(
|
|
2064
|
+
"Error while processing message purge request from "
|
|
2065
|
+
+ RNS.prettyhexrep(remote_destination.hash)
|
|
2066
|
+
+ ". The contained exception was: "
|
|
2067
|
+
+ str(e),
|
|
2068
|
+
RNS.LOG_ERROR,
|
|
2069
|
+
)
|
|
2070
|
+
|
|
2071
|
+
# Process wanted messages
|
|
2072
|
+
response_messages = []
|
|
2073
|
+
if data[0] != None and len(data[0]) > 0:
|
|
2074
|
+
client_transfer_limit = None
|
|
2075
|
+
if len(data) >= 3:
|
|
2076
|
+
try:
|
|
2077
|
+
client_transfer_limit = float(data[2]) * 1000
|
|
2078
|
+
RNS.log(
|
|
2079
|
+
"Client indicates transfer limit of "
|
|
2080
|
+
+ RNS.prettysize(client_transfer_limit),
|
|
2081
|
+
RNS.LOG_DEBUG,
|
|
2082
|
+
)
|
|
2083
|
+
except:
|
|
2084
|
+
pass
|
|
2085
|
+
|
|
2086
|
+
per_message_overhead = (
|
|
2087
|
+
16 # Really only 2 bytes, but set a bit higher for now
|
|
2088
|
+
)
|
|
2089
|
+
cumulative_size = 24 # Initialised to highest reasonable binary structure overhead
|
|
2090
|
+
for transient_id in data[0]:
|
|
2091
|
+
if (
|
|
2092
|
+
transient_id in self.propagation_entries
|
|
2093
|
+
and self.propagation_entries[transient_id][0]
|
|
2094
|
+
== remote_destination.hash
|
|
2095
|
+
):
|
|
2096
|
+
try:
|
|
2097
|
+
filepath = self.propagation_entries[transient_id][1]
|
|
2098
|
+
RNS.log(
|
|
2099
|
+
"Client "
|
|
2100
|
+
+ RNS.prettyhexrep(remote_destination.hash)
|
|
2101
|
+
+ " requested message "
|
|
2102
|
+
+ RNS.prettyhexrep(transient_id)
|
|
2103
|
+
+ " at "
|
|
2104
|
+
+ str(filepath),
|
|
2105
|
+
RNS.LOG_DEBUG,
|
|
2106
|
+
)
|
|
2107
|
+
|
|
2108
|
+
message_file = open(filepath, "rb")
|
|
2109
|
+
lxmf_data = message_file.read()
|
|
2110
|
+
message_file.close()
|
|
2111
|
+
|
|
2112
|
+
lxm_size = len(lxmf_data)
|
|
2113
|
+
next_size = cumulative_size + (
|
|
2114
|
+
lxm_size + per_message_overhead
|
|
2115
|
+
)
|
|
2116
|
+
|
|
2117
|
+
if (
|
|
2118
|
+
client_transfer_limit != None
|
|
2119
|
+
and next_size > client_transfer_limit
|
|
2120
|
+
):
|
|
2121
|
+
pass
|
|
2122
|
+
else:
|
|
2123
|
+
response_messages.append(
|
|
2124
|
+
lxmf_data[: -LXStamper.STAMP_SIZE]
|
|
2125
|
+
)
|
|
2126
|
+
cumulative_size += (
|
|
2127
|
+
lxm_size + per_message_overhead
|
|
2128
|
+
)
|
|
2129
|
+
|
|
2130
|
+
except Exception as e:
|
|
2131
|
+
RNS.log(
|
|
2132
|
+
"Error while processing message download request from "
|
|
2133
|
+
+ RNS.prettyhexrep(remote_destination.hash)
|
|
2134
|
+
+ ". The contained exception was: "
|
|
2135
|
+
+ str(e),
|
|
2136
|
+
RNS.LOG_ERROR,
|
|
2137
|
+
)
|
|
2138
|
+
|
|
2139
|
+
self.client_propagation_messages_served += len(response_messages)
|
|
2140
|
+
return response_messages
|
|
2141
|
+
|
|
2142
|
+
except Exception as e:
|
|
2143
|
+
RNS.log(
|
|
2144
|
+
"Error occurred while generating response for download request, the contained exception was: "
|
|
2145
|
+
+ str(e),
|
|
2146
|
+
RNS.LOG_DEBUG,
|
|
2147
|
+
)
|
|
2148
|
+
return None
|
|
2149
|
+
|
|
2150
|
+
def message_list_response(self, request_receipt):
|
|
2151
|
+
if request_receipt.response == LXMPeer.ERROR_NO_IDENTITY:
|
|
2152
|
+
RNS.log(
|
|
2153
|
+
"Propagation node indicated missing identification on list request, tearing down link.",
|
|
2154
|
+
RNS.LOG_DEBUG,
|
|
2155
|
+
)
|
|
2156
|
+
if self.outbound_propagation_link != None:
|
|
2157
|
+
self.outbound_propagation_link.teardown()
|
|
2158
|
+
self.propagation_transfer_state = LXMRouter.PR_NO_IDENTITY_RCVD
|
|
2159
|
+
|
|
2160
|
+
elif request_receipt.response == LXMPeer.ERROR_NO_ACCESS:
|
|
2161
|
+
RNS.log(
|
|
2162
|
+
"Propagation node did not allow list request, tearing down link.",
|
|
2163
|
+
RNS.LOG_DEBUG,
|
|
2164
|
+
)
|
|
2165
|
+
if self.outbound_propagation_link != None:
|
|
2166
|
+
self.outbound_propagation_link.teardown()
|
|
2167
|
+
self.propagation_transfer_state = LXMRouter.PR_NO_ACCESS
|
|
2168
|
+
|
|
2169
|
+
else:
|
|
2170
|
+
if request_receipt.response != None and isinstance(
|
|
2171
|
+
request_receipt.response, list
|
|
2172
|
+
):
|
|
2173
|
+
haves = []
|
|
2174
|
+
wants = []
|
|
2175
|
+
if len(request_receipt.response) > 0:
|
|
2176
|
+
for transient_id in request_receipt.response:
|
|
2177
|
+
if self.has_message(transient_id):
|
|
2178
|
+
if not self.retain_synced_on_node:
|
|
2179
|
+
haves.append(transient_id)
|
|
2180
|
+
else:
|
|
2181
|
+
if (
|
|
2182
|
+
self.propagation_transfer_max_messages
|
|
2183
|
+
== LXMRouter.PR_ALL_MESSAGES
|
|
2184
|
+
or len(wants) < self.propagation_transfer_max_messages
|
|
2185
|
+
):
|
|
2186
|
+
wants.append(transient_id)
|
|
2187
|
+
|
|
2188
|
+
ms = "" if len(wants) == 1 else "s"
|
|
2189
|
+
RNS.log(
|
|
2190
|
+
f"Requesting {len(wants)} message{ms} from propagation node",
|
|
2191
|
+
RNS.LOG_DEBUG,
|
|
2192
|
+
)
|
|
2193
|
+
request_receipt.link.request(
|
|
2194
|
+
LXMPeer.MESSAGE_GET_PATH,
|
|
2195
|
+
[wants, haves, self.delivery_per_transfer_limit],
|
|
2196
|
+
response_callback=self.message_get_response,
|
|
2197
|
+
failed_callback=self.message_get_failed,
|
|
2198
|
+
progress_callback=self.message_get_progress,
|
|
2199
|
+
)
|
|
2200
|
+
|
|
2201
|
+
else:
|
|
2202
|
+
self.propagation_transfer_state = LXMRouter.PR_COMPLETE
|
|
2203
|
+
self.propagation_transfer_progress = 1.0
|
|
2204
|
+
self.propagation_transfer_last_result = 0
|
|
2205
|
+
|
|
2206
|
+
else:
|
|
2207
|
+
RNS.log(
|
|
2208
|
+
"Invalid message list data received from propagation node",
|
|
2209
|
+
RNS.LOG_DEBUG,
|
|
2210
|
+
)
|
|
2211
|
+
if self.outbound_propagation_link != None:
|
|
2212
|
+
self.outbound_propagation_link.teardown()
|
|
2213
|
+
|
|
2214
|
+
def message_get_response(self, request_receipt):
|
|
2215
|
+
if request_receipt.response == LXMPeer.ERROR_NO_IDENTITY:
|
|
2216
|
+
RNS.log(
|
|
2217
|
+
"Propagation node indicated missing identification on get request, tearing down link.",
|
|
2218
|
+
RNS.LOG_DEBUG,
|
|
2219
|
+
)
|
|
2220
|
+
if self.outbound_propagation_link != None:
|
|
2221
|
+
self.outbound_propagation_link.teardown()
|
|
2222
|
+
self.propagation_transfer_state = LXMRouter.PR_NO_IDENTITY_RCVD
|
|
2223
|
+
|
|
2224
|
+
elif request_receipt.response == LXMPeer.ERROR_NO_ACCESS:
|
|
2225
|
+
RNS.log(
|
|
2226
|
+
"Propagation node did not allow get request, tearing down link.",
|
|
2227
|
+
RNS.LOG_DEBUG,
|
|
2228
|
+
)
|
|
2229
|
+
if self.outbound_propagation_link != None:
|
|
2230
|
+
self.outbound_propagation_link.teardown()
|
|
2231
|
+
self.propagation_transfer_state = LXMRouter.PR_NO_ACCESS
|
|
2232
|
+
|
|
2233
|
+
else:
|
|
2234
|
+
duplicates = 0
|
|
2235
|
+
if request_receipt.response != None and len(request_receipt.response) > 0:
|
|
2236
|
+
haves = []
|
|
2237
|
+
for lxmf_data in request_receipt.response:
|
|
2238
|
+
result = self.lxmf_propagation(
|
|
2239
|
+
lxmf_data, signal_duplicate=LXMRouter.DUPLICATE_SIGNAL
|
|
2240
|
+
)
|
|
2241
|
+
if result == LXMRouter.DUPLICATE_SIGNAL:
|
|
2242
|
+
duplicates += 1
|
|
2243
|
+
haves.append(RNS.Identity.full_hash(lxmf_data))
|
|
2244
|
+
|
|
2245
|
+
# Return a list of successfully received messages to the node.
|
|
2246
|
+
# This deletes the messages on the propagation node.
|
|
2247
|
+
# TODO: Add option to keep messages on node.
|
|
2248
|
+
request_receipt.link.request(
|
|
2249
|
+
LXMPeer.MESSAGE_GET_PATH,
|
|
2250
|
+
[None, haves],
|
|
2251
|
+
# response_callback=self.message_syncfinal_response,
|
|
2252
|
+
failed_callback=self.message_get_failed,
|
|
2253
|
+
# progress_callback=self.message_get_progress
|
|
2254
|
+
)
|
|
2255
|
+
|
|
2256
|
+
self.propagation_transfer_state = LXMRouter.PR_COMPLETE
|
|
2257
|
+
self.propagation_transfer_progress = 1.0
|
|
2258
|
+
self.propagation_transfer_last_duplicates = duplicates
|
|
2259
|
+
self.propagation_transfer_last_result = len(request_receipt.response)
|
|
2260
|
+
self.save_locally_delivered_transient_ids()
|
|
2261
|
+
|
|
2262
|
+
def message_get_progress(self, request_receipt):
|
|
2263
|
+
self.propagation_transfer_state = LXMRouter.PR_RECEIVING
|
|
2264
|
+
self.propagation_transfer_progress = request_receipt.get_progress()
|
|
2265
|
+
|
|
2266
|
+
def message_get_failed(self, request_receipt):
|
|
2267
|
+
RNS.log("Message list/get request failed", RNS.LOG_DEBUG)
|
|
2268
|
+
if self.outbound_propagation_link != None:
|
|
2269
|
+
self.outbound_propagation_link.teardown()
|
|
2270
|
+
|
|
2271
|
+
def acknowledge_sync_completion(self, reset_state=False, failure_state=None):
|
|
2272
|
+
self.propagation_transfer_last_result = None
|
|
2273
|
+
if reset_state or self.propagation_transfer_state <= LXMRouter.PR_COMPLETE:
|
|
2274
|
+
if failure_state == None:
|
|
2275
|
+
self.propagation_transfer_state = LXMRouter.PR_IDLE
|
|
2276
|
+
else:
|
|
2277
|
+
self.propagation_transfer_state = failure_state
|
|
2278
|
+
|
|
2279
|
+
self.propagation_transfer_progress = 0.0
|
|
2280
|
+
self.wants_download_on_path_available_from = None
|
|
2281
|
+
self.wants_download_on_path_available_to = None
|
|
2282
|
+
|
|
2283
|
+
def has_message(self, transient_id):
|
|
2284
|
+
if transient_id in self.locally_delivered_transient_ids:
|
|
2285
|
+
return True
|
|
2286
|
+
else:
|
|
2287
|
+
return False
|
|
2288
|
+
|
|
2289
|
+
def cancel_outbound(self, message_id, cancel_state=LXMessage.CANCELLED):
|
|
2290
|
+
try:
|
|
2291
|
+
if message_id in self.pending_deferred_stamps:
|
|
2292
|
+
lxm = self.pending_deferred_stamps[message_id]
|
|
2293
|
+
RNS.log(
|
|
2294
|
+
f"Cancelling deferred stamp generation for {lxm}", RNS.LOG_DEBUG
|
|
2295
|
+
)
|
|
2296
|
+
lxm.state = cancel_state
|
|
2297
|
+
LXStamper.cancel_work(message_id)
|
|
2298
|
+
|
|
2299
|
+
lxmessage = None
|
|
2300
|
+
for lxm in self.pending_outbound:
|
|
2301
|
+
if lxm.message_id == message_id:
|
|
2302
|
+
lxmessage = lxm
|
|
2303
|
+
|
|
2304
|
+
if lxmessage != None:
|
|
2305
|
+
lxmessage.state = cancel_state
|
|
2306
|
+
if lxmessage in self.pending_outbound:
|
|
2307
|
+
RNS.log(f"Cancelling {lxmessage} in outbound queue", RNS.LOG_DEBUG)
|
|
2308
|
+
if lxmessage.representation == LXMessage.RESOURCE:
|
|
2309
|
+
if lxmessage.resource_representation != None:
|
|
2310
|
+
lxmessage.resource_representation.cancel()
|
|
2311
|
+
|
|
2312
|
+
self.process_outbound()
|
|
2313
|
+
|
|
2314
|
+
except Exception as e:
|
|
2315
|
+
RNS.log(
|
|
2316
|
+
f"An error occurred while cancelling {lxmessage}: {e}", RNS.LOG_ERROR
|
|
2317
|
+
)
|
|
2318
|
+
RNS.trace_exception(e)
|
|
2319
|
+
|
|
2320
|
+
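# handle_outbound() is the entry point for queueing a message for delivery: it
# autoconfigures the stamp cost from the latest announce if none is set,
# attaches an outbound ticket when one is available (which also cancels any
# requested deferred stamp generation), optionally includes a reply ticket,
# packs the message, pre-requests a path for opportunistic delivery, and
# finally either appends the message to pending_outbound or parks it in
# pending_deferred_stamps for background stamp generation.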
def handle_outbound(self, lxmessage):
|
|
2321
|
+
destination_hash = lxmessage.get_destination().hash
|
|
2322
|
+
|
|
2323
|
+
if lxmessage.stamp_cost == None:
|
|
2324
|
+
if destination_hash in self.outbound_stamp_costs:
|
|
2325
|
+
stamp_cost = self.outbound_stamp_costs[destination_hash][1]
|
|
2326
|
+
lxmessage.stamp_cost = stamp_cost
|
|
2327
|
+
RNS.log(
|
|
2328
|
+
f"No stamp cost set on LXM to {RNS.prettyhexrep(destination_hash)}, autoconfigured to {stamp_cost}, as required by latest announce",
|
|
2329
|
+
RNS.LOG_DEBUG,
|
|
2330
|
+
)
|
|
2331
|
+
|
|
2332
|
+
lxmessage.state = LXMessage.OUTBOUND
|
|
2333
|
+
|
|
2334
|
+
# If an outbound ticket is available for this
|
|
2335
|
+
# destination, attach it to the message.
|
|
2336
|
+
lxmessage.outbound_ticket = self.get_outbound_ticket(destination_hash)
|
|
2337
|
+
if lxmessage.outbound_ticket != None and lxmessage.defer_stamp:
|
|
2338
|
+
RNS.log(
|
|
2339
|
+
f"Deferred stamp generation was requested for {lxmessage}, but outbound ticket was applied, processing immediately",
|
|
2340
|
+
RNS.LOG_DEBUG,
|
|
2341
|
+
)
|
|
2342
|
+
lxmessage.defer_stamp = False
|
|
2343
|
+
|
|
2344
|
+
# If requested, include a ticket to allow the
|
|
2345
|
+
# destination to reply without generating a stamp.
|
|
2346
|
+
if lxmessage.include_ticket:
|
|
2347
|
+
ticket = self.generate_ticket(lxmessage.destination_hash)
|
|
2348
|
+
if ticket:
|
|
2349
|
+
lxmessage.fields[FIELD_TICKET] = ticket
|
|
2350
|
+
|
|
2351
|
+
if not lxmessage.packed:
|
|
2352
|
+
lxmessage.pack()
|
|
2353
|
+
|
|
2354
|
+
unknown_path_requested = False
|
|
2355
|
+
if (
|
|
2356
|
+
not RNS.Transport.has_path(destination_hash)
|
|
2357
|
+
and lxmessage.method == LXMessage.OPPORTUNISTIC
|
|
2358
|
+
):
|
|
2359
|
+
RNS.log(
|
|
2360
|
+
f"Pre-emptively requesting unknown path for opportunistic {lxmessage}",
|
|
2361
|
+
RNS.LOG_DEBUG,
|
|
2362
|
+
)
|
|
2363
|
+
RNS.Transport.request_path(destination_hash)
|
|
2364
|
+
lxmessage.next_delivery_attempt = time.time() + LXMRouter.PATH_REQUEST_WAIT
|
|
2365
|
+
unknown_path_requested = True
|
|
2366
|
+
|
|
2367
|
+
lxmessage.determine_transport_encryption()
|
|
2368
|
+
|
|
2369
|
+
if lxmessage.defer_stamp and lxmessage.stamp_cost == None:
|
|
2370
|
+
RNS.log(
|
|
2371
|
+
f"Deferred stamp generation was requested for {lxmessage}, but no stamp is required, processing immediately",
|
|
2372
|
+
RNS.LOG_DEBUG,
|
|
2373
|
+
)
|
|
2374
|
+
lxmessage.defer_stamp = False
|
|
2375
|
+
|
|
2376
|
+
if not lxmessage.defer_stamp and not (
|
|
2377
|
+
lxmessage.desired_method == LXMessage.PROPAGATED
|
|
2378
|
+
and lxmessage.defer_propagation_stamp
|
|
2379
|
+
):
|
|
2380
|
+
while not unknown_path_requested and self.processing_outbound:
|
|
2381
|
+
time.sleep(0.05)
|
|
2382
|
+
|
|
2383
|
+
self.pending_outbound.append(lxmessage)
|
|
2384
|
+
if not unknown_path_requested:
|
|
2385
|
+
self.process_outbound()
|
|
2386
|
+
|
|
2387
|
+
else:
|
|
2388
|
+
self.pending_deferred_stamps[lxmessage.message_id] = lxmessage
|
|
2389
|
+
|
|
2390
|
+
def get_outbound_progress(self, lxm_hash):
|
|
2391
|
+
for lxm in self.pending_outbound:
|
|
2392
|
+
if lxm.hash == lxm_hash:
|
|
2393
|
+
return lxm.progress
|
|
2394
|
+
|
|
2395
|
+
for lxm_id in self.pending_deferred_stamps:
|
|
2396
|
+
if self.pending_deferred_stamps[lxm_id].hash == lxm_hash:
|
|
2397
|
+
return self.pending_deferred_stamps[lxm_id].progress
|
|
2398
|
+
|
|
2399
|
+
return None
|
|
2400
|
+
|
|
2401
|
+
def get_outbound_lxm_stamp_cost(self, lxm_hash):
|
|
2402
|
+
for lxm in self.pending_outbound:
|
|
2403
|
+
if lxm.hash == lxm_hash:
|
|
2404
|
+
if lxm.outbound_ticket:
|
|
2405
|
+
return None
|
|
2406
|
+
else:
|
|
2407
|
+
return lxm.stamp_cost
|
|
2408
|
+
|
|
2409
|
+
for lxm_id in self.pending_deferred_stamps:
|
|
2410
|
+
if self.pending_deferred_stamps[lxm_id].hash == lxm_hash:
|
|
2411
|
+
lxm = self.pending_deferred_stamps[lxm_id]
|
|
2412
|
+
if lxm.outbound_ticket:
|
|
2413
|
+
return None
|
|
2414
|
+
else:
|
|
2415
|
+
return lxm.stamp_cost
|
|
2416
|
+
|
|
2417
|
+
return None
|
|
2418
|
+
|
|
2419
|
+
def get_outbound_lxm_propagation_stamp_cost(self, lxm_hash):
|
|
2420
|
+
for lxm in self.pending_outbound:
|
|
2421
|
+
if lxm.hash == lxm_hash:
|
|
2422
|
+
return lxm.propagation_target_cost
|
|
2423
|
+
|
|
2424
|
+
for lxm_id in self.pending_deferred_stamps:
|
|
2425
|
+
if self.pending_deferred_stamps[lxm_id].hash == lxm_hash:
|
|
2426
|
+
return self.pending_deferred_stamps[lxm_id].propagation_target_cost
|
|
2427
|
+
|
|
2428
|
+
return None
|
|
2429
|
+
|
|
2430
|
+
### Message Routing & Delivery ########################
|
|
2431
|
+
#######################################################
|
|
2432
|
+
|
|
2433
|
+
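# lxmf_delivery() validates and dispatches a locally received message: it
# unpacks the LXMF data, stores any reply ticket included by the sender, checks
# the stamp against the stamp cost configured for the receiving delivery
# destination (honouring inbound tickets and the stamp enforcement switches),
# records physical link stats, drops ignored sources and duplicates via
# locally_delivered_transient_ids, and finally hands the message to the
# registered delivery callback.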
def lxmf_delivery(
|
|
2434
|
+
self,
|
|
2435
|
+
lxmf_data,
|
|
2436
|
+
destination_type=None,
|
|
2437
|
+
phy_stats=None,
|
|
2438
|
+
ratchet_id=None,
|
|
2439
|
+
method=None,
|
|
2440
|
+
no_stamp_enforcement=False,
|
|
2441
|
+
allow_duplicate=False,
|
|
2442
|
+
):
|
|
2443
|
+
try:
|
|
2444
|
+
message = LXMessage.unpack_from_bytes(lxmf_data)
|
|
2445
|
+
if ratchet_id and not message.ratchet_id:
|
|
2446
|
+
message.ratchet_id = ratchet_id
|
|
2447
|
+
|
|
2448
|
+
if method:
|
|
2449
|
+
message.method = method
|
|
2450
|
+
|
|
2451
|
+
if message.signature_validated and FIELD_TICKET in message.fields:
|
|
2452
|
+
ticket_entry = message.fields[FIELD_TICKET]
|
|
2453
|
+
if type(ticket_entry) == list and len(ticket_entry) > 1:
|
|
2454
|
+
expires = ticket_entry[0]
|
|
2455
|
+
ticket = ticket_entry[1]
|
|
2456
|
+
|
|
2457
|
+
if time.time() < expires:
|
|
2458
|
+
if (
|
|
2459
|
+
type(ticket) == bytes
|
|
2460
|
+
and len(ticket) == LXMessage.TICKET_LENGTH
|
|
2461
|
+
):
|
|
2462
|
+
self.remember_ticket(message.source_hash, ticket_entry)
|
|
2463
|
+
|
|
2464
|
+
def save_job():
|
|
2465
|
+
self.save_available_tickets()
|
|
2466
|
+
|
|
2467
|
+
threading.Thread(target=save_job, daemon=True).start()
|
|
2468
|
+
|
|
2469
|
+
required_stamp_cost = self.delivery_destinations[
|
|
2470
|
+
message.destination_hash
|
|
2471
|
+
].stamp_cost
|
|
2472
|
+
if required_stamp_cost != None:
|
|
2473
|
+
destination_tickets = self.get_inbound_tickets(message.source_hash)
|
|
2474
|
+
if message.validate_stamp(
|
|
2475
|
+
required_stamp_cost, tickets=destination_tickets
|
|
2476
|
+
):
|
|
2477
|
+
message.stamp_valid = True
|
|
2478
|
+
message.stamp_checked = True
|
|
2479
|
+
else:
|
|
2480
|
+
message.stamp_valid = False
|
|
2481
|
+
message.stamp_checked = True
|
|
2482
|
+
|
|
2483
|
+
if not message.stamp_valid:
|
|
2484
|
+
if no_stamp_enforcement:
|
|
2485
|
+
RNS.log(
|
|
2486
|
+
f"Received {message} with invalid stamp, but allowing anyway, since stamp enforcement was temporarily disabled",
|
|
2487
|
+
RNS.LOG_NOTICE,
|
|
2488
|
+
)
|
|
2489
|
+
else:
|
|
2490
|
+
if self._enforce_stamps:
|
|
2491
|
+
RNS.log(
|
|
2492
|
+
f"Dropping {message} with invalid stamp", RNS.LOG_NOTICE
|
|
2493
|
+
)
|
|
2494
|
+
return False
|
|
2495
|
+
else:
|
|
2496
|
+
RNS.log(
|
|
2497
|
+
f"Received {message} with invalid stamp, but allowing anyway, since stamp enforcement is disabled",
|
|
2498
|
+
RNS.LOG_NOTICE,
|
|
2499
|
+
)
|
|
2500
|
+
else:
|
|
2501
|
+
RNS.log(f"Received {message} with valid stamp", RNS.LOG_DEBUG)
|
|
2502
|
+
|
|
2503
|
+
if phy_stats != None:
|
|
2504
|
+
if "rssi" in phy_stats:
|
|
2505
|
+
message.rssi = phy_stats["rssi"]
|
|
2506
|
+
if "snr" in phy_stats:
|
|
2507
|
+
message.snr = phy_stats["snr"]
|
|
2508
|
+
if "q" in phy_stats:
|
|
2509
|
+
message.q = phy_stats["q"]
|
|
2510
|
+
|
|
2511
|
+
# TODO: Update these descriptions to account for ratchets
|
|
2512
|
+
if destination_type == RNS.Destination.SINGLE:
|
|
2513
|
+
message.transport_encrypted = True
|
|
2514
|
+
message.transport_encryption = LXMessage.ENCRYPTION_DESCRIPTION_EC
|
|
2515
|
+
elif destination_type == RNS.Destination.GROUP:
|
|
2516
|
+
message.transport_encrypted = True
|
|
2517
|
+
message.transport_encryption = LXMessage.ENCRYPTION_DESCRIPTION_AES
|
|
2518
|
+
elif destination_type == RNS.Destination.LINK:
|
|
2519
|
+
message.transport_encrypted = True
|
|
2520
|
+
message.transport_encryption = LXMessage.ENCRYPTION_DESCRIPTION_EC
|
|
2521
|
+
else:
|
|
2522
|
+
message.transport_encrypted = False
|
|
2523
|
+
message.transport_encryption = None
|
|
2524
|
+
|
|
2525
|
+
if message.source_hash in self.ignored_list:
|
|
2526
|
+
RNS.log(
|
|
2527
|
+
str(self)
|
|
2528
|
+
+ " ignored message from "
|
|
2529
|
+
+ RNS.prettyhexrep(message.source_hash),
|
|
2530
|
+
RNS.LOG_DEBUG,
|
|
2531
|
+
)
|
|
2532
|
+
return False
|
|
2533
|
+
|
|
2534
|
+
if not allow_duplicate and self.has_message(message.hash):
|
|
2535
|
+
RNS.log(
|
|
2536
|
+
str(self)
|
|
2537
|
+
+ " ignored already received message from "
|
|
2538
|
+
+ RNS.prettyhexrep(message.source_hash),
|
|
2539
|
+
RNS.LOG_DEBUG,
|
|
2540
|
+
)
|
|
2541
|
+
return False
|
|
2542
|
+
else:
|
|
2543
|
+
self.locally_delivered_transient_ids[message.hash] = time.time()
|
|
2544
|
+
|
|
2545
|
+
if self.__delivery_callback != None and callable(self.__delivery_callback):
|
|
2546
|
+
try:
|
|
2547
|
+
self.__delivery_callback(message)
|
|
2548
|
+
except Exception as e:
|
|
2549
|
+
RNS.log(
|
|
2550
|
+
"An error occurred in the external delivery callback for "
|
|
2551
|
+
+ str(message),
|
|
2552
|
+
RNS.LOG_ERROR,
|
|
2553
|
+
)
|
|
2554
|
+
RNS.trace_exception(e)
|
|
2555
|
+
|
|
2556
|
+
return True
|
|
2557
|
+
|
|
2558
|
+
except Exception as e:
|
|
2559
|
+
RNS.log(
|
|
2560
|
+
"Could not assemble LXMF message from received data", RNS.LOG_NOTICE
|
|
2561
|
+
)
|
|
2562
|
+
RNS.log("The contained exception was: " + str(e), RNS.LOG_DEBUG)
|
|
2563
|
+
return False
|
|
2564
|
+
|
|
2565
|
+
def delivery_packet(self, data, packet):
|
|
2566
|
+
packet.prove()
|
|
2567
|
+
try:
|
|
2568
|
+
method = None
|
|
2569
|
+
if packet.destination_type != RNS.Destination.LINK:
|
|
2570
|
+
method = LXMessage.OPPORTUNISTIC
|
|
2571
|
+
lxmf_data = b""
|
|
2572
|
+
lxmf_data += packet.destination.hash
|
|
2573
|
+
lxmf_data += data
|
|
2574
|
+
else:
|
|
2575
|
+
method = LXMessage.DIRECT
|
|
2576
|
+
lxmf_data = data
|
|
2577
|
+
|
|
2578
|
+
try:
|
|
2579
|
+
reticulum = RNS.Reticulum.get_instance()
|
|
2580
|
+
if packet.rssi == None:
|
|
2581
|
+
packet.rssi = reticulum.get_packet_rssi(packet.packet_hash)
|
|
2582
|
+
if packet.snr == None:
|
|
2583
|
+
packet.snr = reticulum.get_packet_snr(packet.packet_hash)
|
|
2584
|
+
if packet.q == None:
|
|
2585
|
+
packet.q = reticulum.get_packet_q(packet.packet_hash)
|
|
2586
|
+
except Exception as e:
|
|
2587
|
+
RNS.log(
|
|
2588
|
+
"Error while retrieving physical link stats for LXMF delivery packet: "
|
|
2589
|
+
+ str(e),
|
|
2590
|
+
RNS.LOG_ERROR,
|
|
2591
|
+
)
|
|
2592
|
+
|
|
2593
|
+
phy_stats = {"rssi": packet.rssi, "snr": packet.snr, "q": packet.q}
|
|
2594
|
+
|
|
2595
|
+
self.lxmf_delivery(
|
|
2596
|
+
lxmf_data,
|
|
2597
|
+
packet.destination_type,
|
|
2598
|
+
phy_stats=phy_stats,
|
|
2599
|
+
ratchet_id=packet.ratchet_id,
|
|
2600
|
+
method=method,
|
|
2601
|
+
)
|
|
2602
|
+
|
|
2603
|
+
except Exception as e:
|
|
2604
|
+
RNS.log(
|
|
2605
|
+
"Exception occurred while parsing incoming LXMF data.", RNS.LOG_ERROR
|
|
2606
|
+
)
|
|
2607
|
+
RNS.log("The contained exception was: " + str(e), RNS.LOG_ERROR)
|
|
2608
|
+
|
|
2609
|
+
def delivery_link_established(self, link):
|
|
2610
|
+
link.track_phy_stats(True)
|
|
2611
|
+
link.set_packet_callback(self.delivery_packet)
|
|
2612
|
+
link.set_resource_strategy(RNS.Link.ACCEPT_APP)
|
|
2613
|
+
link.set_resource_callback(self.delivery_resource_advertised)
|
|
2614
|
+
link.set_resource_started_callback(self.resource_transfer_began)
|
|
2615
|
+
link.set_resource_concluded_callback(self.delivery_resource_concluded)
|
|
2616
|
+
link.set_remote_identified_callback(self.delivery_remote_identified)
|
|
2617
|
+
|
|
2618
|
+
def delivery_link_closed(self, link):
|
|
2619
|
+
pass
|
|
2620
|
+
|
|
2621
|
+
def resource_transfer_began(self, resource):
|
|
2622
|
+
RNS.log(
|
|
2623
|
+
"Transfer began for LXMF delivery resource " + str(resource), RNS.LOG_DEBUG
|
|
2624
|
+
)
|
|
2625
|
+
|
|
2626
|
+
def delivery_resource_advertised(self, resource):
|
|
2627
|
+
size = resource.get_data_size()
|
|
2628
|
+
limit = self.delivery_per_transfer_limit * 1000 if self.delivery_per_transfer_limit != None else None
|
|
2629
|
+
if limit != None and size > limit:
|
|
2630
|
+
RNS.log(
|
|
2631
|
+
"Rejecting "
|
|
2632
|
+
+ RNS.prettysize(size)
|
|
2633
|
+
+ " incoming LXMF delivery resource, since it exceeds the limit of "
|
|
2634
|
+
+ RNS.prettysize(limit),
|
|
2635
|
+
RNS.LOG_DEBUG,
|
|
2636
|
+
)
|
|
2637
|
+
return False
|
|
2638
|
+
else:
|
|
2639
|
+
return True
|
|
2640
|
+
|
|
2641
|
+
def delivery_resource_concluded(self, resource):
|
|
2642
|
+
RNS.log(
|
|
2643
|
+
"Transfer concluded for LXMF delivery resource " + str(resource),
|
|
2644
|
+
RNS.LOG_DEBUG,
|
|
2645
|
+
)
|
|
2646
|
+
if resource.status == RNS.Resource.COMPLETE:
|
|
2647
|
+
ratchet_id = None
|
|
2648
|
+
# Set ratchet ID to link ID if available
|
|
2649
|
+
if resource.link and hasattr(resource.link, "link_id"):
|
|
2650
|
+
ratchet_id = resource.link.link_id
|
|
2651
|
+
phy_stats = {
|
|
2652
|
+
"rssi": resource.link.rssi,
|
|
2653
|
+
"snr": resource.link.snr,
|
|
2654
|
+
"q": resource.link.q,
|
|
2655
|
+
}
|
|
2656
|
+
self.lxmf_delivery(
|
|
2657
|
+
resource.data.read(),
|
|
2658
|
+
resource.link.type,
|
|
2659
|
+
phy_stats=phy_stats,
|
|
2660
|
+
ratchet_id=ratchet_id,
|
|
2661
|
+
method=LXMessage.DIRECT,
|
|
2662
|
+
)
|
|
2663
|
+
|
|
2664
|
+
def delivery_remote_identified(self, link, identity):
|
|
2665
|
+
destination_hash = RNS.Destination.hash_from_name_and_identity(
|
|
2666
|
+
"lxmf.delivery", identity
|
|
2667
|
+
)
|
|
2668
|
+
self.backchannel_links[destination_hash] = link
|
|
2669
|
+
RNS.log(
|
|
2670
|
+
f"Backchannel became available for {RNS.prettyhexrep(destination_hash)} on delivery link {link}",
|
|
2671
|
+
RNS.LOG_DEBUG,
|
|
2672
|
+
)
|
|
2673
|
+
|
|
2674
|
+
### Peer Sync & Propagation ###########################
|
|
2675
|
+
#######################################################
|
|
2676
|
+
|
|
2677
|
+
+    def peer(
+        self,
+        destination_hash,
+        timestamp,
+        propagation_transfer_limit,
+        propagation_sync_limit,
+        propagation_stamp_cost,
+        propagation_stamp_cost_flexibility,
+        peering_cost,
+        metadata,
+    ):
+        if peering_cost > self.max_peering_cost:
+            if destination_hash in self.peers:
+                RNS.log(
+                    f"Peer {RNS.prettyhexrep(destination_hash)} increased peering cost beyond local accepted maximum, breaking peering...",
+                    RNS.LOG_NOTICE,
+                )
+                self.unpeer(destination_hash, timestamp)
+            else:
+                RNS.log(
+                    f"Not peering with {RNS.prettyhexrep(destination_hash)}, since its peering cost of {peering_cost} exceeds local maximum of {self.max_peering_cost}",
+                    RNS.LOG_NOTICE,
+                )
+
+        else:
+            if destination_hash in self.peers:
+                peer = self.peers[destination_hash]
+                if timestamp > peer.peering_timebase:
+                    peer.alive = True
+                    peer.metadata = metadata
+                    peer.sync_backoff = 0
+                    peer.next_sync_attempt = 0
+                    peer.peering_timebase = timestamp
+                    peer.last_heard = time.time()
+                    peer.propagation_stamp_cost = propagation_stamp_cost
+                    peer.propagation_stamp_cost_flexibility = (
+                        propagation_stamp_cost_flexibility
+                    )
+                    peer.peering_cost = peering_cost
+                    peer.propagation_transfer_limit = propagation_transfer_limit
+                    if propagation_sync_limit != None:
+                        peer.propagation_sync_limit = propagation_sync_limit
+                    else:
+                        peer.propagation_sync_limit = propagation_transfer_limit
+
+                    RNS.log(
+                        f"Peering config updated for {RNS.prettyhexrep(destination_hash)}",
+                        RNS.LOG_VERBOSE,
+                    )
+
+            else:
+                if len(self.peers) >= self.max_peers:
+                    RNS.log(
+                        f"Max peers reached, not peering with {RNS.prettyhexrep(destination_hash)}",
+                        RNS.LOG_DEBUG,
+                    )
+                else:
+                    peer = LXMPeer(
+                        self, destination_hash, sync_strategy=self.default_sync_strategy
+                    )
+                    peer.alive = True
+                    peer.metadata = metadata
+                    peer.last_heard = time.time()
+                    peer.propagation_stamp_cost = propagation_stamp_cost
+                    peer.propagation_stamp_cost_flexibility = (
+                        propagation_stamp_cost_flexibility
+                    )
+                    peer.peering_cost = peering_cost
+                    peer.propagation_transfer_limit = propagation_transfer_limit
+                    if propagation_sync_limit != None:
+                        peer.propagation_sync_limit = propagation_sync_limit
+                    else:
+                        peer.propagation_sync_limit = propagation_transfer_limit
+
+                    self.peers[destination_hash] = peer
+                    RNS.log(
+                        f"Peered with {RNS.prettyhexrep(destination_hash)}",
+                        RNS.LOG_NOTICE,
+                    )
+
+    def unpeer(self, destination_hash, timestamp=None):
+        if timestamp == None:
+            timestamp = int(time.time())
+
+        if destination_hash in self.peers:
+            peer = self.peers[destination_hash]
+
+            if timestamp >= peer.peering_timebase:
+                self.peers.pop(destination_hash)
+                RNS.log("Broke peering with " + str(peer.destination))
+
+    def rotate_peers(self):
+        try:
+            rotation_headroom = max(
+                1,
+                math.floor(self.max_peers * (LXMRouter.ROTATION_HEADROOM_PCT / 100.0)),
+            )
+            required_drops = len(self.peers) - (self.max_peers - rotation_headroom)
+            if required_drops > 0 and len(self.peers) - required_drops > 1:
+                peers = self.peers.copy()
+                untested_peers = []
+                for peer_id in self.peers:
+                    peer = self.peers[peer_id]
+                    if peer.last_sync_attempt == 0:
+                        untested_peers.append(peer)
+
+                if len(untested_peers) >= rotation_headroom:
+                    RNS.log(
+                        "Newly added peer threshold reached, postponing peer rotation",
+                        RNS.LOG_DEBUG,
+                    )
+                    return
+
+                fully_synced_peers = {}
+                for peer_id in peers:
+                    peer = peers[peer_id]
+                    if peer.unhandled_message_count == 0:
+                        fully_synced_peers[peer_id] = peer
+
+                if len(fully_synced_peers) > 0:
+                    peers = fully_synced_peers
+                    ms = "" if len(fully_synced_peers) == 1 else "s"
+                    RNS.log(
+                        f"Found {len(fully_synced_peers)} fully synced peer{ms}, using as peer rotation pool basis",
+                        RNS.LOG_DEBUG,
+                    )
+
+                culled_peers = []
+                waiting_peers = []
+                unresponsive_peers = []
+                for peer_id in peers:
+                    peer = peers[peer_id]
+                    if not peer_id in self.static_peers and peer.state == LXMPeer.IDLE:
+                        if peer.alive:
+                            if peer.offered == 0:
+                                # Don't consider for unpeering until at
+                                # least one message has been offered
+                                pass
+                            else:
+                                waiting_peers.append(peer)
+                        else:
+                            unresponsive_peers.append(peer)
+
+                drop_pool = []
+                if len(unresponsive_peers) > 0:
+                    drop_pool.extend(unresponsive_peers)
+                    if not self.prioritise_rotating_unreachable_peers:
+                        drop_pool.extend(waiting_peers)
+
+                else:
+                    drop_pool.extend(waiting_peers)
+
+                if len(drop_pool) > 0:
+                    drop_count = min(required_drops, len(drop_pool))
+                    low_acceptance_rate_peers = sorted(
+                        drop_pool,
+                        key=lambda p: (
+                            0 if p.offered == 0 else (p.outgoing / p.offered)
+                        ),
+                        reverse=False,
+                    )[0:drop_count]
+
+                    dropped_peers = 0
+                    for peer in low_acceptance_rate_peers:
+                        ar = (
+                            0
+                            if peer.offered == 0
+                            else round((peer.outgoing / peer.offered) * 100, 2)
+                        )
+                        if ar < LXMRouter.ROTATION_AR_MAX * 100:
+                            reachable_str = "reachable" if peer.alive else "unreachable"
+                            RNS.log(
+                                f"Acceptance rate for {reachable_str} peer {RNS.prettyhexrep(peer.destination_hash)} was: {ar}% ({peer.outgoing}/{peer.offered}, {peer.unhandled_message_count} unhandled messages)",
+                                RNS.LOG_DEBUG,
+                            )
+                            self.unpeer(peer.destination_hash)
+                            dropped_peers += 1
+
+                    ms = "" if dropped_peers == 1 else "s"
+                    RNS.log(
+                        f"Dropped {dropped_peers} low acceptance rate peer{ms} to increase peering headroom",
+                        RNS.LOG_DEBUG,
+                    )
+
+        except Exception as e:
+            RNS.log(f"An error occurred during peer rotation: {e}", RNS.LOG_ERROR)
+            RNS.trace_exception(e)
+
+    def sync_peers(self):
+        culled_peers = []
+        waiting_peers = []
+        unresponsive_peers = []
+        peers = self.peers.copy()
+        for peer_id in peers:
+            peer = peers[peer_id]
+            if time.time() > peer.last_heard + LXMPeer.MAX_UNREACHABLE:
+                if not peer_id in self.static_peers:
+                    culled_peers.append(peer_id)
+
+            else:
+                if peer.state == LXMPeer.IDLE and len(peer.unhandled_messages) > 0:
+                    if peer.alive:
+                        waiting_peers.append(peer)
+                    else:
+                        if (
+                            hasattr(peer, "next_sync_attempt")
+                            and time.time() > peer.next_sync_attempt
+                        ):
+                            unresponsive_peers.append(peer)
+                        else:
+                            pass # RNS.log("Not adding peer "+str(peer)+" since it is in sync backoff", RNS.LOG_DEBUG)
+
+        peer_pool = []
+        if len(waiting_peers) > 0:
+            fastest_peers = sorted(
+                waiting_peers, key=lambda p: p.sync_transfer_rate, reverse=True
+            )[0 : min(LXMRouter.FASTEST_N_RANDOM_POOL, len(waiting_peers))]
+            peer_pool.extend(fastest_peers)
+
+            unknown_speed_peers = [
+                p for p in waiting_peers if p.sync_transfer_rate == 0
+            ]
+            if len(unknown_speed_peers) > 0:
+                peer_pool.extend(
+                    unknown_speed_peers[
+                        0 : min(len(unknown_speed_peers), len(fastest_peers))
+                    ]
+                )
+
+            RNS.log(
+                "Selecting peer to sync from "
+                + str(len(waiting_peers))
+                + " waiting peers.",
+                RNS.LOG_DEBUG,
+            )
+
+        elif len(unresponsive_peers) > 0:
+            RNS.log(
+                "No active peers available, randomly selecting peer to sync from "
+                + str(len(unresponsive_peers))
+                + " unresponsive peers.",
+                RNS.LOG_DEBUG,
+            )
+            peer_pool = unresponsive_peers
+
+        if len(peer_pool) > 0:
+            selected_index = random.randint(0, len(peer_pool) - 1)
+            selected_peer = peer_pool[selected_index]
+            RNS.log(
+                "Selected waiting peer "
+                + str(selected_index)
+                + ": "
+                + RNS.prettyhexrep(selected_peer.destination.hash),
+                RNS.LOG_DEBUG,
+            )
+            selected_peer.sync()
+
+        for peer_id in culled_peers:
+            RNS.log(
+                "Removing peer "
+                + RNS.prettyhexrep(peer_id)
+                + " due to excessive unreachability",
+                RNS.LOG_WARNING,
+            )
+            try:
+                if peer_id in self.peers:
+                    self.peers.pop(peer_id)
+            except Exception as e:
+                RNS.log(
+                    "Error while removing peer "
+                    + RNS.prettyhexrep(peer_id)
+                    + ". The contained exception was: "
+                    + str(e),
+                    RNS.LOG_ERROR,
+                )
+
+    def propagation_link_established(self, link):
+        link.set_packet_callback(self.propagation_packet)
+        link.set_resource_strategy(RNS.Link.ACCEPT_APP)
+        link.set_resource_callback(self.propagation_resource_advertised)
+        link.set_resource_started_callback(self.resource_transfer_began)
+        link.set_resource_concluded_callback(self.propagation_resource_concluded)
+        self.active_propagation_links.append(link)
+
+    def propagation_resource_advertised(self, resource):
+        if self.from_static_only:
+            remote_identity = resource.link.get_remote_identity()
+            if remote_identity == None:
+                RNS.log(
+                    f"Rejecting propagation resource from unidentified peer",
+                    RNS.LOG_DEBUG,
+                )
+                return False
+            else:
+                remote_destination = RNS.Destination(
+                    remote_identity,
+                    RNS.Destination.OUT,
+                    RNS.Destination.SINGLE,
+                    APP_NAME,
+                    "propagation",
+                )
+                remote_hash = remote_destination.hash
+                remote_str = RNS.prettyhexrep(remote_hash)
+                if not remote_hash in self.static_peers:
+                    RNS.log(
+                        f"Rejecting propagation resource from {remote_str} not in static peers list",
+                        RNS.LOG_DEBUG,
+                    )
+                    return False
+
+        size = resource.get_data_size()
+        limit = self.propagation_per_sync_limit * 1000
+        if limit != None and size > limit:
+            RNS.log(
+                f"Rejecting {RNS.prettysize(size)} incoming propagation resource, since it exceeds the limit of {RNS.prettysize(limit)}",
+                RNS.LOG_DEBUG,
+            )
+            return False
+        else:
+            return True
+
+    def propagation_packet(self, data, packet):
+        try:
+            if packet.destination_type != RNS.Destination.LINK:
+                return
+            else:
+                data = msgpack.unpackb(data)
+                remote_timebase = data[0]
+                messages = data[1]
+
+                min_accepted_cost = max(
+                    0,
+                    self.propagation_stamp_cost
+                    - self.propagation_stamp_cost_flexibility,
+                )
+                validated_messages = LXStamper.validate_pn_stamps(
+                    messages, min_accepted_cost
+                )
+
+                for validated_entry in validated_messages:
+                    lxmf_data = validated_entry[1]
+                    stamp_value = validated_entry[2]
+                    stamp_data = validated_entry[3]
+                    self.lxmf_propagation(
+                        lxmf_data, stamp_value=stamp_value, stamp_data=stamp_data
+                    )
+                    self.client_propagation_messages_received += 1
+
+                if len(validated_messages) == len(messages):
+                    ms = "" if len(messages) == 1 else "s"
+                    RNS.log(
+                        f"Received {len(messages)} propagation message{ms} from client with valid stamp{ms}",
+                        RNS.LOG_DEBUG,
+                    )
+                    packet.prove()
+                else:
+                    RNS.log(
+                        "Propagation transfer from client contained messages with invalid stamps",
+                        RNS.LOG_NOTICE,
+                    )
+                    reject_data = msgpack.packb([LXMPeer.ERROR_INVALID_STAMP])
+                    RNS.Packet(packet.link, reject_data).send()
+                    packet.link.teardown()
+
+        except Exception as e:
+            RNS.log(
+                "Exception occurred while parsing incoming LXMF propagation data.",
+                RNS.LOG_ERROR,
+            )
+            RNS.log("The contained exception was: " + str(e), RNS.LOG_ERROR)
+
+    def offer_request(
+        self, path, data, request_id, link_id, remote_identity, requested_at
+    ):
+        if remote_identity == None:
+            return LXMPeer.ERROR_NO_IDENTITY
+        else:
+            remote_destination = RNS.Destination(
+                remote_identity,
+                RNS.Destination.OUT,
+                RNS.Destination.SINGLE,
+                APP_NAME,
+                "propagation",
+            )
+            remote_hash = remote_destination.hash
+            remote_str = RNS.prettyhexrep(remote_hash)
+
+            if remote_hash in self.throttled_peers:
+                throttle_remaining = self.throttled_peers[remote_hash] - time.time()
+                if throttle_remaining > 0:
+                    RNS.log(
+                        f"Propagation offer from node {remote_str} rejected, throttled for {RNS.prettytime(throttle_remaining)} more",
+                        RNS.LOG_NOTICE,
+                    )
+                    return LXMPeer.ERROR_THROTTLED
+                else:
+                    self.throttled_peers.pop(remote_hash)
+
+            if self.from_static_only:
+                if not remote_hash in self.static_peers:
+                    RNS.log(
+                        f"Rejecting propagation request from {remote_str} not in static peers list",
+                        RNS.LOG_DEBUG,
+                    )
+                    return LXMPeer.ERROR_NO_ACCESS
+
+            try:
+                if type(data) != list and len(data) < 2:
+                    return LXMPeer.ERROR_INVALID_DATA
+
+                peering_id = self.identity.hash + remote_identity.hash
+                target_cost = self.peering_cost
+                peering_key = data[0]
+                transient_ids = data[1]
+                wanted_ids = []
+
+                ts = time.time()
+                peering_key_valid = LXStamper.validate_peering_key(
+                    peering_id, peering_key, target_cost
+                )
+                td = time.time() - ts
+
+                if not peering_key_valid:
+                    RNS.log(
+                        f"Invalid peering key for incoming sync offer", RNS.LOG_DEBUG
+                    )
+                    return LXMPeer.ERROR_INVALID_KEY
+
+                else:
+                    RNS.log(
+                        f"Peering key validated for incoming offer in {RNS.prettytime(td)}",
+                        RNS.LOG_DEBUG,
+                    )
+                    self.validated_peer_links[link_id] = True
+                    for transient_id in transient_ids:
+                        if not transient_id in self.propagation_entries:
+                            wanted_ids.append(transient_id)
+
+                    if len(wanted_ids) == 0:
+                        return False
+                    elif len(wanted_ids) == len(transient_ids):
+                        return True
+                    else:
+                        return wanted_ids
+
+            except Exception as e:
+                RNS.log(
+                    "Error occurred while generating response for sync request, the contained exception was: "
+                    + str(e),
+                    RNS.LOG_DEBUG,
+                )
+                RNS.trace_exception(e)
+                return None
+
+    def propagation_resource_concluded(self, resource):
+        if resource.status == RNS.Resource.COMPLETE:
+            try:
+                data = msgpack.unpackb(resource.data.read())
+
+                if (
+                    type(data) == list
+                    and len(data) == 2
+                    and type(data[0] == float)
+                    and type(data[1]) == list
+                ):
+                    # This is a series of propagation messages from a peer or originator
+                    remote_identity = resource.link.get_remote_identity()
+                    remote_timebase = data[0]
+                    messages = data[1]
+                    remote_hash = None
+                    remote_str = "unknown client"
+
+                    if remote_identity != None:
+                        remote_destination = RNS.Destination(
+                            remote_identity,
+                            RNS.Destination.OUT,
+                            RNS.Destination.SINGLE,
+                            APP_NAME,
+                            "propagation",
+                        )
+                        remote_hash = remote_destination.hash
+                        remote_app_data = RNS.Identity.recall_app_data(remote_hash)
+                        remote_str = RNS.prettyhexrep(remote_hash)
+
+                        if remote_hash in self.peers:
+                            remote_str = f"peer {remote_str}"
+                        else:
+                            if pn_announce_data_is_valid(remote_app_data):
+                                # 1: Current node timebase
+                                # 2: Boolean flag signalling propagation node state
+                                # 3: Per-transfer limit for message propagation in kilobytes
+                                # 4: Limit for incoming propagation node syncs
+                                # 5: Propagation stamp costs for this node
+                                # 6: Node metadata
+                                if (
+                                    remote_app_data[2]
+                                    and self.autopeer
+                                    and RNS.Transport.hops_to(remote_hash)
+                                    <= self.autopeer_maxdepth
+                                ):
+                                    remote_timebase = remote_app_data[1]
+                                    remote_transfer_limit = remote_app_data[3]
+                                    remote_sync_limit = remote_app_data[4]
+                                    remote_stamp_cost = remote_app_data[5][0]
+                                    remote_stamp_flex = remote_app_data[5][1]
+                                    remote_peering_cost = remote_app_data[5][2]
+                                    remote_metadata = remote_app_data[6]
+
+                                    RNS.log(
+                                        f"Auto-peering with {remote_str} discovered via incoming sync",
+                                        RNS.LOG_DEBUG,
+                                    )  # TODO: Remove debug
+                                    self.peer(
+                                        remote_hash,
+                                        remote_timebase,
+                                        remote_transfer_limit,
+                                        remote_sync_limit,
+                                        remote_stamp_cost,
+                                        remote_stamp_flex,
+                                        remote_peering_cost,
+                                        remote_metadata,
+                                    )
+
+                    peering_key_valid = False
+                    if remote_identity != None:
+                        if (
+                            resource.link.link_id in self.validated_peer_links
+                            and self.validated_peer_links[resource.link.link_id] == True
+                        ):
+                            peering_key_valid = True
+
+                    if not peering_key_valid and len(messages) > 1:
+                        resource.link.teardown()
+                        RNS.log(
+                            f"Received multiple propagation messages from {remote_str} without valid peering key presentation. This is not supposed to happen, ignoring.",
+                            RNS.LOG_WARNING,
+                        )
+                        RNS.log(
+                            f"Clients and peers without a valid peering key can only deliver 1 message per transfer.",
+                            RNS.LOG_WARNING,
+                        )
+                    else:
+                        ms = "" if len(messages) == 1 else "s"
+                        RNS.log(
+                            f"Received {len(messages)} message{ms} from {remote_str}, validating stamps...",
+                            RNS.LOG_VERBOSE,
+                        )
+
+                        min_accepted_cost = max(
+                            0,
+                            self.propagation_stamp_cost
+                            - self.propagation_stamp_cost_flexibility,
+                        )
+                        validated_messages = LXStamper.validate_pn_stamps(
+                            messages, min_accepted_cost
+                        )
+                        invalid_stamps = len(messages) - len(validated_messages)
+                        ms = "" if invalid_stamps == 1 else "s"
+                        if len(validated_messages) == len(messages):
+                            RNS.log(
+                                f"All message stamps validated from {remote_str}",
+                                RNS.LOG_VERBOSE,
+                            )
+                        else:
+                            RNS.log(
+                                f"Transfer from {remote_str} contained {invalid_stamps} invalid stamp{ms}",
+                                RNS.LOG_WARNING,
+                            )
+
+                        for validated_entry in validated_messages:
+                            transient_id = validated_entry[0]
+                            lxmf_data = validated_entry[1]
+                            stamp_value = validated_entry[2]
+                            stamp_data = validated_entry[3]
+                            peer = None
+
+                            if remote_hash != None and remote_hash in self.peers:
+                                peer = self.peers[remote_hash]
+                                peer.incoming += 1
+                                peer.rx_bytes += len(lxmf_data)
+                            else:
+                                if remote_identity != None:
+                                    self.unpeered_propagation_incoming += 1
+                                    self.unpeered_propagation_rx_bytes += len(lxmf_data)
+                                else:
+                                    self.client_propagation_messages_received += 1
+
+                            self.lxmf_propagation(
+                                lxmf_data,
+                                from_peer=peer,
+                                stamp_value=stamp_value,
+                                stamp_data=stamp_data,
+                            )
+                            if peer != None:
+                                peer.queue_handled_message(transient_id)
+
+                        invalid_message_count = len(messages) - len(validated_messages)
+                        if invalid_message_count > 0:
+                            resource.link.teardown()
+                            if remote_identity != None:
+                                throttle_time = LXMRouter.PN_STAMP_THROTTLE
+                                self.throttled_peers[remote_hash] = (
+                                    time.time() + throttle_time
+                                )
+                                ms = "" if invalid_message_count == 1 else "s"
+                                RNS.log(
+                                    f"Propagation transfer from {remote_str} contained {invalid_message_count} message{ms} with invalid stamps, throttled for {RNS.prettytime(throttle_time)}",
+                                    RNS.LOG_NOTICE,
+                                )
+
+                else:
+                    RNS.log(
+                        "Invalid data structure received at propagation destination, ignoring",
+                        RNS.LOG_DEBUG,
+                    )
+
+            except Exception as e:
+                RNS.log(
+                    "Error while unpacking received propagation resource", RNS.LOG_DEBUG
+                )
+                RNS.trace_exception(e)
+
+    def enqueue_peer_distribution(self, transient_id, from_peer):
+        self.peer_distribution_queue.append([transient_id, from_peer])
+
+    def flush_peer_distribution_queue(self):
+        if len(self.peer_distribution_queue) > 0:
+            entries = []
+            while len(self.peer_distribution_queue) > 0:
+                entries.append(self.peer_distribution_queue.pop())
+
+            for peer_id in self.peers.copy():
+                if peer_id in self.peers:
+                    peer = self.peers[peer_id]
+                    for entry in entries:
+                        transient_id = entry[0]
+                        from_peer = entry[1]
+                        if peer != from_peer:
+                            peer.queue_unhandled_message(transient_id)
+
+    def lxmf_propagation(
+        self,
+        lxmf_data,
+        signal_local_delivery=None,
+        signal_duplicate=None,
+        allow_duplicate=False,
+        is_paper_message=False,
+        from_peer=None,
+        stamp_value=None,
+        stamp_data=None,
+    ):
+        if is_paper_message:
+            no_stamp_enforcement = True
+        else:
+            no_stamp_enforcement = False
+
+        try:
+            if len(lxmf_data) >= LXMessage.LXMF_OVERHEAD:
+                transient_id = RNS.Identity.full_hash(lxmf_data)
+
+                if (
+                    not transient_id in self.propagation_entries
+                    and not transient_id in self.locally_processed_transient_ids
+                ) or allow_duplicate == True:
+                    received = time.time()
+                    destination_hash = lxmf_data[: LXMessage.DESTINATION_LENGTH]
+
+                    self.locally_processed_transient_ids[transient_id] = received
+
+                    if destination_hash in self.delivery_destinations:
+                        delivery_destination = self.delivery_destinations[
+                            destination_hash
+                        ]
+                        encrypted_lxmf_data = lxmf_data[LXMessage.DESTINATION_LENGTH :]
+                        decrypted_lxmf_data = delivery_destination.decrypt(
+                            encrypted_lxmf_data
+                        )
+                        if decrypted_lxmf_data != None:
+                            delivery_data = (
+                                lxmf_data[: LXMessage.DESTINATION_LENGTH]
+                                + decrypted_lxmf_data
+                            )
+                            self.lxmf_delivery(
+                                delivery_data,
+                                delivery_destination.type,
+                                ratchet_id=delivery_destination.latest_ratchet_id,
+                                method=LXMessage.PROPAGATED,
+                                no_stamp_enforcement=no_stamp_enforcement,
+                                allow_duplicate=allow_duplicate,
+                            )
+                            self.locally_delivered_transient_ids[transient_id] = (
+                                time.time()
+                            )
+
+                            if signal_local_delivery != None:
+                                return signal_local_delivery
+
+                    else:
+                        if self.propagation_node:
+                            stamped_data = lxmf_data + stamp_data
+                            value_component = (
+                                f"_{stamp_value}"
+                                if stamp_value and stamp_value > 0
+                                else ""
+                            )
+                            file_path = f"{self.messagepath}/{RNS.hexrep(transient_id, delimit=False)}_{received}{value_component}"
+                            msg_file = open(file_path, "wb")
+                            msg_file.write(stamped_data)
+                            msg_file.close()
+
+                            RNS.log(
+                                f"Received propagated LXMF message {RNS.prettyhexrep(transient_id)} with stamp value {stamp_value}, adding to peer distribution queues...",
+                                RNS.LOG_EXTREME,
+                            )
+                            self.propagation_entries[transient_id] = [
+                                destination_hash,
+                                file_path,
+                                time.time(),
+                                len(stamped_data),
+                                [],
+                                [],
+                                stamp_value,
+                            ]
+                            self.enqueue_peer_distribution(transient_id, from_peer)
+
+                        else:
+                            # TODO: Add message to sneakernet queues when implemented
+                            RNS.log(
+                                f"Received propagated LXMF message {RNS.prettyhexrep(transient_id)}, but this instance is not hosting a propagation node, discarding message.",
+                                RNS.LOG_DEBUG,
+                            )
+
+                    return True
+
+                else:
+                    if signal_duplicate != None:
+                        return signal_duplicate
+
+                    else:
+                        return False
+
+            return False
+
+        except Exception as e:
+            RNS.log(
+                "Could not assemble propagated LXMF message from received data",
+                RNS.LOG_DEBUG,
+            )
+            RNS.log("The contained exception was: " + str(e), RNS.LOG_DEBUG)
+            RNS.trace_exception(e)
+            return False
+
+    def ingest_lxm_uri(
+        self,
+        uri,
+        signal_local_delivery=None,
+        signal_duplicate=None,
+        allow_duplicate=False,
+    ):
+        try:
+            if not uri.lower().startswith(LXMessage.URI_SCHEMA + "://"):
+                RNS.log("Cannot ingest LXM, invalid URI provided.", RNS.LOG_ERROR)
+                return False
+
+            else:
+                lxmf_data = base64.urlsafe_b64decode(
+                    uri.replace(LXMessage.URI_SCHEMA + "://", "").replace("/", "")
+                    + "=="
+                )
+                transient_id = RNS.Identity.full_hash(lxmf_data)
+
+                router_propagation_result = self.lxmf_propagation(
+                    lxmf_data,
+                    signal_local_delivery=signal_local_delivery,
+                    signal_duplicate=signal_duplicate,
+                    allow_duplicate=allow_duplicate,
+                    is_paper_message=True,
+                )
+                if router_propagation_result != False:
+                    RNS.log(
+                        "LXM with transient ID "
+                        + RNS.prettyhexrep(transient_id)
+                        + " was ingested.",
+                        RNS.LOG_DEBUG,
+                    )
+                    return router_propagation_result
+                else:
+                    RNS.log(
+                        "No valid LXM could be ingested from the provided URI",
+                        RNS.LOG_DEBUG,
+                    )
+                    return False
+
+        except Exception as e:
+            RNS.log(
+                "Error while decoding URI-encoded LXMF message. The contained exception was: "
+                + str(e),
+                RNS.LOG_ERROR,
+            )
+            return False
+
+    def fail_message(self, lxmessage):
+        RNS.log(str(lxmessage) + " failed to send", RNS.LOG_DEBUG)
+
+        if lxmessage in self.pending_outbound:
+            self.pending_outbound.remove(lxmessage)
+
+        self.failed_outbound.append(lxmessage)
+
+        if lxmessage.state != LXMessage.REJECTED:
+            lxmessage.state = LXMessage.FAILED
+
+        if lxmessage.failed_callback != None and callable(lxmessage.failed_callback):
+            lxmessage.failed_callback(lxmessage)
+
+    def process_deferred_stamps(self):
+        if len(self.pending_deferred_stamps) > 0:
+
+            if self.stamp_gen_lock.locked():
+                return
+
+            else:
+                with self.stamp_gen_lock:
+                    selected_lxm = None
+                    selected_message_id = None
+                    for message_id in self.pending_deferred_stamps:
+                        lxmessage = self.pending_deferred_stamps[message_id]
+                        if selected_lxm == None:
+                            selected_lxm = lxmessage
+                            selected_message_id = message_id
+
+                    if selected_lxm != None:
+                        if selected_lxm.state == LXMessage.CANCELLED:
+                            RNS.log(
+                                f"Message cancelled during deferred stamp generation for {selected_lxm}.",
+                                RNS.LOG_DEBUG,
+                            )
+                            selected_lxm.stamp_generation_failed = True
+                            self.pending_deferred_stamps.pop(selected_message_id)
+                            if selected_lxm.failed_callback != None and callable(
+                                selected_lxm.failed_callback
+                            ):
+                                selected_lxm.failed_callback(lxmessage)
+
+                            return
+
+                        if selected_lxm.defer_stamp:
+                            if selected_lxm.stamp == None:
+                                stamp_generation_success = False
+                            else:
+                                stamp_generation_success = True
+                        else:
+                            stamp_generation_success = True
+
+                        if selected_lxm.desired_method == LXMessage.PROPAGATED:
+                            if selected_lxm.propagation_stamp == None:
+                                propagation_stamp_generation_success = False
+                            else:
+                                propagation_stamp_generation_success = True
+                        else:
+                            propagation_stamp_generation_success = True
+
+                        if stamp_generation_success == False:
+                            RNS.log(
+                                f"Starting stamp generation for {selected_lxm}...",
+                                RNS.LOG_DEBUG,
+                            )
+                            generated_stamp = selected_lxm.get_stamp()
+                            if generated_stamp:
+                                selected_lxm.stamp = generated_stamp
+                                selected_lxm.defer_stamp = False
+                                selected_lxm.packed = None
+                                selected_lxm.pack(payload_updated=True)
+                                stamp_generation_success = True
+                                RNS.log(
+                                    f"Stamp generation completed for {selected_lxm}",
+                                    RNS.LOG_DEBUG,
+                                )
+                            else:
+                                if selected_lxm.state == LXMessage.CANCELLED:
+                                    RNS.log(
+                                        f"Message cancelled during deferred stamp generation for {selected_lxm}.",
+                                        RNS.LOG_DEBUG,
+                                    )
+                                    selected_lxm.stamp_generation_failed = True
+                                    self.pending_deferred_stamps.pop(
+                                        selected_message_id
+                                    )
+                                    if (
+                                        selected_lxm.failed_callback != None
+                                        and callable(selected_lxm.failed_callback)
+                                    ):
+                                        selected_lxm.failed_callback(lxmessage)
+                                else:
+                                    RNS.log(
+                                        f"Deferred stamp generation did not succeed. Failing {selected_lxm}.",
+                                        RNS.LOG_ERROR,
+                                    )
+                                    selected_lxm.stamp_generation_failed = True
+                                    self.pending_deferred_stamps.pop(
+                                        selected_message_id
+                                    )
+                                    self.fail_message(selected_lxm)
+
+                        if propagation_stamp_generation_success == False:
+                            RNS.log(
+                                f"Starting propagation stamp generation for {selected_lxm}...",
+                                RNS.LOG_DEBUG,
+                            )
+                            pn_target_cost = self.get_outbound_propagation_cost()
+                            if pn_target_cost == None:
+                                RNS.log(
+                                    "Failed to get propagation node stamp cost, cannot generate propagation stamp",
+                                    RNS.LOG_ERROR,
+                                )
+                                selected_lxm.stamp_generation_failed = True
+                                self.pending_deferred_stamps.pop(selected_message_id)
+                                self.fail_message(selected_lxm)
+
+                            else:
+                                propagation_stamp = selected_lxm.get_propagation_stamp(
+                                    target_cost=pn_target_cost
+                                )
+                                if propagation_stamp:
+                                    selected_lxm.propagation_stamp = propagation_stamp
+                                    selected_lxm.defer_propagation_stamp = False
+                                    selected_lxm.packed = None
+                                    selected_lxm.pack()
+                                    propagation_stamp_generation_success = True
+                                    RNS.log(
+                                        f"Propagation stamp generation completed for {selected_lxm}",
+                                        RNS.LOG_DEBUG,
+                                    )
+                                else:
+                                    if selected_lxm.state == LXMessage.CANCELLED:
+                                        RNS.log(
+                                            f"Message cancelled during deferred propagation stamp generation for {selected_lxm}.",
+                                            RNS.LOG_DEBUG,
+                                        )
+                                        selected_lxm.stamp_generation_failed = True
+                                        self.pending_deferred_stamps.pop(
+                                            selected_message_id
+                                        )
+                                        if (
+                                            selected_lxm.failed_callback != None
+                                            and callable(selected_lxm.failed_callback)
+                                        ):
+                                            selected_lxm.failed_callback(lxmessage)
+                                    else:
+                                        RNS.log(
+                                            f"Deferred propagation stamp generation did not succeed. Failing {selected_lxm}.",
+                                            RNS.LOG_ERROR,
+                                        )
+                                        selected_lxm.stamp_generation_failed = True
+                                        self.pending_deferred_stamps.pop(
+                                            selected_message_id
+                                        )
+                                        self.fail_message(selected_lxm)
+
+                        if (
+                            stamp_generation_success
+                            and propagation_stamp_generation_success
+                        ):
+                            self.pending_deferred_stamps.pop(selected_message_id)
+                            self.pending_outbound.append(selected_lxm)
+
+    def propagation_transfer_signalling_packet(self, data, packet):
+        try:
+            unpacked = msgpack.unpackb(data)
+            if type(unpacked) == list and len(unpacked) >= 1:
+                signal = unpacked[0]
+                if signal == LXMPeer.ERROR_INVALID_STAMP:
+                    RNS.log("Message rejected by propagation node", RNS.LOG_ERROR)
+                    if hasattr(packet, "link") and hasattr(
+                        packet.link, "for_lxmessage"
+                    ):
+                        lxm = packet.link.for_lxmessage
+                        RNS.log(f"Invalid propagation stamp on {lxm}", RNS.LOG_ERROR)
+                        self.cancel_outbound(
+                            lxm.message_id, cancel_state=LXMessage.REJECTED
+                        )
+
+        except Exception as e:
+            RNS.log(
+                f"An error occurred while processing propagation transfer signalling. The contained exception was: {e}",
+                RNS.LOG_ERROR,
+            )
+
+    def process_outbound(self, sender=None):
+        if self.processing_outbound:
+            return
+
+        for lxmessage in self.pending_outbound:
+            if lxmessage.state == LXMessage.DELIVERED:
+                RNS.log(
+                    "Delivery has occurred for "
+                    + str(lxmessage)
+                    + ", removing from outbound queue",
+                    RNS.LOG_DEBUG,
+                )
+                self.pending_outbound.remove(lxmessage)
+
+                # Udate ticket delivery stats
+                if lxmessage.include_ticket and FIELD_TICKET in lxmessage.fields:
+                    RNS.log(
+                        f"Updating latest ticket delivery for {RNS.prettyhexrep(lxmessage.destination_hash)}",
+                        RNS.LOG_DEBUG,
+                    )
+                    self.available_tickets["last_deliveries"][
+                        lxmessage.destination_hash
+                    ] = time.time()
+                    self.save_available_tickets()
+
+                # Prepare link for backchannel communications
+                delivery_destination_hash = lxmessage.get_destination().hash
+                if (
+                    lxmessage.method == LXMessage.DIRECT
+                    and delivery_destination_hash in self.direct_links
+                ):
+                    direct_link = self.direct_links[delivery_destination_hash]
+                    if (
+                        not hasattr(direct_link, "backchannel_identified")
+                        or direct_link.backchannel_identified == False
+                    ):
+                        if direct_link.initiator == True:
+                            source_destination_hash = lxmessage.get_source().hash
+                            if source_destination_hash in self.delivery_destinations:
+                                backchannel_identity = self.delivery_destinations[
+                                    source_destination_hash
+                                ].identity
+                                backchannel_desthash = (
+                                    RNS.Destination.hash_from_name_and_identity(
+                                        "lxmf.delivery", backchannel_identity
+                                    )
+                                )
+                                direct_link.identify(backchannel_identity)
+                                direct_link.backchannel_identified = True
+                                self.delivery_link_established(direct_link)
+                                RNS.log(
+                                    f"Performed backchannel identification as {RNS.prettyhexrep(backchannel_desthash)} on {direct_link}",
+                                    RNS.LOG_DEBUG,
+                                )
+
+            elif (
+                lxmessage.method == LXMessage.PROPAGATED
+                and lxmessage.state == LXMessage.SENT
+            ):
+                RNS.log(
+                    "Propagation has occurred for "
+                    + str(lxmessage)
+                    + ", removing from outbound queue",
+                    RNS.LOG_DEBUG,
+                )
+                self.pending_outbound.remove(lxmessage)
+
+            elif lxmessage.state == LXMessage.CANCELLED:
+                RNS.log(
+                    "Cancellation requested for "
+                    + str(lxmessage)
+                    + ", removing from outbound queue",
+                    RNS.LOG_DEBUG,
+                )
+                self.pending_outbound.remove(lxmessage)
+                if lxmessage.failed_callback != None and callable(
+                    lxmessage.failed_callback
+                ):
+                    lxmessage.failed_callback(lxmessage)
+
+            elif lxmessage.state == LXMessage.REJECTED:
+                RNS.log(
+                    "Receiver rejected "
+                    + str(lxmessage)
+                    + ", removing from outbound queue",
+                    RNS.LOG_DEBUG,
+                )
+                if lxmessage in self.pending_outbound:
+                    self.pending_outbound.remove(lxmessage)
+                if lxmessage.failed_callback != None and callable(
+                    lxmessage.failed_callback
+                ):
+                    lxmessage.failed_callback(lxmessage)
+
+            else:
+                RNS.log(
+                    "Outbound processing for "
+                    + str(lxmessage)
+                    + " to "
+                    + RNS.prettyhexrep(lxmessage.get_destination().hash),
+                    RNS.LOG_DEBUG,
+                )
+
+                if lxmessage.progress == None or lxmessage.progress < 0.01:
+                    lxmessage.progress = 0.01
+
+                # Outbound handling for opportunistic messages
+                if lxmessage.method == LXMessage.OPPORTUNISTIC:
+                    if lxmessage.delivery_attempts <= LXMRouter.MAX_DELIVERY_ATTEMPTS:
+                        if (
+                            lxmessage.delivery_attempts >= LXMRouter.MAX_PATHLESS_TRIES
+                            and not RNS.Transport.has_path(
+                                lxmessage.get_destination().hash
+                            )
+                        ):
+                            RNS.log(
+                                f"Requesting path to {RNS.prettyhexrep(lxmessage.get_destination().hash)} after {lxmessage.delivery_attempts} pathless tries for {lxmessage}",
+                                RNS.LOG_DEBUG,
+                            )
+                            lxmessage.delivery_attempts += 1
+                            RNS.Transport.request_path(lxmessage.get_destination().hash)
+                            lxmessage.next_delivery_attempt = (
+                                time.time() + LXMRouter.PATH_REQUEST_WAIT
+                            )
+                            lxmessage.progress = 0.01
+                        elif (
+                            lxmessage.delivery_attempts
+                            == LXMRouter.MAX_PATHLESS_TRIES + 1
+                            and RNS.Transport.has_path(lxmessage.get_destination().hash)
+                        ):
+                            RNS.log(
+                                f"Opportunistic delivery for {lxmessage} still unsuccessful after {lxmessage.delivery_attempts} attempts, trying to rediscover path to {RNS.prettyhexrep(lxmessage.get_destination().hash)}",
+                                RNS.LOG_DEBUG,
+                            )
+                            lxmessage.delivery_attempts += 1
+                            RNS.Reticulum.get_instance().drop_path(
+                                lxmessage.get_destination().hash
+                            )
+
+                            def rediscover_job():
+                                time.sleep(0.5)
+                                RNS.Transport.request_path(
+                                    lxmessage.get_destination().hash
+                                )
+
+                            threading.Thread(target=rediscover_job, daemon=True).start()
+                            lxmessage.next_delivery_attempt = (
+                                time.time() + LXMRouter.PATH_REQUEST_WAIT
+                            )
+                            lxmessage.progress = 0.01
+                        else:
+                            if (
+                                not hasattr(lxmessage, "next_delivery_attempt")
+                                or time.time() > lxmessage.next_delivery_attempt
+                            ):
+                                lxmessage.delivery_attempts += 1
+                                lxmessage.next_delivery_attempt = (
+                                    time.time() + LXMRouter.DELIVERY_RETRY_WAIT
+                                )
+                                RNS.log(
+                                    "Opportunistic delivery attempt "
+                                    + str(lxmessage.delivery_attempts)
+                                    + " for "
+                                    + str(lxmessage)
+                                    + " to "
+                                    + RNS.prettyhexrep(
+                                        lxmessage.get_destination().hash
+                                    ),
+                                    RNS.LOG_DEBUG,
+                                )
+                                lxmessage.send()
+                    else:
+                        RNS.log(
+                            "Max delivery attempts reached for oppertunistic "
+                            + str(lxmessage)
+                            + " to "
+                            + RNS.prettyhexrep(lxmessage.get_destination().hash),
+                            RNS.LOG_DEBUG,
+                        )
+                        self.fail_message(lxmessage)
+
+                # Outbound handling for messages transferred
+                # over a direct link to the final recipient
+                elif lxmessage.method == LXMessage.DIRECT:
+                    if lxmessage.delivery_attempts <= LXMRouter.MAX_DELIVERY_ATTEMPTS:
+                        delivery_destination_hash = lxmessage.get_destination().hash
+                        direct_link = None
+
+                        if delivery_destination_hash in self.direct_links:
+                            # An established direct link already exists to
+                            # the destination, so we'll try to use it for
+                            # delivering the message
+                            direct_link = self.direct_links[delivery_destination_hash]
+                            RNS.log(
+                                f"Using available direct link {direct_link} to {RNS.prettyhexrep(delivery_destination_hash)}",
+                                RNS.LOG_DEBUG,
+                            )
+
+                        elif delivery_destination_hash in self.backchannel_links:
+                            # An established backchannel link exists to
+                            # the destination, so we'll try to use it for
+                            # delivering the message
+                            direct_link = self.backchannel_links[
+                                delivery_destination_hash
+                            ]
+                            RNS.log(
+                                f"Using available backchannel link {direct_link} to {RNS.prettyhexrep(delivery_destination_hash)}",
+                                RNS.LOG_DEBUG,
+                            )
+
+                        if direct_link != None:
+                            if direct_link.status == RNS.Link.ACTIVE:
+                                if (
+                                    lxmessage.progress == None
+                                    or lxmessage.progress < 0.05
+                                ):
+                                    lxmessage.progress = 0.05
+                                if lxmessage.state != LXMessage.SENDING:
+                                    RNS.log(
+                                        "Starting transfer of "
+                                        + str(lxmessage)
+                                        + " to "
+                                        + RNS.prettyhexrep(
+                                            lxmessage.get_destination().hash
+                                        )
+                                        + " on link "
+                                        + str(direct_link),
+                                        RNS.LOG_DEBUG,
+                                    )
+                                    lxmessage.set_delivery_destination(direct_link)
+                                    lxmessage.send()
+                                else:
+                                    if lxmessage.representation == LXMessage.RESOURCE:
+                                        RNS.log(
+                                            "The transfer of "
+                                            + str(lxmessage)
+                                            + " is in progress ("
+                                            + str(round(lxmessage.progress * 100, 1))
+                                            + "%)",
+                                            RNS.LOG_DEBUG,
+                                        )
+                                    else:
+                                        RNS.log(
+                                            "Waiting for proof for "
+                                            + str(lxmessage)
+                                            + " sent as link packet",
+                                            RNS.LOG_DEBUG,
+                                        )
+                            elif direct_link.status == RNS.Link.CLOSED:
+                                if direct_link.activated_at != None:
+                                    RNS.log(
+                                        "The link to "
+                                        + RNS.prettyhexrep(
+                                            lxmessage.get_destination().hash
+                                        )
+                                        + " was closed unexpectedly, retrying path request...",
+                                        RNS.LOG_DEBUG,
+                                    )
+                                    RNS.Transport.request_path(
+                                        lxmessage.get_destination().hash
+                                    )
+                                else:
+                                    if not hasattr(lxmessage, "path_request_retried"):
+                                        RNS.log(
+                                            "The link to "
+                                            + RNS.prettyhexrep(
+                                                lxmessage.get_destination().hash
+                                            )
+                                            + " was never activated, retrying path request...",
+                                            RNS.LOG_DEBUG,
+                                        )
+                                        RNS.Transport.request_path(
+                                            lxmessage.get_destination().hash
+                                        )
+                                        lxmessage.path_request_retried = True
+                                    else:
+                                        RNS.log(
+                                            "The link to "
+                                            + RNS.prettyhexrep(
+                                                lxmessage.get_destination().hash
+                                            )
+                                            + " was never activated",
+                                            RNS.LOG_DEBUG,
+                                        )
+
+                                    lxmessage.next_delivery_attempt = (
+                                        time.time() + LXMRouter.PATH_REQUEST_WAIT
+                                    )
+
+                                lxmessage.set_delivery_destination(None)
+                                if delivery_destination_hash in self.direct_links:
+                                    self.direct_links.pop(delivery_destination_hash)
+                                if delivery_destination_hash in self.backchannel_links:
+                                    self.backchannel_links.pop(
+                                        delivery_destination_hash
+                                    )
+                                lxmessage.next_delivery_attempt = (
+                                    time.time() + LXMRouter.DELIVERY_RETRY_WAIT
+                                )
+                            else:
+                                # Simply wait for the link to become active or close
+                                RNS.log(
+                                    "The link to "
+                                    + RNS.prettyhexrep(lxmessage.get_destination().hash)
+                                    + " is pending, waiting for link to become active",
+                                    RNS.LOG_DEBUG,
+                                )
+                        else:
+                            # No link exists, so we'll try to establish one, but
+                            # only if we've never tried before, or the retry wait
+                            # period has elapsed.
+                            if (
+                                not hasattr(lxmessage, "next_delivery_attempt")
+                                or time.time() > lxmessage.next_delivery_attempt
+                            ):
+                                lxmessage.delivery_attempts += 1
+                                lxmessage.next_delivery_attempt = (
+                                    time.time() + LXMRouter.DELIVERY_RETRY_WAIT
+                                )
+
+                                if (
+                                    lxmessage.delivery_attempts
+                                    < LXMRouter.MAX_DELIVERY_ATTEMPTS
+                                ):
+                                    if RNS.Transport.has_path(
+                                        lxmessage.get_destination().hash
+                                    ):
+                                        RNS.log(
+                                            "Establishing link to "
+                                            + RNS.prettyhexrep(
+                                                lxmessage.get_destination().hash
+                                            )
+                                            + " for delivery attempt "
+                                            + str(lxmessage.delivery_attempts)
+                                            + " to "
+                                            + RNS.prettyhexrep(
+                                                lxmessage.get_destination().hash
+                                            ),
+                                            RNS.LOG_DEBUG,
+                                        )
+                                        delivery_link = RNS.Link(
+                                            lxmessage.get_destination()
+                                        )
+                                        delivery_link.set_link_established_callback(
+                                            self.process_outbound
+                                        )
+                                        self.direct_links[delivery_destination_hash] = (
+                                            delivery_link
+                                        )
+                                        lxmessage.progress = 0.03
+                                    else:
+                                        RNS.log(
+                                            "No path known for delivery attempt "
+                                            + str(lxmessage.delivery_attempts)
+                                            + " to "
+                                            + RNS.prettyhexrep(
+                                                lxmessage.get_destination().hash
+                                            )
+                                            + ". Requesting path...",
+                                            RNS.LOG_DEBUG,
+                                        )
+                                        RNS.Transport.request_path(
+                                            lxmessage.get_destination().hash
+                                        )
+                                        lxmessage.next_delivery_attempt = (
+                                            time.time() + LXMRouter.PATH_REQUEST_WAIT
+                                        )
+                                        lxmessage.progress = 0.01
+                    else:
+                        RNS.log(
+                            "Max delivery attempts reached for direct "
+                            + str(lxmessage)
+                            + " to "
+                            + RNS.prettyhexrep(lxmessage.get_destination().hash),
+                            RNS.LOG_DEBUG,
+                        )
+                        self.fail_message(lxmessage)
+
+                # Outbound handling for messages transported via
+                # propagation to a LXMF router network.
+                elif lxmessage.method == LXMessage.PROPAGATED:
+                    RNS.log(
+                        "Attempting propagated delivery for "
+                        + str(lxmessage)
+                        + " to "
+                        + RNS.prettyhexrep(lxmessage.get_destination().hash),
+                        RNS.LOG_DEBUG,
+                    )
+
+                    if self.outbound_propagation_node == None:
+                        RNS.log(
+                            "No outbound propagation node specified for propagated "
+                            + str(lxmessage)
+                            + " to "
+                            + RNS.prettyhexrep(lxmessage.get_destination().hash),
+                            RNS.LOG_ERROR,
+                        )
+                        self.fail_message(lxmessage)
+                    else:
+                        if (
+                            lxmessage.delivery_attempts
+                            <= LXMRouter.MAX_DELIVERY_ATTEMPTS
+                        ):
+
+                            if self.outbound_propagation_link != None:
+                                # A link already exists, so we'll try to use it
+                                # to deliver the message
+                                if (
+                                    self.outbound_propagation_link.status
+                                    == RNS.Link.ACTIVE
+                                ):
+                                    if lxmessage.state != LXMessage.SENDING:
+                                        RNS.log(
+                                            "Starting propagation transfer of "
+                                            + str(lxmessage)
+                                            + " to "
+                                            + RNS.prettyhexrep(
+                                                lxmessage.get_destination().hash
+                                            )
+                                            + " via "
+                                            + RNS.prettyhexrep(
+                                                self.outbound_propagation_node
+                                            ),
+                                            RNS.LOG_DEBUG,
+                                        )
+                                        lxmessage.set_delivery_destination(
+                                            self.outbound_propagation_link
+                                        )
+                                        lxmessage.send()
+                                    else:
+                                        if (
+                                            lxmessage.representation
+                                            == LXMessage.RESOURCE
+                                        ):
+                                            RNS.log(
+                                                "The transfer of "
+                                                + str(lxmessage)
+                                                + " is in progress ("
+                                                + str(
+                                                    round(lxmessage.progress * 100, 1)
+                                                )
+                                                + "%)",
+                                                RNS.LOG_DEBUG,
+                                            )
+                                        else:
+                                            RNS.log(
+                                                "Waiting for proof for "
+                                                + str(lxmessage)
+                                                + " sent as link packet",
+                                                RNS.LOG_DEBUG,
+                                            )
+                                elif (
+                                    self.outbound_propagation_link.status
+                                    == RNS.Link.CLOSED
+                                ):
+                                    RNS.log(
+                                        "The link to "
+                                        + RNS.prettyhexrep(
+                                            self.outbound_propagation_node
+                                        )
+                                        + " was closed",
+                                        RNS.LOG_DEBUG,
+                                    )
+                                    self.outbound_propagation_link = None
+                                    lxmessage.next_delivery_attempt = (
+                                        time.time() + LXMRouter.DELIVERY_RETRY_WAIT
+                                    )
+                                else:
+                                    # Simply wait for the link to become
+                                    # active or close
+                                    RNS.log(
+                                        "The propagation link to "
+                                        + RNS.prettyhexrep(
+                                            self.outbound_propagation_node
+                                        )
+                                        + " is pending, waiting for link to become active",
+                                        RNS.LOG_DEBUG,
+                                    )
+                            else:
+                                # No link exists, so we'll try to establish one, but
+                                # only if we've never tried before, or the retry wait
+                                # period has elapsed.
+                                if (
+                                    not hasattr(lxmessage, "next_delivery_attempt")
+                                    or time.time() > lxmessage.next_delivery_attempt
+                                ):
+                                    lxmessage.delivery_attempts += 1
+                                    lxmessage.next_delivery_attempt = (
+                                        time.time() + LXMRouter.DELIVERY_RETRY_WAIT
+                                    )
+
+                                    if (
+                                        lxmessage.delivery_attempts
+                                        < LXMRouter.MAX_DELIVERY_ATTEMPTS
+                                    ):
+                                        if RNS.Transport.has_path(
+                                            self.outbound_propagation_node
+                                        ):
+                                            RNS.log(
+                                                "Establishing link to "
+                                                + RNS.prettyhexrep(
+                                                    self.outbound_propagation_node
+                                                )
+                                                + " for propagation attempt "
+                                                + str(lxmessage.delivery_attempts)
+                                                + " to "
+                                                + RNS.prettyhexrep(
+                                                    lxmessage.get_destination().hash
+                                                ),
+                                                RNS.LOG_DEBUG,
+                                            )
+                                            propagation_node_identity = (
+                                                RNS.Identity.recall(
+                                                    self.outbound_propagation_node
+                                                )
+                                            )
+                                            propagation_node_destination = (
+                                                RNS.Destination(
+                                                    propagation_node_identity,
+                                                    RNS.Destination.OUT,
+                                                    RNS.Destination.SINGLE,
+                                                    APP_NAME,
+                                                    "propagation",
+                                                )
+                                            )
+                                            self.outbound_propagation_link = RNS.Link(
+                                                propagation_node_destination,
+                                                established_callback=self.process_outbound,
+                                            )
+                                            self.outbound_propagation_link.set_packet_callback(
+                                                self.propagation_transfer_signalling_packet
+                                            )
+                                            self.outbound_propagation_link.for_lxmessage = (
+                                                lxmessage
+                                            )
+                                        else:
+                                            RNS.log(
+                                                "No path known for propagation attempt "
+                                                + str(lxmessage.delivery_attempts)
+                                                + " to "
+                                                + RNS.prettyhexrep(
+                                                    self.outbound_propagation_node
+                                                )
+                                                + ". Requesting path...",
+                                                RNS.LOG_DEBUG,
+                                            )
+                                            RNS.Transport.request_path(
+                                                self.outbound_propagation_node
+                                            )
+                                            lxmessage.next_delivery_attempt = (
+                                                time.time()
+                                                + LXMRouter.PATH_REQUEST_WAIT
+                                            )
+
+                        else:
+                            RNS.log(
+                                "Max delivery attempts reached for propagated "
+                                + str(lxmessage)
+                                + " to "
+                                + RNS.prettyhexrep(lxmessage.get_destination().hash),
+                                RNS.LOG_DEBUG,
+                            )
+                            self.fail_message(lxmessage)