ReticulumTelemetryHub 0.1.0__py3-none-any.whl → 0.143.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- reticulum_telemetry_hub/api/__init__.py +23 -0
- reticulum_telemetry_hub/api/models.py +323 -0
- reticulum_telemetry_hub/api/service.py +836 -0
- reticulum_telemetry_hub/api/storage.py +528 -0
- reticulum_telemetry_hub/api/storage_base.py +156 -0
- reticulum_telemetry_hub/api/storage_models.py +118 -0
- reticulum_telemetry_hub/atak_cot/__init__.py +49 -0
- reticulum_telemetry_hub/atak_cot/base.py +277 -0
- reticulum_telemetry_hub/atak_cot/chat.py +506 -0
- reticulum_telemetry_hub/atak_cot/detail.py +235 -0
- reticulum_telemetry_hub/atak_cot/event.py +181 -0
- reticulum_telemetry_hub/atak_cot/pytak_client.py +569 -0
- reticulum_telemetry_hub/atak_cot/tak_connector.py +848 -0
- reticulum_telemetry_hub/config/__init__.py +25 -0
- reticulum_telemetry_hub/config/constants.py +7 -0
- reticulum_telemetry_hub/config/manager.py +515 -0
- reticulum_telemetry_hub/config/models.py +215 -0
- reticulum_telemetry_hub/embedded_lxmd/__init__.py +5 -0
- reticulum_telemetry_hub/embedded_lxmd/embedded.py +418 -0
- reticulum_telemetry_hub/internal_api/__init__.py +21 -0
- reticulum_telemetry_hub/internal_api/bus.py +344 -0
- reticulum_telemetry_hub/internal_api/core.py +690 -0
- reticulum_telemetry_hub/internal_api/v1/__init__.py +74 -0
- reticulum_telemetry_hub/internal_api/v1/enums.py +109 -0
- reticulum_telemetry_hub/internal_api/v1/manifest.json +8 -0
- reticulum_telemetry_hub/internal_api/v1/schemas.py +478 -0
- reticulum_telemetry_hub/internal_api/versioning.py +63 -0
- reticulum_telemetry_hub/lxmf_daemon/Handlers.py +122 -0
- reticulum_telemetry_hub/lxmf_daemon/LXMF.py +252 -0
- reticulum_telemetry_hub/lxmf_daemon/LXMPeer.py +898 -0
- reticulum_telemetry_hub/lxmf_daemon/LXMRouter.py +4227 -0
- reticulum_telemetry_hub/lxmf_daemon/LXMessage.py +1006 -0
- reticulum_telemetry_hub/lxmf_daemon/LXStamper.py +490 -0
- reticulum_telemetry_hub/lxmf_daemon/__init__.py +10 -0
- reticulum_telemetry_hub/lxmf_daemon/_version.py +1 -0
- reticulum_telemetry_hub/lxmf_daemon/lxmd.py +1655 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/fields/field_telemetry_stream.py +6 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/__init__.py +3 -0
- {lxmf_telemetry → reticulum_telemetry_hub/lxmf_telemetry}/model/persistance/appearance.py +19 -19
- {lxmf_telemetry → reticulum_telemetry_hub/lxmf_telemetry}/model/persistance/peer.py +17 -13
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/__init__.py +65 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/acceleration.py +68 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/ambient_light.py +37 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/angular_velocity.py +68 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/battery.py +68 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/connection_map.py +258 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/generic.py +841 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/gravity.py +68 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/humidity.py +37 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/information.py +42 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/location.py +110 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/lxmf_propagation.py +429 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/magnetic_field.py +68 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/physical_link.py +53 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/pressure.py +37 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/proximity.py +37 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/received.py +75 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/rns_transport.py +209 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/sensor.py +65 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/sensor_enum.py +27 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/sensor_mapping.py +58 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/temperature.py +37 -0
- {lxmf_telemetry → reticulum_telemetry_hub/lxmf_telemetry}/model/persistance/sensors/time.py +36 -32
- {lxmf_telemetry → reticulum_telemetry_hub/lxmf_telemetry}/model/persistance/telemeter.py +26 -23
- reticulum_telemetry_hub/lxmf_telemetry/sampler.py +229 -0
- reticulum_telemetry_hub/lxmf_telemetry/telemeter_manager.py +409 -0
- reticulum_telemetry_hub/lxmf_telemetry/telemetry_controller.py +804 -0
- reticulum_telemetry_hub/northbound/__init__.py +5 -0
- reticulum_telemetry_hub/northbound/app.py +195 -0
- reticulum_telemetry_hub/northbound/auth.py +119 -0
- reticulum_telemetry_hub/northbound/gateway.py +310 -0
- reticulum_telemetry_hub/northbound/internal_adapter.py +302 -0
- reticulum_telemetry_hub/northbound/models.py +213 -0
- reticulum_telemetry_hub/northbound/routes_chat.py +123 -0
- reticulum_telemetry_hub/northbound/routes_files.py +119 -0
- reticulum_telemetry_hub/northbound/routes_rest.py +345 -0
- reticulum_telemetry_hub/northbound/routes_subscribers.py +150 -0
- reticulum_telemetry_hub/northbound/routes_topics.py +178 -0
- reticulum_telemetry_hub/northbound/routes_ws.py +107 -0
- reticulum_telemetry_hub/northbound/serializers.py +72 -0
- reticulum_telemetry_hub/northbound/services.py +373 -0
- reticulum_telemetry_hub/northbound/websocket.py +855 -0
- reticulum_telemetry_hub/reticulum_server/__main__.py +2237 -0
- reticulum_telemetry_hub/reticulum_server/command_manager.py +1268 -0
- reticulum_telemetry_hub/reticulum_server/command_text.py +399 -0
- reticulum_telemetry_hub/reticulum_server/constants.py +1 -0
- reticulum_telemetry_hub/reticulum_server/event_log.py +357 -0
- reticulum_telemetry_hub/reticulum_server/internal_adapter.py +358 -0
- reticulum_telemetry_hub/reticulum_server/outbound_queue.py +312 -0
- reticulum_telemetry_hub/reticulum_server/services.py +422 -0
- reticulumtelemetryhub-0.143.0.dist-info/METADATA +181 -0
- reticulumtelemetryhub-0.143.0.dist-info/RECORD +97 -0
- {reticulumtelemetryhub-0.1.0.dist-info → reticulumtelemetryhub-0.143.0.dist-info}/WHEEL +1 -1
- reticulumtelemetryhub-0.143.0.dist-info/licenses/LICENSE +277 -0
- lxmf_telemetry/model/fields/field_telemetry_stream.py +0 -7
- lxmf_telemetry/model/persistance/__init__.py +0 -3
- lxmf_telemetry/model/persistance/sensors/location.py +0 -69
- lxmf_telemetry/model/persistance/sensors/magnetic_field.py +0 -36
- lxmf_telemetry/model/persistance/sensors/sensor.py +0 -44
- lxmf_telemetry/model/persistance/sensors/sensor_enum.py +0 -24
- lxmf_telemetry/model/persistance/sensors/sensor_mapping.py +0 -9
- lxmf_telemetry/telemetry_controller.py +0 -124
- reticulum_server/main.py +0 -182
- reticulumtelemetryhub-0.1.0.dist-info/METADATA +0 -15
- reticulumtelemetryhub-0.1.0.dist-info/RECORD +0 -19
- {lxmf_telemetry → reticulum_telemetry_hub}/__init__.py +0 -0
- {lxmf_telemetry/model/persistance/sensors → reticulum_telemetry_hub/lxmf_telemetry}/__init__.py +0 -0
- {reticulum_server → reticulum_telemetry_hub/reticulum_server}/__init__.py +0 -0
|
@@ -0,0 +1,898 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import time
|
|
3
|
+
import threading
|
|
4
|
+
|
|
5
|
+
import RNS
|
|
6
|
+
import RNS.vendor.umsgpack as msgpack
|
|
7
|
+
from . import LXStamper
|
|
8
|
+
|
|
9
|
+
from collections import deque
|
|
10
|
+
from .LXMF import APP_NAME
|
|
11
|
+
from .LXMF import PN_META_NAME
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class LXMPeer:
    """State holder for a remote LXMF propagation-node peer.

    Tracks peering/sync state, transfer statistics and the queues of
    message ids handled/unhandled for this peer (see methods below).
    """

    # Link request paths exposed by the remote propagation node.
    OFFER_REQUEST_PATH = "/offer"
    MESSAGE_GET_PATH = "/get"

    # Sync state machine states.
    IDLE = 0x00
    LINK_ESTABLISHING = 0x01
    LINK_READY = 0x02
    REQUEST_SENT = 0x03
    RESPONSE_RECEIVED = 0x04
    RESOURCE_TRANSFERRING = 0x05

    # Error codes a remote peer may return during sync.
    ERROR_NO_IDENTITY = 0xF0
    ERROR_NO_ACCESS = 0xF1
    ERROR_INVALID_KEY = 0xF3
    ERROR_INVALID_DATA = 0xF4
    ERROR_INVALID_STAMP = 0xF5
    ERROR_THROTTLED = 0xF6
    ERROR_NOT_FOUND = 0xFD
    ERROR_TIMEOUT = 0xFE

    # Available synchronisation strategies.
    STRATEGY_LAZY = 0x01
    STRATEGY_PERSISTENT = 0x02
    DEFAULT_SYNC_STRATEGY = STRATEGY_PERSISTENT

    # Maximum amount of time a peer can
    # be unreachable before it is removed
    MAX_UNREACHABLE = 14 * 24 * 60 * 60

    # Every time a consecutive sync
    # link fails to establish, add this
    # amount of time to wait before the
    # next sync is attempted.
    SYNC_BACKOFF_STEP = 12 * 60

    # How long to wait for an answer to
    # peer path requests before deferring
    # sync to later.
    PATH_REQUEST_GRACE = 7.5
|
|
53
|
+
@staticmethod
def from_bytes(peer_bytes, router):
    """Deserialize an LXMPeer from a msgpack-encoded byte string.

    Inverse of to_bytes(). Required fields (destination_hash,
    peering_timebase, alive, last_heard, handled_ids, unhandled_ids)
    raise KeyError when absent; all other fields fall back to safe
    defaults so older serialized peers still load.

    :param peer_bytes: msgpack bytes produced by to_bytes().
    :param router: the owning LXMRouter; used to validate that restored
        message ids still exist in its propagation store.
    :returns: a reconstructed LXMPeer instance.
    """
    dictionary = msgpack.unpackb(peer_bytes)

    peer = LXMPeer(router, dictionary["destination_hash"])
    peer.peering_timebase = dictionary["peering_timebase"]
    peer.alive = dictionary["alive"]
    peer.last_heard = dictionary["last_heard"]

    # Optional fields with simple defaults.
    peer.link_establishment_rate = dictionary.get("link_establishment_rate", 0)
    peer.sync_transfer_rate = dictionary.get("sync_transfer_rate", 0)
    peer.offered = dictionary.get("offered", 0)
    peer.outgoing = dictionary.get("outgoing", 0)
    peer.incoming = dictionary.get("incoming", 0)
    peer.rx_bytes = dictionary.get("rx_bytes", 0)
    peer.tx_bytes = dictionary.get("tx_bytes", 0)
    peer.last_sync_attempt = dictionary.get("last_sync_attempt", 0)
    peer.peering_key = dictionary.get("peering_key", None)
    peer.metadata = dictionary.get("metadata", None)

    # Numeric fields that may be missing or malformed. The previous
    # implementation used bare except clauses here, which also swallowed
    # SystemExit/KeyboardInterrupt; only conversion/lookup errors are
    # expected, so catch those explicitly.
    _conversion_errors = (KeyError, TypeError, ValueError, OverflowError)

    try:
        peer.propagation_transfer_limit = float(
            dictionary["propagation_transfer_limit"]
        )
    except _conversion_errors:
        peer.propagation_transfer_limit = None

    # Note: must be restored after propagation_transfer_limit, which is
    # the fallback value.
    try:
        peer.propagation_sync_limit = int(dictionary["propagation_sync_limit"])
    except _conversion_errors:
        peer.propagation_sync_limit = peer.propagation_transfer_limit

    try:
        peer.propagation_stamp_cost = int(dictionary["propagation_stamp_cost"])
    except _conversion_errors:
        peer.propagation_stamp_cost = None

    try:
        peer.propagation_stamp_cost_flexibility = int(
            dictionary["propagation_stamp_cost_flexibility"]
        )
    except _conversion_errors:
        peer.propagation_stamp_cost_flexibility = None

    try:
        peer.peering_cost = int(dictionary["peering_cost"])
    except _conversion_errors:
        peer.peering_cost = None

    try:
        peer.sync_strategy = int(dictionary["sync_strategy"])
    except _conversion_errors:
        peer.sync_strategy = LXMPeer.DEFAULT_SYNC_STRATEGY

    # Only restore message ids that still exist in the router's
    # propagation store; stale ids are silently dropped.
    hm_count = 0
    for transient_id in dictionary["handled_ids"]:
        if transient_id in router.propagation_entries:
            peer.add_handled_message(transient_id)
            hm_count += 1

    um_count = 0
    for transient_id in dictionary["unhandled_ids"]:
        if transient_id in router.propagation_entries:
            peer.add_unhandled_message(transient_id)
            um_count += 1

    peer._hm_count = hm_count
    peer._um_count = um_count
    peer._hm_counts_synced = True
    peer._um_counts_synced = True

    del dictionary
    return peer
|
180
|
+
|
|
181
|
+
def to_bytes(self):
    """Serialize this peer's state to a msgpack byte string.

    Inverse of from_bytes(). The handled/unhandled message id
    collections are flattened into plain lists before packing. Key
    insertion order matches the original implementation so the packed
    bytes are identical.
    """
    serialized = {
        "peering_timebase": self.peering_timebase,
        "alive": self.alive,
        "metadata": self.metadata,
        "last_heard": self.last_heard,
        "sync_strategy": self.sync_strategy,
        "peering_key": self.peering_key,
        "destination_hash": self.destination_hash,
        "link_establishment_rate": self.link_establishment_rate,
        "sync_transfer_rate": self.sync_transfer_rate,
        "propagation_transfer_limit": self.propagation_transfer_limit,
        "propagation_sync_limit": self.propagation_sync_limit,
        "propagation_stamp_cost": self.propagation_stamp_cost,
        "propagation_stamp_cost_flexibility": self.propagation_stamp_cost_flexibility,
        "peering_cost": self.peering_cost,
        "last_sync_attempt": self.last_sync_attempt,
        "offered": self.offered,
        "outgoing": self.outgoing,
        "incoming": self.incoming,
        "rx_bytes": self.rx_bytes,
        "tx_bytes": self.tx_bytes,
        "handled_ids": list(self.handled_messages),
        "unhandled_ids": list(self.unhandled_messages),
    }
    return msgpack.packb(serialized)
|
|
221
|
+
|
|
222
|
+
def __init__(self, router, destination_hash, sync_strategy=DEFAULT_SYNC_STRATEGY):
    """Create a peer record for the propagation node at destination_hash.

    :param router: the owning LXMRouter instance.
    :param destination_hash: destination hash of the remote propagation node.
    :param sync_strategy: one of the STRATEGY_* constants; defaults to
        DEFAULT_SYNC_STRATEGY.
    """
    # Liveness and peering state.
    self.alive = False
    self.last_heard = 0
    self.sync_strategy = sync_strategy
    self.peering_key = None
    self.peering_cost = None
    self.metadata = None

    # Sync scheduling and measured link/transfer rates.
    self.next_sync_attempt = 0
    self.last_sync_attempt = 0
    self.sync_backoff = 0
    self.peering_timebase = 0
    self.link_establishment_rate = 0
    self.sync_transfer_rate = 0

    # Limits and stamp costs advertised by the remote peer; unknown
    # until the peer announces them.
    self.propagation_transfer_limit = None
    self.propagation_sync_limit = None
    self.propagation_stamp_cost = None
    self.propagation_stamp_cost_flexibility = None
    self.currently_transferring_messages = None
    self.handled_messages_queue = deque()
    self.unhandled_messages_queue = deque()

    # Transfer statistics.
    self.offered = 0  # Messages offered to this peer
    self.outgoing = 0  # Messages transferred to this peer
    self.incoming = 0  # Messages received from this peer
    self.rx_bytes = 0  # Bytes received from this peer
    self.tx_bytes = 0  # Bytes sent to this peer

    # Cached handled/unhandled counts and their validity flags.
    self._hm_count = 0
    self._um_count = 0
    self._hm_counts_synced = False
    self._um_counts_synced = False

    # Serializes peering key generation across threads.
    self._peering_key_lock = threading.Lock()

    self.link = None
    self.state = LXMPeer.IDLE

    self.last_offer = []

    self.router = router
    self.destination_hash = destination_hash
    # Identity resolution may fail here; sync() retries recall later.
    self.identity = RNS.Identity.recall(destination_hash)
    if self.identity != None:
        self.destination = RNS.Destination(
            self.identity,
            RNS.Destination.OUT,
            RNS.Destination.SINGLE,
            APP_NAME,
            "propagation",
        )
    else:
        self.destination = None
        RNS.log(
            f"Could not recall identity for LXMF propagation peer {RNS.prettyhexrep(self.destination_hash)}, will retry identity resolution on next sync",
            RNS.LOG_WARNING,
        )
|
280
|
+
|
|
281
|
+
def peering_key_ready(self):
    """Return True when a valid peering key meeting the peer's current
    peering cost is available.

    A well-formed key whose proof-of-work value falls short of the
    required cost is discarded so it can be regenerated later.
    """
    if not self.peering_cost:
        return False

    key = self.peering_key
    if not (type(key) == list and len(key) == 2):
        return False

    value = key[1]
    if value >= self.peering_cost:
        return True

    RNS.log(
        f"Peering key value mismatch for {self}. Current value is {value}, but peer requires {self.peering_cost}. Scheduling regeneration...",
        RNS.LOG_WARNING,
    )
    self.peering_key = None
    return False
|
296
|
+
|
|
297
|
+
def peering_key_value(self):
    """Return the proof-of-work value of the current peering key, or
    None when no well-formed key is present."""
    key = self.peering_key
    if type(key) == list and len(key) == 2:
        return key[1]
    return None
|
302
|
+
|
|
303
|
+
def generate_peering_key(self):
    """Generate a proof-of-work peering key for this peer.

    Returns True when a key already exists or was generated
    successfully, False when the peering cost is unknown, required
    identities are unavailable, or generation fell short of the cost.
    Generation is serialized through the peering key lock.
    """
    if self.peering_cost == None:
        return False

    with self._peering_key_lock:
        # Another thread may have produced the key while we waited
        # for the lock.
        if self.peering_key != None:
            return True

        RNS.log(f"Generating peering key for {self}", RNS.LOG_NOTICE)

        if self.router.identity == None:
            RNS.log(
                f"Could not update peering key for {self} since the local LXMF router identity is not configured",
                RNS.LOG_ERROR,
            )
            return False

        if self.identity == None:
            self.identity = RNS.Identity.recall(self.destination_hash)
            if self.identity == None:
                RNS.log(
                    f"Could not update peering key for {self} since its identity could not be recalled",
                    RNS.LOG_ERROR,
                )
                return False

        # The workblock binds the key to this specific peer pairing.
        workblock_material = self.identity.hash + self.router.identity.hash
        stamp, stamp_value = LXStamper.generate_stamp(
            workblock_material,
            self.peering_cost,
            expand_rounds=LXStamper.WORKBLOCK_EXPAND_ROUNDS_PEERING,
        )

        if stamp_value >= self.peering_cost:
            self.peering_key = [stamp, stamp_value]
            RNS.log(
                f"Peering key successfully generated for {self}", RNS.LOG_NOTICE
            )
            return True

    return False
|
341
|
+
|
|
342
|
+
def sync(self):
    """Attempt to synchronise unhandled messages to this peer.

    The sync proceeds only when the backoff window has elapsed, the
    peer's stamp/peering costs are known, and a valid peering key
    exists (otherwise key generation is kicked off in the background
    and the sync is postponed). A transport path is requested when
    missing. Once a link is ready, an offer of unhandled message ids
    is built — respecting the peer's per-message and per-sync size
    limits and its minimum stamp value requirement — and sent via the
    offer request path; offer_response() continues the exchange.
    """
    RNS.log(
        "Initiating LXMF Propagation Node sync with peer "
        + RNS.prettyhexrep(self.destination_hash),
        RNS.LOG_DEBUG,
    )
    self.last_sync_attempt = time.time()

    sync_time_reached = time.time() > self.next_sync_attempt
    stamp_costs_known = (
        self.propagation_stamp_cost != None
        and self.propagation_stamp_cost_flexibility != None
        and self.peering_cost != None
    )
    peering_key_ready = self.peering_key_ready()
    sync_checks = sync_time_reached and stamp_costs_known and peering_key_ready

    if not sync_checks:
        postpone_reason = ""
        try:
            if not sync_time_reached:
                postpone_reason = " due to previous failures"
                # No successful contact since the last attempt; mark
                # the peer as not alive.
                if self.last_sync_attempt > self.last_heard:
                    self.alive = False
            elif not stamp_costs_known:
                postpone_reason = (
                    " since its required stamp costs are not yet known"
                )
            elif not peering_key_ready:
                postpone_reason = " since a peering key has not been generated yet"

                # Generate the key in the background so a later sync
                # attempt can proceed without blocking this caller.
                def job():
                    self.generate_peering_key()

                threading.Thread(target=job, daemon=True).start()

            delay = self.next_sync_attempt - time.time()
            postpone_delay = f" for {RNS.prettytime(delay)}" if delay > 0 else ""
            RNS.log(
                f"Postponing sync with peer {RNS.prettyhexrep(self.destination_hash)}{postpone_delay}{postpone_reason}",
                RNS.LOG_DEBUG,
            )
        except Exception as e:
            RNS.trace_exception(e)

    else:
        if not RNS.Transport.has_path(self.destination_hash):
            RNS.log(
                "No path to peer "
                + RNS.prettyhexrep(self.destination_hash)
                + " exists, requesting...",
                RNS.LOG_DEBUG,
            )
            RNS.Transport.request_path(self.destination_hash)
            time.sleep(LXMPeer.PATH_REQUEST_GRACE)

            # Even if the path arrives now, the sync is deferred to
            # the next scheduled attempt.
            if not RNS.Transport.has_path(self.destination_hash):
                RNS.log(
                    "Path request was not answered, retrying sync with peer "
                    + RNS.prettyhexrep(self.destination_hash)
                    + " later",
                    RNS.LOG_DEBUG,
                )

        else:
            # Retry identity resolution if it failed at construction.
            if self.identity == None:
                self.identity = RNS.Identity.recall(self.destination_hash)
                if self.identity != None:
                    self.destination = RNS.Destination(
                        self.identity,
                        RNS.Destination.OUT,
                        RNS.Destination.SINGLE,
                        APP_NAME,
                        "propagation",
                    )

            if self.destination != None:
                if len(self.unhandled_messages) == 0:
                    RNS.log(
                        f"Sync requested for {self}, but no unhandled messages exist for peer. Sync complete.",
                        RNS.LOG_DEBUG,
                    )
                    return

                if len(self.unhandled_messages) > 0:
                    if self.currently_transferring_messages != None:
                        RNS.log(
                            f"Sync requested for {self}, but current message transfer index was not clear. Aborting.",
                            RNS.LOG_ERROR,
                        )
                        return

                    if self.state == LXMPeer.IDLE:
                        RNS.log(
                            "Establishing link for sync to peer "
                            + RNS.prettyhexrep(self.destination_hash)
                            + "...",
                            RNS.LOG_DEBUG,
                        )
                        # Back off now; a successful sync resets this.
                        self.sync_backoff += LXMPeer.SYNC_BACKOFF_STEP
                        self.next_sync_attempt = time.time() + self.sync_backoff
                        self.link = RNS.Link(
                            self.destination,
                            established_callback=self.link_established,
                            closed_callback=self.link_closed,
                        )
                        self.state = LXMPeer.LINK_ESTABLISHING

                    else:
                        if self.state == LXMPeer.LINK_READY:
                            self.alive = True
                            self.last_heard = time.time()
                            self.sync_backoff = 0

                            # FIX: was min(0, ...), which made the
                            # threshold always non-positive and
                            # disabled the low-stamp-value filter
                            # below entirely. The requirement is the
                            # peer's stamp cost reduced by its
                            # flexibility, clamped at zero — matching
                            # the drop log message further down.
                            min_accepted_cost = max(
                                0,
                                self.propagation_stamp_cost
                                - self.propagation_stamp_cost_flexibility,
                            )

                            RNS.log(
                                "Synchronisation link to peer "
                                + RNS.prettyhexrep(self.destination_hash)
                                + " established, preparing sync offer...",
                                RNS.LOG_DEBUG,
                            )
                            unhandled_entries = []
                            unhandled_ids = []
                            purged_ids = []
                            low_value_ids = []
                            for transient_id in self.unhandled_messages:
                                if transient_id in self.router.propagation_entries:
                                    if (
                                        self.router.get_stamp_value(transient_id)
                                        < min_accepted_cost
                                    ):
                                        low_value_ids.append(transient_id)
                                    else:
                                        unhandled_entry = [
                                            transient_id,
                                            self.router.get_weight(transient_id),
                                            self.router.get_size(transient_id),
                                        ]
                                        unhandled_entries.append(unhandled_entry)
                                else:
                                    purged_ids.append(transient_id)

                            for transient_id in purged_ids:
                                RNS.log(
                                    f"Dropping unhandled message {RNS.prettyhexrep(transient_id)} for peer {RNS.prettyhexrep(self.destination_hash)} since it no longer exists in the message store.",
                                    RNS.LOG_DEBUG,
                                )
                                self.remove_unhandled_message(transient_id)

                            for transient_id in low_value_ids:
                                RNS.log(
                                    f"Dropping unhandled message {RNS.prettyhexrep(transient_id)} for peer {RNS.prettyhexrep(self.destination_hash)} since its stamp value is lower than peer requirement of {min_accepted_cost}.",
                                    RNS.LOG_DEBUG,
                                )
                                self.remove_unhandled_message(transient_id)

                            # Offer lightest-weight messages first.
                            unhandled_entries.sort(key=lambda e: e[1], reverse=False)
                            per_message_overhead = 16  # Really only 2 bytes, but set a bit higher for now
                            cumulative_size = 24  # Initialised to highest reasonable binary structure overhead

                            # FIX: the limits may still be None here;
                            # guard the debug log so it cannot raise
                            # TypeError on None * 1000.
                            per_message_limit_str = (
                                RNS.prettysize(self.propagation_transfer_limit * 1000)
                                if self.propagation_transfer_limit != None
                                else "None"
                            )
                            sync_limit_str = (
                                RNS.prettysize(self.propagation_sync_limit * 1000)
                                if self.propagation_sync_limit != None
                                else "None"
                            )
                            RNS.log(
                                f"Syncing to peer with per-message limit {per_message_limit_str} and sync limit {sync_limit_str}"
                            )  # TODO: Remove debug

                            for unhandled_entry in unhandled_entries:
                                transient_id = unhandled_entry[0]
                                weight = unhandled_entry[1]
                                lxm_size = unhandled_entry[2]
                                lxm_transfer_size = lxm_size + per_message_overhead
                                next_size = cumulative_size + lxm_transfer_size

                                # A message that can never fit the
                                # peer's per-message limit is marked
                                # handled so it is not offered again.
                                if (
                                    self.propagation_transfer_limit != None
                                    and lxm_transfer_size
                                    > (self.propagation_transfer_limit * 1000)
                                ):
                                    self.remove_unhandled_message(transient_id)
                                    self.add_handled_message(transient_id)
                                    continue

                                # A message that merely doesn't fit
                                # this sync round stays unhandled for
                                # a later sync.
                                if (
                                    self.propagation_sync_limit != None
                                    and next_size
                                    >= (self.propagation_sync_limit * 1000)
                                ):
                                    continue

                                cumulative_size += lxm_transfer_size
                                unhandled_ids.append(transient_id)

                            offer = [self.peering_key[0], unhandled_ids]

                            RNS.log(
                                f"Offering {len(unhandled_ids)} messages to peer {RNS.prettyhexrep(self.destination.hash)} ({RNS.prettysize(len(msgpack.packb(unhandled_ids)))})",
                                RNS.LOG_VERBOSE,
                            )
                            self.last_offer = unhandled_ids
                            self.link.request(
                                LXMPeer.OFFER_REQUEST_PATH,
                                offer,
                                response_callback=self.offer_response,
                                failed_callback=self.request_failed,
                            )
                            self.state = LXMPeer.REQUEST_SENT

            else:
                RNS.log(
                    f"Could not request sync to peer {RNS.prettyhexrep(self.destination_hash)} since its identity could not be recalled.",
                    RNS.LOG_ERROR,
                )
|
559
|
+
|
|
560
|
+
def request_failed(self, request_receipt):
    """Failure callback for the sync offer request: tear down the
    link, if any, and return the peer to the idle state."""
    RNS.log(f"Sync request to peer {self.destination} failed", RNS.LOG_DEBUG)
    link = self.link
    if link != None:
        link.teardown()
    self.state = LXMPeer.IDLE
|
565
|
+
|
|
566
|
+
def offer_response(self, request_receipt):
    """Response callback for the sync offer request.

    Interprets the remote peer's response to our message-id offer:
    error codes trigger re-identification, un-peering or throttled
    postponement; False means the peer already has everything offered;
    True means it wants everything; any other value is treated as the
    subset of transient ids the peer wants. Wanted messages are read
    from the message store and pushed over the link as an RNS.Resource
    (concluded in resource_concluded()). Any exception tears down the
    link and returns the peer to the idle state.
    """
    try:
        self.state = LXMPeer.RESPONSE_RECEIVED
        response = request_receipt.response

        wanted_messages = []
        wanted_message_ids = []

        if response == LXMPeer.ERROR_NO_IDENTITY:
            # Identify on the link and immediately retry the sync.
            # NOTE(review): if the link is gone, execution falls
            # through to the teardown path below.
            if self.link != None:
                RNS.log(
                    "Remote peer indicated that no identification was received, retrying...",
                    RNS.LOG_VERBOSE,
                )
                self.link.identify(self.router.identity)
                self.state = LXMPeer.LINK_READY
                self.sync()
                return

        elif response == LXMPeer.ERROR_NO_ACCESS:
            RNS.log(
                "Remote indicated that access was denied, breaking peering",
                RNS.LOG_VERBOSE,
            )
            self.router.unpeer(self.destination_hash)
            return

        elif response == LXMPeer.ERROR_THROTTLED:
            throttle_time = self.router.PN_STAMP_THROTTLE
            RNS.log(
                f"Remote indicated that we're throttled, postponing sync for {RNS.prettytime(throttle_time)}",
                RNS.LOG_VERBOSE,
            )
            self.next_sync_attempt = time.time() + throttle_time
            return

        elif response == False:
            # Peer already has all advertised messages
            for transient_id in self.last_offer:
                if transient_id in self.unhandled_messages:
                    self.add_handled_message(transient_id)
                    self.remove_unhandled_message(transient_id)

        elif response == True:
            # Peer wants all advertised messages
            for transient_id in self.last_offer:
                wanted_messages.append(
                    self.router.propagation_entries[transient_id]
                )
                wanted_message_ids.append(transient_id)

        else:
            # Peer wants some advertised messages
            for transient_id in self.last_offer.copy():
                # If the peer did not want the message, it has
                # already received it from another peer.
                if not transient_id in response:
                    self.add_handled_message(transient_id)
                    self.remove_unhandled_message(transient_id)

            for transient_id in response:
                wanted_messages.append(
                    self.router.propagation_entries[transient_id]
                )
                wanted_message_ids.append(transient_id)

        if len(wanted_messages) > 0:
            RNS.log(
                f"Peer {RNS.prettyhexrep(self.destination_hash)} wanted {str(len(wanted_messages))} of the available messages",
                RNS.LOG_VERBOSE,
            )

            # Load each wanted message's raw LXMF data from disk.
            # Entry layout: index 1 is the stored file path.
            lxm_list = []
            for message_entry in wanted_messages:
                file_path = message_entry[1]
                if os.path.isfile(file_path):
                    file = open(file_path, "rb")
                    lxmf_data = file.read()
                    file.close()
                    lxm_list.append(lxmf_data)

            data = msgpack.packb([time.time(), lxm_list])
            RNS.log(
                f"Total transfer size for this sync is {RNS.prettysize(len(data))}",
                RNS.LOG_VERBOSE,
            )
            # Resource completion is handled in resource_concluded().
            resource = RNS.Resource(
                data, self.link, callback=self.resource_concluded
            )
            self.currently_transferring_messages = wanted_message_ids
            self.current_sync_transfer_started = time.time()
            self.state = LXMPeer.RESOURCE_TRANSFERRING

        else:
            RNS.log(
                f"Peer {RNS.prettyhexrep(self.destination_hash)} did not request any of the available messages, sync completed",
                RNS.LOG_VERBOSE,
            )
            self.offered += len(self.last_offer)
            if self.link != None:
                self.link.teardown()

            self.link = None
            self.state = LXMPeer.IDLE

    except Exception as e:
        # Broad catch by design: any failure while handling the offer
        # response must not leave a dangling link or a stuck state.
        RNS.log(
            "Error while handling offer response from peer "
            + str(self.destination),
            RNS.LOG_ERROR,
        )
        RNS.log("The contained exception was: " + str(e), RNS.LOG_ERROR)

        if self.link != None:
            self.link.teardown()

        self.link = None
        self.state = LXMPeer.IDLE
|
684
|
+
|
|
685
|
+
def resource_concluded(self, resource):
    """Handle conclusion of an outbound sync resource transfer.

    On a completed transfer, marks every transferred message as handled
    for this peer, updates link/transfer statistics, and — for peers using
    the persistent sync strategy — immediately starts another sync round
    if unhandled messages remain. On a failed transfer, tears down the
    link and resets the in-flight transfer state.

    :param resource: The concluded ``RNS.Resource`` instance.
    """
    if resource.status == RNS.Resource.COMPLETE:
        if self.currently_transferring_messages is None:
            RNS.log(
                f"Sync transfer completed on {self}, but transferred message index was unavailable. Aborting.",
                RNS.LOG_ERROR,
            )
            if self.link is not None:
                self.link.teardown()
            self.link = None
            self.state = LXMPeer.IDLE
            # BUGFIX: bail out here. Without this return, execution fell
            # through to the loop below and raised a TypeError by
            # iterating over None.
            return

        for transient_id in self.currently_transferring_messages:
            self.add_handled_message(transient_id)
            self.remove_unhandled_message(transient_id)

        if self.link is not None:
            self.link.teardown()
        self.link = None
        self.state = LXMPeer.IDLE

        rate_str = ""
        if self.current_sync_transfer_started is not None:
            # Effective rate in bits per second over the wall-clock
            # duration of this transfer.
            self.sync_transfer_rate = (resource.get_transfer_size() * 8) / (
                time.time() - self.current_sync_transfer_started
            )
            rate_str = f" at {RNS.prettyspeed(self.sync_transfer_rate)}"

        RNS.log(
            f"Syncing {len(self.currently_transferring_messages)} messages to peer {RNS.prettyhexrep(self.destination_hash)} completed{rate_str}",
            RNS.LOG_VERBOSE,
        )
        self.alive = True
        self.last_heard = time.time()
        self.offered += len(self.last_offer)
        self.outgoing += len(self.currently_transferring_messages)
        self.tx_bytes += resource.get_data_size()

        self.currently_transferring_messages = None
        self.current_sync_transfer_started = None

        # Persistent-strategy peers keep syncing until their backlog
        # of unhandled messages is fully drained.
        if self.sync_strategy == self.STRATEGY_PERSISTENT:
            if self.unhandled_message_count > 0:
                self.sync()

    else:
        RNS.log(
            "Resource transfer for LXMF peer sync failed to "
            + str(self.destination),
            RNS.LOG_VERBOSE,
        )
        if self.link is not None:
            self.link.teardown()
        self.link = None
        self.state = LXMPeer.IDLE
        self.currently_transferring_messages = None
        self.current_sync_transfer_started = None
def link_established(self, link):
    """Callback invoked when a link to this peer has been established.

    Identifies the local router on the new link, records the observed
    link establishment rate, and immediately kicks off a sync.

    :param link: The newly established ``RNS.Link`` instance.
    """
    # Identify on the link handed to the callback rather than self.link:
    # self.link may have been cleared by a concurrent teardown, which
    # would raise an AttributeError here.
    link.identify(self.router.identity)
    link_establishment_rate = link.get_establishment_rate()
    if link_establishment_rate is not None:
        self.link_establishment_rate = link_establishment_rate

    self.state = LXMPeer.LINK_READY
    self.next_sync_attempt = 0
    self.sync()
def link_closed(self, link):
    """Callback invoked when the link to this peer closes; reset to idle.

    :param link: The closed ``RNS.Link`` instance (unused).
    """
    self.state = LXMPeer.IDLE
    self.link = None
def queued_items(self):
    """Return True when either message queue still holds pending items."""
    return bool(self.handled_messages_queue or self.unhandled_messages_queue)
def queue_unhandled_message(self, transient_id):
    """Enqueue *transient_id* for later registration as unhandled."""
    self.unhandled_messages_queue += [transient_id]
def queue_handled_message(self, transient_id):
    """Enqueue *transient_id* for later registration as handled."""
    self.handled_messages_queue += [transient_id]
def process_queues(self):
    """Drain both pending-message queues into the router's entry table.

    Takes a one-time snapshot of the current handled/unhandled message
    sets, then pops each queued transient ID and applies it against that
    snapshot, so each entry is only registered once per drain.
    """
    if not (self.unhandled_messages_queue or self.handled_messages_queue):
        return

    handled_snapshot = self.handled_messages
    unhandled_snapshot = self.unhandled_messages

    while self.handled_messages_queue:
        transient_id = self.handled_messages_queue.pop()
        if transient_id not in handled_snapshot:
            self.add_handled_message(transient_id)
        if transient_id in unhandled_snapshot:
            self.remove_unhandled_message(transient_id)

    while self.unhandled_messages_queue:
        transient_id = self.unhandled_messages_queue.pop()
        if (
            transient_id not in handled_snapshot
            and transient_id not in unhandled_snapshot
        ):
            self.add_unhandled_message(transient_id)

    # Release the snapshots promptly; they can be large.
    del handled_snapshot, unhandled_snapshot
@property
def handled_messages(self):
    """Transient IDs of propagation entries already handled for this peer.

    Also refreshes the cached handled-message count as a side effect.
    """
    entries = self.router.propagation_entries.copy()
    handled = [
        tid for tid in entries if self.destination_hash in entries[tid][4]
    ]
    self._hm_count = len(handled)
    del entries
    self._hm_counts_synced = True
    return handled
@property
def unhandled_messages(self):
    """Transient IDs of propagation entries still unhandled for this peer.

    Also refreshes the cached unhandled-message count as a side effect.
    """
    entries = self.router.propagation_entries.copy()
    unhandled = [
        tid for tid in entries if self.destination_hash in entries[tid][5]
    ]
    self._um_count = len(unhandled)
    del entries
    self._um_counts_synced = True
    return unhandled
@property
def handled_message_count(self):
    """Cached handled-message count, refreshed when marked stale."""
    if self._hm_counts_synced:
        return self._hm_count
    self._update_counts()
    return self._hm_count
@property
def unhandled_message_count(self):
    """Cached unhandled-message count, refreshed when marked stale."""
    if self._um_counts_synced:
        return self._um_count
    self._update_counts()
    return self._um_count
@property
def acceptance_rate(self):
    """Fraction of offered messages this peer accepted (0 if none offered)."""
    if self.offered == 0:
        return 0
    return self.outgoing / self.offered
def _update_counts(self):
    """Refresh any stale cached message counts.

    Reads the ``handled_messages`` / ``unhandled_messages`` properties
    purely for their side effect of recomputing ``_hm_count`` /
    ``_um_count``; the returned lists are discarded immediately.
    """
    if not self._hm_counts_synced:
        refreshed = self.handled_messages
        del refreshed

    if not self._um_counts_synced:
        refreshed = self.unhandled_messages
        del refreshed
def add_handled_message(self, transient_id):
    """Record this peer as having handled *transient_id*.

    No-op when the transient ID is unknown to the router or the peer is
    already listed; otherwise appends the peer's destination hash to the
    entry's handled list and marks the cached count stale.
    """
    entry = self.router.propagation_entries.get(transient_id)
    if entry is None:
        return
    if self.destination_hash not in entry[4]:
        entry[4].append(self.destination_hash)
        self._hm_counts_synced = False
def add_unhandled_message(self, transient_id):
    """Record *transient_id* as still unhandled for this peer.

    No-op when the transient ID is unknown to the router or already
    listed; otherwise appends the peer's destination hash to the entry's
    unhandled list and bumps the cached unhandled count directly.
    """
    entry = self.router.propagation_entries.get(transient_id)
    if entry is None:
        return
    if self.destination_hash not in entry[5]:
        entry[5].append(self.destination_hash)
        self._um_count += 1
def remove_handled_message(self, transient_id):
    """Clear this peer's handled mark for *transient_id*, if present.

    No-op when the transient ID is unknown to the router or the peer is
    not listed; otherwise removes the peer's destination hash from the
    entry's handled list and marks the cached count stale.
    """
    entry = self.router.propagation_entries.get(transient_id)
    if entry is None:
        return
    if self.destination_hash in entry[4]:
        entry[4].remove(self.destination_hash)
        self._hm_counts_synced = False
def remove_unhandled_message(self, transient_id):
    """Clear this peer's unhandled mark for *transient_id*, if present.

    No-op when the transient ID is unknown to the router or the peer is
    not listed; otherwise removes the peer's destination hash from the
    entry's unhandled list and marks the cached count stale.
    """
    entry = self.router.propagation_entries.get(transient_id)
    if entry is None:
        return
    if self.destination_hash in entry[5]:
        entry[5].remove(self.destination_hash)
        self._um_counts_synced = False
@property
def name(self):
    """Return the peer's display name decoded from metadata, or None.

    Returns None when metadata is not a dict, the name key is absent,
    or the stored value cannot be decoded as UTF-8.
    """
    if not isinstance(self.metadata, dict):
        return None
    raw_name = self.metadata.get(PN_META_NAME)
    if raw_name is None:
        return None
    try:
        return raw_name.decode("utf-8")
    # Narrowed from a bare except: a bare except also swallows
    # SystemExit/KeyboardInterrupt. AttributeError covers non-bytes
    # values; UnicodeDecodeError covers invalid UTF-8.
    except (AttributeError, UnicodeDecodeError):
        return None
def __str__(self):
    """Return a human-readable identifier for this peer."""
    if not self.destination_hash:
        return "<Unknown>"
    return RNS.prettyhexrep(self.destination_hash)
|