ReticulumTelemetryHub 0.1.0__py3-none-any.whl → 0.143.0__py3-none-any.whl
This diff shows the changes between publicly available package versions as released to a supported public registry. It is provided for informational purposes only.
- reticulum_telemetry_hub/api/__init__.py +23 -0
- reticulum_telemetry_hub/api/models.py +323 -0
- reticulum_telemetry_hub/api/service.py +836 -0
- reticulum_telemetry_hub/api/storage.py +528 -0
- reticulum_telemetry_hub/api/storage_base.py +156 -0
- reticulum_telemetry_hub/api/storage_models.py +118 -0
- reticulum_telemetry_hub/atak_cot/__init__.py +49 -0
- reticulum_telemetry_hub/atak_cot/base.py +277 -0
- reticulum_telemetry_hub/atak_cot/chat.py +506 -0
- reticulum_telemetry_hub/atak_cot/detail.py +235 -0
- reticulum_telemetry_hub/atak_cot/event.py +181 -0
- reticulum_telemetry_hub/atak_cot/pytak_client.py +569 -0
- reticulum_telemetry_hub/atak_cot/tak_connector.py +848 -0
- reticulum_telemetry_hub/config/__init__.py +25 -0
- reticulum_telemetry_hub/config/constants.py +7 -0
- reticulum_telemetry_hub/config/manager.py +515 -0
- reticulum_telemetry_hub/config/models.py +215 -0
- reticulum_telemetry_hub/embedded_lxmd/__init__.py +5 -0
- reticulum_telemetry_hub/embedded_lxmd/embedded.py +418 -0
- reticulum_telemetry_hub/internal_api/__init__.py +21 -0
- reticulum_telemetry_hub/internal_api/bus.py +344 -0
- reticulum_telemetry_hub/internal_api/core.py +690 -0
- reticulum_telemetry_hub/internal_api/v1/__init__.py +74 -0
- reticulum_telemetry_hub/internal_api/v1/enums.py +109 -0
- reticulum_telemetry_hub/internal_api/v1/manifest.json +8 -0
- reticulum_telemetry_hub/internal_api/v1/schemas.py +478 -0
- reticulum_telemetry_hub/internal_api/versioning.py +63 -0
- reticulum_telemetry_hub/lxmf_daemon/Handlers.py +122 -0
- reticulum_telemetry_hub/lxmf_daemon/LXMF.py +252 -0
- reticulum_telemetry_hub/lxmf_daemon/LXMPeer.py +898 -0
- reticulum_telemetry_hub/lxmf_daemon/LXMRouter.py +4227 -0
- reticulum_telemetry_hub/lxmf_daemon/LXMessage.py +1006 -0
- reticulum_telemetry_hub/lxmf_daemon/LXStamper.py +490 -0
- reticulum_telemetry_hub/lxmf_daemon/__init__.py +10 -0
- reticulum_telemetry_hub/lxmf_daemon/_version.py +1 -0
- reticulum_telemetry_hub/lxmf_daemon/lxmd.py +1655 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/fields/field_telemetry_stream.py +6 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/__init__.py +3 -0
- {lxmf_telemetry → reticulum_telemetry_hub/lxmf_telemetry}/model/persistance/appearance.py +19 -19
- {lxmf_telemetry → reticulum_telemetry_hub/lxmf_telemetry}/model/persistance/peer.py +17 -13
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/__init__.py +65 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/acceleration.py +68 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/ambient_light.py +37 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/angular_velocity.py +68 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/battery.py +68 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/connection_map.py +258 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/generic.py +841 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/gravity.py +68 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/humidity.py +37 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/information.py +42 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/location.py +110 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/lxmf_propagation.py +429 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/magnetic_field.py +68 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/physical_link.py +53 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/pressure.py +37 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/proximity.py +37 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/received.py +75 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/rns_transport.py +209 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/sensor.py +65 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/sensor_enum.py +27 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/sensor_mapping.py +58 -0
- reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/temperature.py +37 -0
- {lxmf_telemetry → reticulum_telemetry_hub/lxmf_telemetry}/model/persistance/sensors/time.py +36 -32
- {lxmf_telemetry → reticulum_telemetry_hub/lxmf_telemetry}/model/persistance/telemeter.py +26 -23
- reticulum_telemetry_hub/lxmf_telemetry/sampler.py +229 -0
- reticulum_telemetry_hub/lxmf_telemetry/telemeter_manager.py +409 -0
- reticulum_telemetry_hub/lxmf_telemetry/telemetry_controller.py +804 -0
- reticulum_telemetry_hub/northbound/__init__.py +5 -0
- reticulum_telemetry_hub/northbound/app.py +195 -0
- reticulum_telemetry_hub/northbound/auth.py +119 -0
- reticulum_telemetry_hub/northbound/gateway.py +310 -0
- reticulum_telemetry_hub/northbound/internal_adapter.py +302 -0
- reticulum_telemetry_hub/northbound/models.py +213 -0
- reticulum_telemetry_hub/northbound/routes_chat.py +123 -0
- reticulum_telemetry_hub/northbound/routes_files.py +119 -0
- reticulum_telemetry_hub/northbound/routes_rest.py +345 -0
- reticulum_telemetry_hub/northbound/routes_subscribers.py +150 -0
- reticulum_telemetry_hub/northbound/routes_topics.py +178 -0
- reticulum_telemetry_hub/northbound/routes_ws.py +107 -0
- reticulum_telemetry_hub/northbound/serializers.py +72 -0
- reticulum_telemetry_hub/northbound/services.py +373 -0
- reticulum_telemetry_hub/northbound/websocket.py +855 -0
- reticulum_telemetry_hub/reticulum_server/__main__.py +2237 -0
- reticulum_telemetry_hub/reticulum_server/command_manager.py +1268 -0
- reticulum_telemetry_hub/reticulum_server/command_text.py +399 -0
- reticulum_telemetry_hub/reticulum_server/constants.py +1 -0
- reticulum_telemetry_hub/reticulum_server/event_log.py +357 -0
- reticulum_telemetry_hub/reticulum_server/internal_adapter.py +358 -0
- reticulum_telemetry_hub/reticulum_server/outbound_queue.py +312 -0
- reticulum_telemetry_hub/reticulum_server/services.py +422 -0
- reticulumtelemetryhub-0.143.0.dist-info/METADATA +181 -0
- reticulumtelemetryhub-0.143.0.dist-info/RECORD +97 -0
- {reticulumtelemetryhub-0.1.0.dist-info → reticulumtelemetryhub-0.143.0.dist-info}/WHEEL +1 -1
- reticulumtelemetryhub-0.143.0.dist-info/licenses/LICENSE +277 -0
- lxmf_telemetry/model/fields/field_telemetry_stream.py +0 -7
- lxmf_telemetry/model/persistance/__init__.py +0 -3
- lxmf_telemetry/model/persistance/sensors/location.py +0 -69
- lxmf_telemetry/model/persistance/sensors/magnetic_field.py +0 -36
- lxmf_telemetry/model/persistance/sensors/sensor.py +0 -44
- lxmf_telemetry/model/persistance/sensors/sensor_enum.py +0 -24
- lxmf_telemetry/model/persistance/sensors/sensor_mapping.py +0 -9
- lxmf_telemetry/telemetry_controller.py +0 -124
- reticulum_server/main.py +0 -182
- reticulumtelemetryhub-0.1.0.dist-info/METADATA +0 -15
- reticulumtelemetryhub-0.1.0.dist-info/RECORD +0 -19
- {lxmf_telemetry → reticulum_telemetry_hub}/__init__.py +0 -0
- {lxmf_telemetry/model/persistance/sensors → reticulum_telemetry_hub/lxmf_telemetry}/__init__.py +0 -0
- {reticulum_server → reticulum_telemetry_hub/reticulum_server}/__init__.py +0 -0
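
The rename entries above fold the old top-level lxmf_telemetry and reticulum_server packages into a single reticulum_telemetry_hub namespace. A minimal sketch of what that means for downstream imports (module names are taken from the file list; the snippet itself is illustrative and not part of the diff):

    # Hypothetical caller code, shown only to illustrate the package move.
    # 0.1.0 layout (now removed):
    #     from lxmf_telemetry import telemetry_controller
    # 0.143.0 layout:
    from reticulum_telemetry_hub.lxmf_telemetry import telemetry_controller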
reticulum_telemetry_hub/config/models.py
@@ -0,0 +1,215 @@
"""Data models representing configuration shapes for the hub."""

from __future__ import annotations

from configparser import ConfigParser
from dataclasses import dataclass, field
from importlib import metadata
from pathlib import Path
from typing import Optional, Tuple

from reticulum_telemetry_hub.config.constants import (
    DEFAULT_ANNOUNCE_INTERVAL,
    DEFAULT_HUB_TELEMETRY_INTERVAL,
    DEFAULT_LOG_LEVEL_NAME,
    DEFAULT_SERVICE_TELEMETRY_INTERVAL,
)


@dataclass
class RNSInterfaceConfig:
    """Represents the minimal subset of the TCP server interface configuration."""

    listen_ip: str = "0.0.0.0"
    listen_port: int = 4242
    interface_enabled: bool = True
    interface_type: str = "TCPServerInterface"

    def to_dict(self) -> dict:
        """Serialise the TCP interface configuration.

        Returns:
            dict: Mapping consumable by Reticulum configuration writers.
        """

        return {
            "listen_ip": self.listen_ip,
            "listen_port": self.listen_port,
            "interface_enabled": self.interface_enabled,
            "type": self.interface_type,
        }


@dataclass
class ReticulumConfig:
    """Object view of the Reticulum configuration file."""

    path: Path
    enable_transport: bool = True
    share_instance: bool = True
    tcp_interface: RNSInterfaceConfig = field(default_factory=RNSInterfaceConfig)

    def to_dict(self) -> dict:
        """Serialise the Reticulum configuration values.

        Returns:
            dict: Flattened representation including nested interfaces.
        """

        data = {
            "path": str(self.path),
            "enable_transport": self.enable_transport,
            "share_instance": self.share_instance,
        }
        data["tcp_interface"] = self.tcp_interface.to_dict()
        return data


@dataclass
class LXMFRouterConfig:
    """Object view of the LXMF router/propagation configuration."""

    path: Path
    enable_node: bool = True
    announce_interval_minutes: int = 10
    display_name: str = "RTH_router"

    def to_dict(self) -> dict:
        """Serialise LXMF router configuration fields.

        Returns:
            dict: Mapping used by the embedded LXMF daemon.
        """

        return {
            "path": str(self.path),
            "enable_node": self.enable_node,
            "announce_interval_minutes": self.announce_interval_minutes,
            "display_name": self.display_name,
        }


@dataclass
class HubRuntimeConfig:  # pylint: disable=too-many-instance-attributes
    """Configuration values that guide the hub runtime defaults."""

    display_name: str = "RTH"
    announce_interval: int = DEFAULT_ANNOUNCE_INTERVAL
    hub_telemetry_interval: int = DEFAULT_HUB_TELEMETRY_INTERVAL
    service_telemetry_interval: int = DEFAULT_SERVICE_TELEMETRY_INTERVAL
    log_level: str = DEFAULT_LOG_LEVEL_NAME
    embedded_lxmd: bool = False
    default_services: Tuple[str, ...] = ()
    gpsd_host: str = "127.0.0.1"
    gpsd_port: int = 2947
    reticulum_config_path: Path | None = None
    lxmf_router_config_path: Path | None = None
    telemetry_filename: str = "telemetry.ini"
    file_storage_path: Path | None = None
    image_storage_path: Path | None = None


@dataclass
class HubAppConfig:  # pylint: disable=too-many-instance-attributes
    """Aggregated configuration for the telemetry hub runtime."""

    storage_path: Path
    database_path: Path
    hub_database_path: Path
    file_storage_path: Path
    image_storage_path: Path
    runtime: "HubRuntimeConfig"
    reticulum: ReticulumConfig
    lxmf_router: LXMFRouterConfig
    app_name: str = "ReticulumTelemetryHub"
    app_version: Optional[str] = None
    app_description: str = ""
    tak_connection: "TakConnectionConfig | None" = None

    def to_reticulum_info_dict(self) -> dict:
        """Return a dict compatible with the ReticulumInfo schema.

        Returns:
            dict: Snapshot of the Reticulum runtime configuration.
        """
        return {
            "is_transport_enabled": self.reticulum.enable_transport,
            "is_connected_to_shared_instance": self.reticulum.share_instance,
            "reticulum_config_path": str(self.reticulum.path),
            "database_path": str(self.database_path),
            "storage_path": str(self.storage_path),
            "file_storage_path": str(self.file_storage_path),
            "image_storage_path": str(self.image_storage_path),
            "rns_version": self._safe_get_version("RNS"),
            "lxmf_version": self._safe_get_version("LXMF"),
            "app_name": self.app_name or "ReticulumTelemetryHub",
            "app_version": self.app_version
            or self._safe_get_version("ReticulumTelemetryHub"),
            "app_description": self.app_description or "",
        }

    @staticmethod
    def _safe_get_version(distribution: str) -> str:
        try:
            return metadata.version(distribution)
        except metadata.PackageNotFoundError:
            return "unknown"
        # Reason: metadata providers may raise unexpected runtime errors in constrained environments.
        except Exception:  # pylint: disable=broad-exception-caught
            return "unknown"


@dataclass
class TakConnectionConfig:  # pylint: disable=too-many-instance-attributes
    """Settings that control TAK/CoT connectivity."""

    cot_url: str = "tcp://127.0.0.1:8087"
    callsign: str = "RTH"
    poll_interval_seconds: float = 30.0
    keepalive_interval_seconds: float = 60.0
    tls_client_cert: str | None = None
    tls_client_key: str | None = None
    tls_ca: str | None = None
    tls_insecure: bool = False
    tak_proto: int = 0
    fts_compat: int = 1

    def to_config_parser(self) -> ConfigParser:
        """Return a ConfigParser that PyTAK understands.

        Returns:
            ConfigParser: Parser configured with PyTAK-compatible values.
        """

        parser = ConfigParser()
        parser["fts"] = {
            "COT_URL": self.cot_url,
            "CALLSIGN": self.callsign,
            "SSL_CLIENT_CERT": self.tls_client_cert or "",
            "SSL_CLIENT_KEY": self.tls_client_key or "",
            "SSL_CLIENT_CAFILE": self.tls_ca or "",
            "SSL_VERIFY": "false" if self.tls_insecure else "true",
            "TAK_PROTO": str(self.tak_proto),
            "FTS_COMPAT": str(self.fts_compat),
        }
        return parser

    def to_dict(self) -> dict:
        """Return a serialisable representation for debugging or logs.

        Returns:
            dict: Copy of the connector settings for display purposes.
        """

        return {
            "cot_url": self.cot_url,
            "callsign": self.callsign,
            "poll_interval_seconds": self.poll_interval_seconds,
            "keepalive_interval_seconds": self.keepalive_interval_seconds,
            "tls_client_cert": self.tls_client_cert,
            "tls_client_key": self.tls_client_key,
            "tls_ca": self.tls_ca,
            "tls_insecure": self.tls_insecure,
            "tak_proto": self.tak_proto,
            "fts_compat": self.fts_compat,
        }
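
The TakConnectionConfig dataclass added above maps hub settings onto the sectioned key/value form that PyTAK expects. A minimal usage sketch (not part of the diff; the URL and callsign values are made up for illustration):

    # Illustrative only: exercising the new dataclass from config/models.py.
    from reticulum_telemetry_hub.config.models import TakConnectionConfig

    tak = TakConnectionConfig(cot_url="tls://tak.example.org:8089", callsign="HUB-1")
    parser = tak.to_config_parser()      # ConfigParser with a single [fts] section
    print(parser["fts"]["COT_URL"])      # -> tls://tak.example.org:8089
    print(parser["fts"]["SSL_VERIFY"])   # -> "true" unless tls_insecure is set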
reticulum_telemetry_hub/embedded_lxmd/embedded.py
@@ -0,0 +1,418 @@
from __future__ import annotations

import threading
import time
from collections.abc import Callable
from dataclasses import dataclass
from datetime import datetime, timezone
from typing import TYPE_CHECKING, Any, Optional

import LXMF
import RNS
from msgpack import packb

from reticulum_telemetry_hub.config.manager import HubConfigurationManager
from reticulum_telemetry_hub.lxmf_telemetry.model.persistance.sensors.lxmf_propagation import (
    LXMFPropagation,
)
from reticulum_telemetry_hub.lxmf_telemetry.model.persistance.sensors.sensor_enum import (
    SID_LXMF_PROPAGATION,
)

if TYPE_CHECKING:
    from reticulum_telemetry_hub.lxmf_telemetry.telemetry_controller import (
        TelemetryController,
    )


def _utcnow() -> datetime:
    return datetime.now(timezone.utc).replace(tzinfo=None)


@dataclass
class EmbeddedLxmdConfig:
    """Runtime configuration for the embedded LXMD service."""

    enable_propagation_node: bool
    announce_interval_seconds: int

    @classmethod
    def from_manager(cls, manager: HubConfigurationManager) -> "EmbeddedLxmdConfig":
        lxmf_config = manager.config.lxmf_router
        interval = max(1, int(lxmf_config.announce_interval_minutes) * 60)
        return cls(
            enable_propagation_node=lxmf_config.enable_node,
            announce_interval_seconds=interval,
        )


class EmbeddedLxmd:
    """Run the LXMF router propagation loop within the current process.

    The stock ``lxmd`` daemon starts a couple of helper threads that periodically
    announce the delivery destination and, when configured, run the propagation
    node loop. When the hub is executed in *embedded* mode those responsibilities
    need to run side-by-side with the main application instead of being spawned
    as a separate process. ``EmbeddedLxmd`` mirrors the subset of ``lxmd``'s
    behaviour that ReticulumTelemetryHub relies on and provides an explicit
    lifecycle so the threads can be shut down gracefully.
    """

    DEFERRED_JOBS_DELAY = 10
    JOBS_INTERVAL_SECONDS = 5

    PROPAGATION_UPTIME_GRANULARITY = 30

    def __init__(
        self,
        router: LXMF.LXMRouter,
        destination: RNS.Destination,
        config_manager: Optional[HubConfigurationManager] = None,
        telemetry_controller: Optional[TelemetryController] = None,
    ) -> None:
        self.router = router
        self.destination = destination
        self.config_manager = config_manager or HubConfigurationManager()
        self.config = EmbeddedLxmdConfig.from_manager(self.config_manager)
        self.telemetry_controller = telemetry_controller
        self._propagation_observers: list[Callable[[dict[str, Any]], None]] = []
        self._propagation_snapshot: bytes | None = None
        self._propagation_lock = threading.Lock()
        if self.telemetry_controller is not None:
            self.add_propagation_observer(self._persist_propagation_snapshot)
        self._stop_event = threading.Event()
        self._threads: list[threading.Thread] = []
        self._started = False
        self._last_peer_announce: float | None = None
        self._last_node_announce: float | None = None

    def start(self) -> None:
        """Start the embedded propagation threads if not already running."""

        if self._started:
            return

        if self.config.enable_propagation_node:
            try:
                self.router.enable_propagation()
            except Exception as exc:  # pragma: no cover - defensive logging
                RNS.log(
                    f"Failed to enable LXMF propagation node in embedded mode: {exc}",
                    RNS.LOG_ERROR,
                )

        self._started = True
        self._start_thread(self._deferred_start_jobs)

    def stop(self) -> None:
        """Request the helper threads to stop and wait for them to finish."""

        if not self._started:
            return

        self._stop_event.set()
        for thread in self._threads:
            thread.join()
        self._threads.clear()
        # Allow future ``start`` calls to run the deferred jobs loop again.
        self._stop_event.clear()
        self._started = False
        self._maybe_emit_propagation_update(force=True)

    def add_propagation_observer(
        self, observer: Callable[[dict[str, Any]], None]
    ) -> None:
        """Register a callback notified whenever propagation state changes."""

        self._propagation_observers.append(observer)

    # ------------------------------------------------------------------ #
    # private helpers
    # ------------------------------------------------------------------ #
    def _start_thread(self, target) -> None:
        thread = threading.Thread(target=target, daemon=True)
        thread.start()
        self._threads.append(thread)

    def _announce_delivery(self) -> None:
        try:
            self.router.announce(self.destination.hash)
        except Exception as exc:  # pragma: no cover - logging guard
            RNS.log(
                f"Failed to announce embedded LXMF destination: {exc}",
                RNS.LOG_ERROR,
            )

    def _announce_propagation(self) -> None:
        try:
            self.router.announce_propagation_node()
        except Exception as exc:  # pragma: no cover - logging guard
            RNS.log(
                f"Failed to announce embedded propagation node: {exc}",
                RNS.LOG_ERROR,
            )

    def _baseline_propagation_payload(self) -> dict[str, Any]:
        peers = getattr(self.router, "peers", {}) or {}
        static_peers = getattr(self.router, "static_peers", []) or []
        destination_hash = getattr(
            getattr(self.router, "propagation_destination", None), "hash", None
        )
        identity_hash = getattr(getattr(self.router, "identity", None), "hash", None)

        total_peers = len(peers)
        return {
            "destination_hash": destination_hash,
            "identity_hash": identity_hash,
            "uptime": None,
            "delivery_limit": getattr(self.router, "delivery_per_transfer_limit", None),
            "propagation_limit": getattr(
                self.router, "propagation_per_transfer_limit", None
            ),
            "autopeer_maxdepth": getattr(self.router, "autopeer_maxdepth", None),
            "from_static_only": getattr(self.router, "from_static_only", None),
            "messagestore": None,
            "clients": None,
            "unpeered_propagation_incoming": getattr(
                self.router, "unpeered_propagation_incoming", None
            ),
            "unpeered_propagation_rx_bytes": getattr(
                self.router, "unpeered_propagation_rx_bytes", None
            ),
            "static_peers": len(static_peers),
            "total_peers": total_peers,
            "active_peers": 0,
            "unreachable_peers": total_peers,
            "max_peers": getattr(self.router, "max_peers", None),
            "peered_propagation_rx_bytes": 0,
            "peered_propagation_tx_bytes": 0,
            "peered_propagation_offered": 0,
            "peered_propagation_outgoing": 0,
            "peered_propagation_incoming": 0,
            "peered_propagation_unhandled": 0,
            "peered_propagation_max_unhandled": 0,
            "peers": {},
        }

    def _normalize_propagation_stats(
        self, stats: dict[str, Any] | None
    ) -> dict[str, Any]:
        payload = self._baseline_propagation_payload()
        if not stats:
            return payload

        payload.update(
            {
                "destination_hash": stats.get("destination_hash")
                or payload["destination_hash"],
                "identity_hash": stats.get("identity_hash") or payload["identity_hash"],
                "uptime": stats.get("uptime"),
                "delivery_limit": stats.get("delivery_limit"),
                "propagation_limit": stats.get("propagation_limit"),
                "autopeer_maxdepth": stats.get("autopeer_maxdepth"),
                "from_static_only": stats.get("from_static_only"),
                "messagestore": stats.get("messagestore"),
                "clients": stats.get("clients"),
                "unpeered_propagation_incoming": stats.get(
                    "unpeered_propagation_incoming"
                ),
                "unpeered_propagation_rx_bytes": stats.get(
                    "unpeered_propagation_rx_bytes"
                ),
                "static_peers": stats.get("static_peers", payload["static_peers"]),
                "max_peers": stats.get("max_peers", payload["max_peers"]),
            }
        )

        peers_payload: dict[bytes, dict[str, Any]] = {}
        active = 0
        rx_sum = tx_sum = offered_sum = outgoing_sum = incoming_sum = unhandled_sum = 0
        max_unhandled = 0

        peer_stats = stats.get("peers") or {}
        for peer_hash, peer_data in sorted(
            peer_stats.items(), key=lambda item: item[0]
        ):
            if not isinstance(peer_hash, (bytes, bytearray, memoryview)):
                continue
            key = bytes(peer_hash)
            messages = peer_data.get("messages") or {}
            peers_payload[key] = {
                "type": peer_data.get("type"),
                "state": peer_data.get("state"),
                "alive": peer_data.get("alive"),
                "last_heard": peer_data.get("last_heard"),
                "next_sync_attempt": peer_data.get("next_sync_attempt"),
                "last_sync_attempt": peer_data.get("last_sync_attempt"),
                "sync_backoff": peer_data.get("sync_backoff"),
                "peering_timebase": peer_data.get("peering_timebase"),
                "ler": peer_data.get("ler"),
                "str": peer_data.get("str"),
                "transfer_limit": peer_data.get("transfer_limit"),
                "network_distance": peer_data.get("network_distance"),
                "rx_bytes": peer_data.get("rx_bytes"),
                "tx_bytes": peer_data.get("tx_bytes"),
                "messages": {
                    "offered": messages.get("offered"),
                    "outgoing": messages.get("outgoing"),
                    "incoming": messages.get("incoming"),
                    "unhandled": messages.get("unhandled"),
                },
            }

            if peer_data.get("alive"):
                active += 1

            rx_sum += peer_data.get("rx_bytes") or 0
            tx_sum += peer_data.get("tx_bytes") or 0
            offered = messages.get("offered") or 0
            outgoing = messages.get("outgoing") or 0
            incoming = messages.get("incoming") or 0
            unhandled = messages.get("unhandled") or 0

            offered_sum += offered
            outgoing_sum += outgoing
            incoming_sum += incoming
            unhandled_sum += unhandled
            if unhandled > max_unhandled:
                max_unhandled = unhandled

        total_peers = stats.get("total_peers")
        if total_peers is None:
            total_peers = len(peers_payload)

        payload.update(
            {
                "peers": peers_payload,
                "total_peers": total_peers,
                "active_peers": active,
                "unreachable_peers": max(total_peers - active, 0),
                "peered_propagation_rx_bytes": rx_sum,
                "peered_propagation_tx_bytes": tx_sum,
                "peered_propagation_offered": offered_sum,
                "peered_propagation_outgoing": outgoing_sum,
                "peered_propagation_incoming": incoming_sum,
                "peered_propagation_unhandled": unhandled_sum,
                "peered_propagation_max_unhandled": max_unhandled,
            }
        )

        return payload

    def _build_propagation_payload(self) -> dict[str, Any] | None:
        try:
            stats = self.router.compile_stats()
        except Exception as exc:  # pragma: no cover - defensive logging
            RNS.log(
                f"Failed to compile LXMF propagation stats: {exc}",
                RNS.LOG_ERROR,
            )
            return None

        return self._normalize_propagation_stats(stats)

    def _maybe_emit_propagation_update(self, *, force: bool = False) -> None:
        if not self._propagation_observers:
            return

        payload = self._build_propagation_payload()
        if payload is None:
            return

        comparison_payload = dict(payload)
        uptime = comparison_payload.get("uptime")
        if uptime is not None:
            comparison_payload["uptime"] = (
                int(uptime) // self.PROPAGATION_UPTIME_GRANULARITY
            )

        packed = packb(comparison_payload, use_bin_type=True)

        with self._propagation_lock:
            if not force and packed == self._propagation_snapshot:
                return
            self._propagation_snapshot = packed

        self._notify_propagation_observers(payload)

    def _notify_propagation_observers(self, payload: dict[str, Any]) -> None:
        for observer in list(self._propagation_observers):
            try:
                observer(payload)
            except Exception as exc:  # pragma: no cover - defensive logging
                RNS.log(
                    f"Propagation observer failed: {exc}",
                    RNS.LOG_ERROR,
                )

    def _persist_propagation_snapshot(self, payload: dict[str, Any]) -> None:
        if self.telemetry_controller is None:
            return

        sensor = LXMFPropagation()
        sensor.unpack(payload)
        packed_payload = sensor.pack()
        if packed_payload is None:
            return

        peer_hash = (
            RNS.hexrep(self.destination.hash, False)
            if hasattr(self.destination, "hash")
            else ""
        )

        try:
            self.telemetry_controller.save_telemetry(
                {SID_LXMF_PROPAGATION: packed_payload},
                peer_hash,
                _utcnow(),
            )
        except Exception as exc:  # pragma: no cover - defensive logging
            RNS.log(
                f"Failed to persist propagation telemetry: {exc}",
                RNS.LOG_ERROR,
            )

    def _deferred_start_jobs(self) -> None:
        if self._stop_event.wait(self.DEFERRED_JOBS_DELAY):
            return

        self._announce_delivery()
        self._last_peer_announce = time.monotonic()

        if self.config.enable_propagation_node:
            self._announce_propagation()
            self._last_node_announce = self._last_peer_announce

        self._maybe_emit_propagation_update(force=True)
        self._start_thread(self._jobs)

    def _jobs(self) -> None:
        interval = self.config.announce_interval_seconds
        while not self._stop_event.wait(self.JOBS_INTERVAL_SECONDS):
            self._maybe_emit_propagation_update()
            now = time.monotonic()
            if (
                self._last_peer_announce is None
                or now - self._last_peer_announce >= interval
            ):
                self._announce_delivery()
                self._last_peer_announce = now

            if not self.config.enable_propagation_node:
                continue

            if (
                self._last_node_announce is None
                or now - self._last_node_announce >= interval
            ):
                self._announce_propagation()
                self._last_node_announce = now

    # Allow usage as a context manager for convenience
    def __enter__(self) -> "EmbeddedLxmd":
        self.start()
        return self

    def __exit__(self, exc_type, exc, tb) -> None:
        self.stop()
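
Read together with its docstring, the class above gives the hub an in-process replacement for running lxmd separately, driven through start()/stop() or the context-manager protocol. A hedged wiring sketch follows (the router, destination and controller objects are assumed to be created elsewhere by the hub; nothing here is part of the diff):

    # Sketch only: driving EmbeddedLxmd through its context-manager lifecycle.
    from reticulum_telemetry_hub.embedded_lxmd.embedded import EmbeddedLxmd

    def run_embedded(router, destination, telemetry_controller=None):
        embedded = EmbeddedLxmd(router, destination, telemetry_controller=telemetry_controller)
        with embedded:  # __enter__ -> start(): enables propagation (if configured), spawns the deferred-jobs thread
            ...         # the hub's own main loop would run here
        # __exit__ -> stop(): sets the stop event, joins the helper threads
        # and emits a final propagation update to any registered observers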
reticulum_telemetry_hub/internal_api/__init__.py
@@ -0,0 +1,21 @@
"""Internal API package for process-boundary contracts."""

from reticulum_telemetry_hub.internal_api.bus import CommandBus as CommandBus
from reticulum_telemetry_hub.internal_api.bus import EventBus as EventBus
from reticulum_telemetry_hub.internal_api.bus import (
    InProcessCommandBus as InProcessCommandBus,
)
from reticulum_telemetry_hub.internal_api.bus import InProcessEventBus as InProcessEventBus
from reticulum_telemetry_hub.internal_api.bus import InProcessQueryBus as InProcessQueryBus
from reticulum_telemetry_hub.internal_api.bus import QueryBus as QueryBus
from reticulum_telemetry_hub.internal_api.core import InternalApiCore as InternalApiCore

__all__ = [
    "CommandBus",
    "EventBus",
    "InProcessCommandBus",
    "InProcessEventBus",
    "InProcessQueryBus",
    "QueryBus",
    "InternalApiCore",
]
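
The re-exports above let callers pull the bus contracts straight from the package root rather than from internal_api.bus. Only the import is sketched below, since bus.py itself is not shown in this excerpt and its method signatures are unknown here:

    # Grounded in the __init__ shown above; only the package-level import is demonstrated.
    from reticulum_telemetry_hub.internal_api import (
        CommandBus,
        EventBus,
        InProcessCommandBus,
        InternalApiCore,
    )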