ReticulumTelemetryHub 0.1.0__py3-none-any.whl → 0.143.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108)
  1. reticulum_telemetry_hub/api/__init__.py +23 -0
  2. reticulum_telemetry_hub/api/models.py +323 -0
  3. reticulum_telemetry_hub/api/service.py +836 -0
  4. reticulum_telemetry_hub/api/storage.py +528 -0
  5. reticulum_telemetry_hub/api/storage_base.py +156 -0
  6. reticulum_telemetry_hub/api/storage_models.py +118 -0
  7. reticulum_telemetry_hub/atak_cot/__init__.py +49 -0
  8. reticulum_telemetry_hub/atak_cot/base.py +277 -0
  9. reticulum_telemetry_hub/atak_cot/chat.py +506 -0
  10. reticulum_telemetry_hub/atak_cot/detail.py +235 -0
  11. reticulum_telemetry_hub/atak_cot/event.py +181 -0
  12. reticulum_telemetry_hub/atak_cot/pytak_client.py +569 -0
  13. reticulum_telemetry_hub/atak_cot/tak_connector.py +848 -0
  14. reticulum_telemetry_hub/config/__init__.py +25 -0
  15. reticulum_telemetry_hub/config/constants.py +7 -0
  16. reticulum_telemetry_hub/config/manager.py +515 -0
  17. reticulum_telemetry_hub/config/models.py +215 -0
  18. reticulum_telemetry_hub/embedded_lxmd/__init__.py +5 -0
  19. reticulum_telemetry_hub/embedded_lxmd/embedded.py +418 -0
  20. reticulum_telemetry_hub/internal_api/__init__.py +21 -0
  21. reticulum_telemetry_hub/internal_api/bus.py +344 -0
  22. reticulum_telemetry_hub/internal_api/core.py +690 -0
  23. reticulum_telemetry_hub/internal_api/v1/__init__.py +74 -0
  24. reticulum_telemetry_hub/internal_api/v1/enums.py +109 -0
  25. reticulum_telemetry_hub/internal_api/v1/manifest.json +8 -0
  26. reticulum_telemetry_hub/internal_api/v1/schemas.py +478 -0
  27. reticulum_telemetry_hub/internal_api/versioning.py +63 -0
  28. reticulum_telemetry_hub/lxmf_daemon/Handlers.py +122 -0
  29. reticulum_telemetry_hub/lxmf_daemon/LXMF.py +252 -0
  30. reticulum_telemetry_hub/lxmf_daemon/LXMPeer.py +898 -0
  31. reticulum_telemetry_hub/lxmf_daemon/LXMRouter.py +4227 -0
  32. reticulum_telemetry_hub/lxmf_daemon/LXMessage.py +1006 -0
  33. reticulum_telemetry_hub/lxmf_daemon/LXStamper.py +490 -0
  34. reticulum_telemetry_hub/lxmf_daemon/__init__.py +10 -0
  35. reticulum_telemetry_hub/lxmf_daemon/_version.py +1 -0
  36. reticulum_telemetry_hub/lxmf_daemon/lxmd.py +1655 -0
  37. reticulum_telemetry_hub/lxmf_telemetry/model/fields/field_telemetry_stream.py +6 -0
  38. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/__init__.py +3 -0
  39. {lxmf_telemetry → reticulum_telemetry_hub/lxmf_telemetry}/model/persistance/appearance.py +19 -19
  40. {lxmf_telemetry → reticulum_telemetry_hub/lxmf_telemetry}/model/persistance/peer.py +17 -13
  41. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/__init__.py +65 -0
  42. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/acceleration.py +68 -0
  43. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/ambient_light.py +37 -0
  44. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/angular_velocity.py +68 -0
  45. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/battery.py +68 -0
  46. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/connection_map.py +258 -0
  47. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/generic.py +841 -0
  48. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/gravity.py +68 -0
  49. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/humidity.py +37 -0
  50. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/information.py +42 -0
  51. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/location.py +110 -0
  52. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/lxmf_propagation.py +429 -0
  53. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/magnetic_field.py +68 -0
  54. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/physical_link.py +53 -0
  55. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/pressure.py +37 -0
  56. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/proximity.py +37 -0
  57. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/received.py +75 -0
  58. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/rns_transport.py +209 -0
  59. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/sensor.py +65 -0
  60. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/sensor_enum.py +27 -0
  61. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/sensor_mapping.py +58 -0
  62. reticulum_telemetry_hub/lxmf_telemetry/model/persistance/sensors/temperature.py +37 -0
  63. {lxmf_telemetry → reticulum_telemetry_hub/lxmf_telemetry}/model/persistance/sensors/time.py +36 -32
  64. {lxmf_telemetry → reticulum_telemetry_hub/lxmf_telemetry}/model/persistance/telemeter.py +26 -23
  65. reticulum_telemetry_hub/lxmf_telemetry/sampler.py +229 -0
  66. reticulum_telemetry_hub/lxmf_telemetry/telemeter_manager.py +409 -0
  67. reticulum_telemetry_hub/lxmf_telemetry/telemetry_controller.py +804 -0
  68. reticulum_telemetry_hub/northbound/__init__.py +5 -0
  69. reticulum_telemetry_hub/northbound/app.py +195 -0
  70. reticulum_telemetry_hub/northbound/auth.py +119 -0
  71. reticulum_telemetry_hub/northbound/gateway.py +310 -0
  72. reticulum_telemetry_hub/northbound/internal_adapter.py +302 -0
  73. reticulum_telemetry_hub/northbound/models.py +213 -0
  74. reticulum_telemetry_hub/northbound/routes_chat.py +123 -0
  75. reticulum_telemetry_hub/northbound/routes_files.py +119 -0
  76. reticulum_telemetry_hub/northbound/routes_rest.py +345 -0
  77. reticulum_telemetry_hub/northbound/routes_subscribers.py +150 -0
  78. reticulum_telemetry_hub/northbound/routes_topics.py +178 -0
  79. reticulum_telemetry_hub/northbound/routes_ws.py +107 -0
  80. reticulum_telemetry_hub/northbound/serializers.py +72 -0
  81. reticulum_telemetry_hub/northbound/services.py +373 -0
  82. reticulum_telemetry_hub/northbound/websocket.py +855 -0
  83. reticulum_telemetry_hub/reticulum_server/__main__.py +2237 -0
  84. reticulum_telemetry_hub/reticulum_server/command_manager.py +1268 -0
  85. reticulum_telemetry_hub/reticulum_server/command_text.py +399 -0
  86. reticulum_telemetry_hub/reticulum_server/constants.py +1 -0
  87. reticulum_telemetry_hub/reticulum_server/event_log.py +357 -0
  88. reticulum_telemetry_hub/reticulum_server/internal_adapter.py +358 -0
  89. reticulum_telemetry_hub/reticulum_server/outbound_queue.py +312 -0
  90. reticulum_telemetry_hub/reticulum_server/services.py +422 -0
  91. reticulumtelemetryhub-0.143.0.dist-info/METADATA +181 -0
  92. reticulumtelemetryhub-0.143.0.dist-info/RECORD +97 -0
  93. {reticulumtelemetryhub-0.1.0.dist-info → reticulumtelemetryhub-0.143.0.dist-info}/WHEEL +1 -1
  94. reticulumtelemetryhub-0.143.0.dist-info/licenses/LICENSE +277 -0
  95. lxmf_telemetry/model/fields/field_telemetry_stream.py +0 -7
  96. lxmf_telemetry/model/persistance/__init__.py +0 -3
  97. lxmf_telemetry/model/persistance/sensors/location.py +0 -69
  98. lxmf_telemetry/model/persistance/sensors/magnetic_field.py +0 -36
  99. lxmf_telemetry/model/persistance/sensors/sensor.py +0 -44
  100. lxmf_telemetry/model/persistance/sensors/sensor_enum.py +0 -24
  101. lxmf_telemetry/model/persistance/sensors/sensor_mapping.py +0 -9
  102. lxmf_telemetry/telemetry_controller.py +0 -124
  103. reticulum_server/main.py +0 -182
  104. reticulumtelemetryhub-0.1.0.dist-info/METADATA +0 -15
  105. reticulumtelemetryhub-0.1.0.dist-info/RECORD +0 -19
  106. {lxmf_telemetry → reticulum_telemetry_hub}/__init__.py +0 -0
  107. {lxmf_telemetry/model/persistance/sensors → reticulum_telemetry_hub/lxmf_telemetry}/__init__.py +0 -0
  108. {reticulum_server → reticulum_telemetry_hub/reticulum_server}/__init__.py +0 -0
reticulum_telemetry_hub/atak_cot/pytak_client.py
@@ -0,0 +1,569 @@
+"""PyTAK client helpers for sending and receiving Cursor on Target events."""
+
+from __future__ import annotations
+
+import asyncio
+import atexit
+import logging
+import sys
+import types
+import weakref
+import xml.etree.ElementTree as ET
+from configparser import ConfigParser, SectionProxy
+from contextlib import suppress
+from importlib.util import find_spec
+from threading import Event as ThreadEvent
+from threading import Lock
+from threading import Thread
+from typing import Any, Awaitable, Iterable, Optional, Union, cast
+
+import RNS
+if find_spec("aiohttp") is None:
+    aiohttp_stub = types.ModuleType("aiohttp")
+
+    class ClientSession:  # pylint: disable=too-few-public-methods
+        """Fallback aiohttp ClientSession used for pytak import-time typing."""
+
+    aiohttp_stub.ClientSession = ClientSession
+    sys.modules.setdefault("aiohttp", aiohttp_stub)
+import pytak
+
+from . import Event
+
+CotPayload = Union[Event, ET.Element, str, bytes, dict]
+
+
+def _shutdown_weak(ref: "weakref.ReferenceType[PytakClient]") -> None:
+    """Invoke shutdown on a weakly referenced :class:`PytakClient`."""
+
+    client = ref()
+    if client is None:
+        return
+    client._shutdown_sync()  # pylint: disable=protected-access
+
+
+def _is_iterable_payload(obj: Any) -> bool:
+    """Return True when the object should be treated as a payload collection."""
+    if isinstance(obj, (Event, ET.Element, str, bytes, dict)):
+        return False
+    return isinstance(obj, Iterable)
+
+
+def _payload_to_xml_bytes(payload: CotPayload) -> bytes:
+    """Convert supported payload types into ATAK XML bytes."""
+    if isinstance(payload, Event):
+        return payload.to_xml_bytes()
+    if isinstance(payload, ET.Element):
+        return ET.tostring(payload, encoding="utf-8")
+    if isinstance(payload, bytes):
+        return payload
+    if isinstance(payload, str):
+        return payload.encode("utf-8")
+    if isinstance(payload, dict):
+        return Event.from_dict(payload).to_xml_bytes()
+    raise TypeError(f"Unsupported payload type: {type(payload)!r}")
+
+
+class SendWorker(pytak.QueueWorker):
+    """pyTAK worker that pushes ATAK CoT XML payloads onto the TX queue."""
+
+    def __init__(
+        self,
+        queue: asyncio.Queue,
+        config: SectionProxy,
+        message: Union[CotPayload, Iterable[CotPayload]],
+    ) -> None:
+        super().__init__(queue, config)
+        # Ensure a concrete list of CotPayload so the type checker knows
+        # iterating yields a CotPayload for handle_data(...)
+        self._messages: list[CotPayload]
+        if _is_iterable_payload(message):
+            self._messages = list(cast(Iterable[CotPayload], message))
+        else:
+            self._messages = [cast(CotPayload, message)]
+
+    async def handle_data(self, data: CotPayload) -> None:
+        await self.put_queue(_payload_to_xml_bytes(data))
+
+    async def run(self, number_of_iterations: int = 0):
+        _ = number_of_iterations
+        for payload in self._messages:
+            await self.handle_data(payload)
+
+
+class ReceiveWorker(pytak.QueueWorker):
+    """pyTAK worker that optionally parses incoming CoT XML into Event objects."""
+
+    def __init__(
+        self, queue: asyncio.Queue, config: SectionProxy, parse: bool = True
+    ) -> None:
+        super().__init__(queue, config)
+        self._parse = parse
+        # store parsed or raw data here so callers can inspect worker instances
+        self.result: Optional[Any] = None
+
+    async def handle_data(self, data: Any) -> None:
+        """Parse queue data into an Event when requested."""
+
+        if not self._parse:
+            self.result = data
+            return
+        try:
+            self.result = Event.from_xml(data)
+        except (ET.ParseError, TypeError, ValueError, AttributeError):
+            self.result = data
+
+    async def run(self, number_of_iterations: int = 0) -> None:
+        _ = number_of_iterations
+        try:
+            data = await self.queue.get()
+        except (asyncio.CancelledError, RuntimeError):
+            return None
+        await self.handle_data(data)
+        return None
+
+
+class StreamSendWorker(SendWorker):
+    """Continuous send worker that drains an outbound queue."""
+
+    def __init__(
+        self,
+        queue: asyncio.Queue,
+        config: SectionProxy,
+        outbound_queue: asyncio.Queue,
+        stop_event: asyncio.Event,
+    ) -> None:
+        super().__init__(queue, config, [])
+        self._outbound_queue = outbound_queue
+        self._stop_event = stop_event
+
+    async def run(self, number_of_iterations: int = 0):
+        iterations = 0
+        while not self._stop_event.is_set():
+            if number_of_iterations and iterations >= number_of_iterations:
+                return None
+            try:
+                payload = await asyncio.wait_for(
+                    self._outbound_queue.get(), timeout=0.2
+                )
+            except asyncio.TimeoutError:
+                continue
+            except (asyncio.CancelledError, RuntimeError):
+                return None
+            await self.handle_data(payload)
+            iterations += 1
+
+
+class FTSCLITool(pytak.CLITool):
+    """PyTAK CLI tool wrapper that tracks coroutine tasks for testing."""
+
+    def __init__(
+        self,
+        config: ConfigParser,
+        tx_queue: Union[asyncio.Queue, None] = None,
+        rx_queue: Union[asyncio.Queue, None] = None,
+    ) -> None:
+        self.config_parser = config if isinstance(config, ConfigParser) else None
+        section: ConfigParser | SectionProxy
+        if isinstance(config, ConfigParser):
+            section = (
+                config[config.sections()[0]] if config.sections() else config["DEFAULT"]
+            )
+        else:
+            section = config
+        super().__init__(section, tx_queue, rx_queue)
+        self.section = section
+        self.tasks_to_complete = set()
+        self.running_c_tasks = set()
+        # store results from the last run here
+        self.results: list[Any] = []
+
+    def add_c_task(self, task):
+        """Register a coroutine worker task to run alongside pyTAK tasks."""
+
+        self.tasks_to_complete.add(task)
+
+    def run_c_task(self, task):
+        """Schedule a coroutine worker task and keep a handle for teardown."""
+
+        self.running_c_tasks.add(asyncio.ensure_future(task.run()))
+
+    def run_c_tasks(self, tasks=None):
+        """Schedule all coroutine worker tasks."""
+
+        tasks = tasks or self.tasks_to_complete
+        for task in tasks:
+            self.run_c_task(task)
+
+    async def setup(self) -> None:
+        """Connect to the configured TAK server and log outcomes."""
+
+        cot_url = self.config.get("COT_URL", "")
+        try:
+            await super().setup()
+        except Exception as exc:  # pylint: disable=broad-exception-caught
+            self._logger.error(
+                "Failed to connect to TAK server at %s: %s", cot_url or "unknown", exc
+            )
+            RNS.log(
+                f"Failed to connect to TAK server at {cot_url or 'unknown'}: {exc}",
+                RNS.LOG_ERROR,
+            )
+            raise
+        self._logger.info("Connected to TAK server at %s", cot_url or "unknown")
+        RNS.log(f"Connected to TAK server at {cot_url or 'unknown'}", RNS.LOG_INFO)
+
+    async def run(self, number_of_iterations: int = 0) -> None:
+        """Run this tool and its associated coroutine tasks."""
+        _ = number_of_iterations
+        self._logger.info("Run: %s", self.__class__)
+
+        self.run_tasks()
+        self.run_c_tasks()
+
+        _done, _ = await asyncio.wait(
+            self.running_c_tasks, return_when=asyncio.ALL_COMPLETED
+        )
+
+        # Give the TX/RX workers a moment to drain the queues before cancelling
+        # them. Without this pause, the main loop could cancel the TX worker
+        # before it flushes the enqueued CoT payload.
+        await asyncio.sleep(getattr(self, "min_period", 0.1) or 0.1)
+
+        results: list[Any] = []
+
+        # Collect results from worker instances (ReceiveWorker stores parsed data
+        # on .result) instead of relying on coroutine return values.
+        for task in self.tasks_to_complete:
+            res = getattr(task, "result", None)
+            if res is not None:
+                results.append(res)
+
+        # Close the TX and RX workers, i.e. the connection to the TAK server
+        for task in self.running_tasks:
+            task.cancel()
+
+        # store results on the instance and return None to match base class
+        self.results = results
+        return None
+
+
+class PytakWorkerManager:  # pylint: disable=too-many-instance-attributes
+    """Manage a persistent PyTAK CLI tool and worker queue."""
+
+    def __init__(
+        self, cli_tool: FTSCLITool, section: SectionProxy, parse_inbound: bool
+    ) -> None:
+        self.cli_tool = cli_tool
+        self.section = section
+        self.parse_inbound = parse_inbound
+        self._outbound: asyncio.Queue = asyncio.Queue()
+        self._stop_event = asyncio.Event()
+        self._results: list[Any] = []
+        self._task: Optional[asyncio.Task] = None
+        self._session_task: Optional[asyncio.Task] = None
+        self._logger = getattr(cli_tool, "_logger", logging.getLogger(__name__))
+        self._backoff_seconds = 1.0
+
+    async def start(self) -> None:
+        """Start the long-running PyTAK session if it is not active."""
+
+        if self._stop_event.is_set():
+            self._stop_event = asyncio.Event()
+        if self._task is None or self._task.done():
+            self._task = asyncio.create_task(self._run_session())
+
+    async def stop(self) -> None:
+        """Stop the PyTAK session and cancel worker tasks."""
+
+        self._stop_event.set()
+        if self._task is not None:
+            self._task.cancel()
+            with suppress(asyncio.CancelledError):
+                await self._task
+        self._task = None
+        self._session_task = None
+
+    async def enqueue(self, message: CotPayload) -> None:
+        """Queue a payload for transmission over the active session."""
+
+        await self._outbound.put(message)
+
+    def results(self) -> list[Any]:
+        """Return results collected from the most recent receive worker."""
+
+        return list(self._results)
+
+    async def _run_session(self) -> None:
+        """Run a PyTAK session with exponential backoff on failures."""
+        while not self._stop_event.is_set():
+            send_stop = asyncio.Event()
+            try:
+                await self.cli_tool.setup()
+                self.cli_tool.tasks_to_complete.clear()
+                self.cli_tool.running_c_tasks.clear()
+
+                send_worker = StreamSendWorker(
+                    cast(asyncio.Queue, self.cli_tool.tx_queue),
+                    self.section,
+                    self._outbound,
+                    send_stop,
+                )
+                receive_worker = ReceiveWorker(
+                    cast(asyncio.Queue, self.cli_tool.rx_queue),
+                    self.section,
+                    parse=self.parse_inbound,
+                )
+
+                self.cli_tool.add_c_task(send_worker)
+                self.cli_tool.add_c_task(receive_worker)
+                self._results.clear()
+
+                self._session_task = asyncio.create_task(self.cli_tool.run())
+                try:
+                    await self._session_task
+                finally:
+                    send_stop.set()
+                    if self._session_task:
+                        self._session_task.cancel()
+                        with suppress(asyncio.CancelledError):
+                            await self._session_task
+
+                if getattr(receive_worker, "result", None) is not None:
+                    self._results.append(receive_worker.result)
+            except asyncio.CancelledError:
+                send_stop.set()
+                if self._session_task is not None:
+                    self._session_task.cancel()
+                    with suppress(asyncio.CancelledError):
+                        await self._session_task
+                raise
+            except Exception as exc:  # pragma: no cover - defensive logging  # pylint: disable=broad-exception-caught
+                send_stop.set()
+                self._logger.error("PyTAK session error: %s", exc)
+                await asyncio.sleep(self._backoff_seconds)
+                self._backoff_seconds = min(self._backoff_seconds * 2, 30.0)
+            else:
+                send_stop.set()
+                self._backoff_seconds = 1.0
+        return None
+
+
+class PytakClient:  # pylint: disable=too-many-instance-attributes
+    """Utility wrapper that wires ATAK Event payloads into pyTAK workers."""
+
+    def __init__(self, config: Optional[ConfigParser] = None) -> None:
+        self._config = config
+        self._cli_tool: Optional[FTSCLITool] = None
+        self._worker_manager: Optional[PytakWorkerManager] = None
+        self._loop: Optional[asyncio.AbstractEventLoop] = None
+        self._loop_thread: Optional[Thread] = None
+        self._loop_ready = ThreadEvent()
+        self._loop_lock = Lock()
+        atexit.register(_shutdown_weak, weakref.ref(self))
+
+    def __del__(self) -> None:
+        try:
+            self._shutdown_sync()  # pylint: disable=protected-access
+        except Exception:  # pylint: disable=broad-exception-caught
+            pass
+
+    def _setup_config(self) -> ConfigParser:
+        """Create config if a custom one is not passed."""
+        config = ConfigParser()
+        config["fts"] = {
+            "COT_URL": "tcp://127.0.0.1:8087",
+            "CALLSIGN": "FTS_PYTAK",
+            "TAK_PROTO": "0",
+            "FTS_COMPAT": "1",
+        }
+        return config
+
+    def _ensure_config(self, config: Optional[ConfigParser]) -> ConfigParser:
+        """
+        Ensure a configuration object is present for PyTAK workers.
+
+        Args:
+            config (ConfigParser | None): Custom configuration provided by the caller.
+
+        Returns:
+            ConfigParser: The configuration to use for PyTAK interactions.
+        """
+        if config is not None:
+            if self._config is None:
+                self._config = config
+            return config
+        if self._config is None:
+            self._config = self._setup_config()
+        return self._config
+
+    def _config_section(
+        self, config: ConfigParser, section: str = "fts"
+    ) -> SectionProxy:
+        """
+        Return the requested section or a fallback from a configuration object.
+
+        Args:
+            config (ConfigParser): Configuration containing PyTAK settings.
+            section (str): Desired section name. Defaults to ``"fts"``.
+
+        Returns:
+            SectionProxy: Section with connection parameters.
+
+        Raises:
+            ValueError: If the configuration has no sections.
+        """
+        if config.has_section(section):
+            return config[section]
+        sections = config.sections()
+        if sections:
+            return config[sections[0]]
+        raise ValueError("Configuration must contain at least one section.")
+
+    def _ensure_cli_tool(self, config: ConfigParser) -> FTSCLITool:
+        """Create or return a cached CLI tool backed by shared queues."""
+
+        if self._cli_tool is None:
+            tx_queue: asyncio.Queue = asyncio.Queue()
+            rx_queue: asyncio.Queue = asyncio.Queue()
+            self._cli_tool = FTSCLITool(config, tx_queue, rx_queue)
+        return self._cli_tool
+
+    def _ensure_manager(
+        self, config: ConfigParser, parse_inbound: bool
+    ) -> "PytakWorkerManager":
+        """
+        Return a running worker manager with the provided configuration.
+
+        Args:
+            config (ConfigParser): PyTAK configuration to apply.
+            parse_inbound (bool): Whether inbound CoT data should be parsed.
+
+        Returns:
+            PytakWorkerManager: The configured worker manager.
+        """
+
+        cli_tool = self._ensure_cli_tool(config)
+        if self._worker_manager is None:
+            section = self._config_section(config)
+            self._worker_manager = PytakWorkerManager(cli_tool, section, parse_inbound)
+        else:
+            self._worker_manager.parse_inbound = parse_inbound
+        return self._worker_manager
+
+    async def create_and_send_message(
+        self,
+        message: Union[CotPayload, Iterable[CotPayload]],
+        config: Optional[ConfigParser] = None,
+        parse_inbound: bool = True,
+    ) -> list[Any]:
+        """
+        Send one or more CoT payloads through a PyTAK worker session.
+
+        Args:
+            message (CotPayload | Iterable[CotPayload]): Payload(s) to dispatch.
+            config (ConfigParser | None): Optional configuration override.
+            parse_inbound (bool): Whether to parse inbound data into :class:`Event`.
+
+        Returns:
+            list[Any]: Parsed or raw results from the receive worker.
+        """
+        cfg = self._ensure_config(config)
+        manager = self._ensure_manager(cfg, parse_inbound)
+        await self._run_in_loop(manager.start())
+        await self._run_in_loop(manager.enqueue(message))
+        return manager.results()
+
+    async def send_event(
+        self,
+        event: Event,
+        config: Optional[ConfigParser] = None,
+        parse_inbound: bool = True,
+    ):
+        """Convenience helper that sends a single Event."""
+        return await self.create_and_send_message(
+            event, config=config, parse_inbound=parse_inbound
+        )
+
+    @staticmethod
+    def _start_loop(
+        loop: asyncio.AbstractEventLoop, ready_event: ThreadEvent
+    ) -> None:
+        """
+        Start the event loop on a dedicated thread and signal readiness.
+
+        Args:
+            loop (asyncio.AbstractEventLoop): Event loop to run.
+            ready_event (ThreadEvent): Event set once the loop is running.
+        """
+        asyncio.set_event_loop(loop)
+        ready_event.set()
+        loop.run_forever()
+
+    def _ensure_loop(self) -> asyncio.AbstractEventLoop:
+        """Ensure a background event loop exists for PyTAK tasks."""
+
+        with self._loop_lock:
+            if self._loop is not None and self._loop.is_running():
+                return self._loop
+            loop = asyncio.new_event_loop()
+            self._loop = loop
+            self._loop_ready.clear()
+            thread = Thread(
+                target=self._start_loop, args=(loop, self._loop_ready), daemon=True
+            )
+            self._loop_thread = thread
+            thread.start()
+            self._loop_ready.wait()
+            return cast(asyncio.AbstractEventLoop, self._loop)
+
+    async def _run_in_loop(self, coro: Awaitable[Any]) -> Any:
+        """Execute a coroutine on the dedicated event loop and await it."""
+
+        loop = self._ensure_loop()
+        try:
+            running_loop = asyncio.get_running_loop()
+        except RuntimeError:
+            running_loop = None
+        if running_loop is loop:
+            return await coro
+        future = asyncio.run_coroutine_threadsafe(coro, loop)
+        return await asyncio.wrap_future(future)
+
+    async def stop(self) -> None:
+        """Stop the PyTAK worker manager and background loop."""
+
+        if self._worker_manager is not None:
+            await self._run_in_loop(self._worker_manager.stop())
+            self._worker_manager = None
+        if self._loop is not None and self._loop.is_running():
+            self._loop.call_soon_threadsafe(self._loop.stop)
+        if self._loop_thread is not None:
+            self._loop_thread.join(timeout=1.0)
+        self._loop = None
+        self._loop_thread = None
+
+    def _shutdown_sync(self) -> None:
+        """Best-effort cleanup for interpreter shutdown or GC."""
+
+        if self._loop is None or not self._loop.is_running():
+            self._loop = None
+            self._loop_thread = None
+            self._worker_manager = None
+            return
+
+        if self._worker_manager is not None:
+            try:
+                future = asyncio.run_coroutine_threadsafe(
+                    self._worker_manager.stop(), self._loop
+                )
+                future.result(timeout=1.0)
+            except Exception:  # pylint: disable=broad-exception-caught
+                pass
+            self._worker_manager = None
+
+        self._loop.call_soon_threadsafe(self._loop.stop)
+        if self._loop_thread is not None:
+            self._loop_thread.join(timeout=1.0)
+        self._loop = None
+        self._loop_thread = None
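
The hunk above adds the complete PyTAK client in 0.143.0. As a rough orientation only, a minimal usage sketch follows; it assumes the wheel is installed so the module is importable as reticulum_telemetry_hub.atak_cot.pytak_client, and that a TAK endpoint is reachable at the configured COT_URL. The server address, callsign, and CoT XML string are placeholders, not values taken from the package.

import asyncio
from configparser import ConfigParser

from reticulum_telemetry_hub.atak_cot.pytak_client import PytakClient

# Illustrative CoT event; plain strings are accepted as CotPayload and UTF-8 encoded.
COT_XML = (
    '<event version="2.0" uid="EXAMPLE-1" type="a-f-G-U-C" '
    'time="2024-01-01T00:00:00Z" start="2024-01-01T00:00:00Z" '
    'stale="2024-01-01T00:05:00Z" how="m-g">'
    '<point lat="0.0" lon="0.0" hae="0.0" ce="9999999.0" le="9999999.0"/>'
    "</event>"
)


async def main() -> None:
    # Override the built-in defaults (tcp://127.0.0.1:8087) with a placeholder endpoint.
    config = ConfigParser()
    config["fts"] = {"COT_URL": "tcp://takserver.example:8087", "CALLSIGN": "RTH"}

    client = PytakClient(config)
    try:
        # The first call starts the persistent worker session, then queues the payload.
        results = await client.create_and_send_message(COT_XML, parse_inbound=True)
        print(results)  # receive-worker results; may be empty until the server responds
    finally:
        await client.stop()


if __name__ == "__main__":
    asyncio.run(main())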