plexus-python 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. plexus/__init__.py +31 -0
  2. plexus/__main__.py +4 -0
  3. plexus/adapters/__init__.py +122 -0
  4. plexus/adapters/base.py +409 -0
  5. plexus/adapters/ble.py +257 -0
  6. plexus/adapters/can.py +439 -0
  7. plexus/adapters/can_detect.py +174 -0
  8. plexus/adapters/mavlink.py +642 -0
  9. plexus/adapters/mavlink_detect.py +192 -0
  10. plexus/adapters/modbus.py +622 -0
  11. plexus/adapters/mqtt.py +350 -0
  12. plexus/adapters/opcua.py +607 -0
  13. plexus/adapters/registry.py +206 -0
  14. plexus/adapters/serial_adapter.py +547 -0
  15. plexus/buffer.py +257 -0
  16. plexus/cameras/__init__.py +57 -0
  17. plexus/cameras/auto.py +239 -0
  18. plexus/cameras/base.py +189 -0
  19. plexus/cameras/picamera.py +171 -0
  20. plexus/cameras/usb.py +143 -0
  21. plexus/cli.py +783 -0
  22. plexus/client.py +465 -0
  23. plexus/config.py +169 -0
  24. plexus/connector.py +666 -0
  25. plexus/deps.py +246 -0
  26. plexus/detect.py +1238 -0
  27. plexus/importers/__init__.py +25 -0
  28. plexus/importers/rosbag.py +778 -0
  29. plexus/sensors/__init__.py +118 -0
  30. plexus/sensors/ads1115.py +164 -0
  31. plexus/sensors/adxl345.py +179 -0
  32. plexus/sensors/auto.py +290 -0
  33. plexus/sensors/base.py +412 -0
  34. plexus/sensors/bh1750.py +102 -0
  35. plexus/sensors/bme280.py +241 -0
  36. plexus/sensors/gps.py +317 -0
  37. plexus/sensors/ina219.py +149 -0
  38. plexus/sensors/magnetometer.py +239 -0
  39. plexus/sensors/mpu6050.py +162 -0
  40. plexus/sensors/sht3x.py +139 -0
  41. plexus/sensors/spi_scan.py +164 -0
  42. plexus/sensors/system.py +261 -0
  43. plexus/sensors/vl53l0x.py +109 -0
  44. plexus/streaming.py +743 -0
  45. plexus/tui.py +642 -0
  46. plexus_python-0.1.0.dist-info/METADATA +470 -0
  47. plexus_python-0.1.0.dist-info/RECORD +50 -0
  48. plexus_python-0.1.0.dist-info/WHEEL +4 -0
  49. plexus_python-0.1.0.dist-info/entry_points.txt +2 -0
  50. plexus_python-0.1.0.dist-info/licenses/LICENSE +190 -0
plexus/connector.py ADDED
@@ -0,0 +1,666 @@
1
+ """
2
+ Plexus Device Connector
3
+
4
+ Connects devices to Plexus via WebSocket for real-time streaming.
5
+
6
+ Data Flow:
7
+ ┌─────────────────────────────────────────────────────────────────┐
8
+ │ Device (this agent) │
9
+ │ │ │
10
+ │ ├──► WebSocket (PartyKit) ──► Dashboard (real-time view) │
11
+ │ │ │
12
+ │ └──► HTTP (/api/ingest) ──► ClickHouse (storage) │
13
+ │ (only when store=True OR draining backlog) │
14
+ └─────────────────────────────────────────────────────────────────┘
15
+
16
+ User Controls (from Dashboard UI):
17
+ - "View Live" → store=False → WebSocket only (free, no storage)
18
+ - "Record" → store=True → WebSocket + HTTP (uses storage quota)
19
+
20
+ Store-and-forward:
21
+ - When WebSocket disconnects, StreamManager buffers telemetry to SQLite
22
+ - On reconnect, connector drains the backlog via HTTP /api/ingest
23
+ - Both paths coexist: live WebSocket streaming + HTTP backlog drain
24
+
25
+ Authentication:
26
+ - API key (plx_*) is the auth method for all device connections
27
+ """
28
+
29
+ import asyncio
30
+ import gzip
31
+ import json
32
+ import logging
33
+ import os
34
+ import platform
35
+ import random
36
+ import socket
37
+ import time
38
+ from typing import Optional, Callable, List, Dict, Any, Tuple, TYPE_CHECKING
39
+
40
+ import websockets
41
+ from websockets.exceptions import ConnectionClosed
42
+
43
+ from plexus.config import get_api_key, get_endpoint, get_gateway_url, get_source_id, get_org_id, get_persistent_buffer
44
+ from plexus.buffer import SqliteBuffer
45
+ from plexus.streaming import StreamManager
46
+
47
+ if TYPE_CHECKING:
48
+ from plexus.sensors.base import SensorHub
49
+ from plexus.cameras.base import CameraHub
50
+ from plexus.adapters.can_detect import DetectedCAN
51
+ from plexus.adapters.mavlink_detect import DetectedMAVLink
52
+
53
+ logger = logging.getLogger(__name__)
54
+
55
+
56
+ class PlexusConnector:
57
+ """
58
+ WebSocket client that connects to Plexus for real-time data streaming.
59
+
60
+ Supports:
61
+ - Real-time sensor streaming (controlled from dashboard)
62
+ - Camera streaming
63
+ - Optional data persistence (when recording)
64
+ - Store-and-forward buffering for intermittent connectivity
65
+ """
66
+
67
    def __init__(
        self,
        api_key: Optional[str] = None,
        endpoint: Optional[str] = None,
        source_id: Optional[str] = None,
        source_name: Optional[str] = None,
        org_id: Optional[str] = None,
        on_status: Optional[Callable[[str], None]] = None,
        sensor_hub: Optional["SensorHub"] = None,
        camera_hub: Optional["CameraHub"] = None,
        can_adapters: Optional[List["DetectedCAN"]] = None,
        mavlink_connections: Optional[List["DetectedMAVLink"]] = None,
        max_reconnect_attempts: Optional[int] = None,
        persistent_buffer: Optional[bool] = None,
        buffer_path: Optional[str] = None,
    ):
        """Build a connector; every argument falls back to config values.

        Args:
            api_key: Plexus API key; defaults to get_api_key().
            endpoint: API base URL; trailing slashes are stripped.
            source_id: Device identifier; defaults to get_source_id().
            source_name: Optional human-readable device name.
            org_id: Organization id; resolved from the API key if omitted.
            on_status: Callback receiving human-readable status strings.
            sensor_hub: Optional sensor hub providing local readings.
            camera_hub: Optional camera hub.
            can_adapters: Optional detected CAN interfaces.
            mavlink_connections: Optional detected MAVLink connections.
            max_reconnect_attempts: None means retry forever.
            persistent_buffer: Enable SQLite store-and-forward; defaults
                to get_persistent_buffer().
            buffer_path: Optional path for the SQLite buffer file.

        Raises:
            ValueError: if org_id cannot be resolved from the API key.
        """
        self.api_key = api_key or get_api_key()
        self.endpoint = (endpoint or get_endpoint()).rstrip("/")
        self.gateway_url = get_gateway_url()
        self.source_id = source_id or get_source_id()
        self.source_name = source_name
        # NOTE: _resolve_org_id() reads self.api_key / self.endpoint, so it
        # must run after they are assigned above.
        resolved_org_id = org_id or get_org_id() or self._resolve_org_id()
        if not resolved_org_id:
            raise ValueError(
                "Could not resolve org_id from API key. "
                "Check your API key and network connection, or set PLEXUS_ORG_ID."
            )
        self.org_id = resolved_org_id
        self.on_status = on_status or (lambda x: None)
        self.sensor_hub = sensor_hub
        self.camera_hub = camera_hub
        self.can_adapters = can_adapters
        self.mavlink_connections = mavlink_connections
        self.max_reconnect_attempts = max_reconnect_attempts

        # Connection state, populated by connect()/_handle_message()
        self._ws = None
        self._running = False
        self._authenticated = False
        self._reconnect_count = 0
        self._connect_time: float = 0.0
        self._http_session: Optional[Any] = None
        self._px: Optional[Any] = None  # Plexus client ref for buffer flush
        self._drain_task: Optional[asyncio.Task] = None

        # Store-and-forward buffer
        use_buffer = persistent_buffer if persistent_buffer is not None else get_persistent_buffer()
        if use_buffer:
            self._buffer = SqliteBuffer(
                path=buffer_path,
                max_size=None,  # Unlimited — disk-bound for store-and-forward
                max_bytes=500 * 1024 * 1024,  # 500MB safety valve
            )
            logger.info("Store-and-forward enabled (SQLite buffer)")
        else:
            self._buffer = None

        self._streams = StreamManager(
            sensor_hub=sensor_hub,
            camera_hub=camera_hub,
            can_adapters=can_adapters,
            mavlink_connections=mavlink_connections,
            on_status=self.on_status,
            error_report_fn=self.report_error,
            buffer=self._buffer,
            endpoint=self.endpoint,
            api_key=self.api_key,
            source_id=self.source_id,
        )
135
+
136
+ # =========================================================================
137
+ # Heartbeat & Error Reporting
138
+ # =========================================================================
139
+
140
+ async def _heartbeat_loop(self, ws, interval=30):
141
+ """Send heartbeat every interval so server knows device is alive."""
142
+ from plexus import __version__
143
+ try:
144
+ while True:
145
+ await asyncio.sleep(interval)
146
+ await ws.send(json.dumps({
147
+ "type": "heartbeat",
148
+ "source_id": self.source_id,
149
+ "uptime_s": time.time() - self._connect_time,
150
+ "agent_version": __version__,
151
+ }))
152
+ except (asyncio.CancelledError, ConnectionClosed):
153
+ pass
154
+
155
+ async def report_error(self, source: str, error: str, severity: str = "warning"):
156
+ """Report device-side error to dashboard via WebSocket."""
157
+ if self._ws:
158
+ try:
159
+ await self._ws.send(json.dumps({
160
+ "type": "device_error",
161
+ "source_id": self.source_id,
162
+ "source": source,
163
+ "error": str(error),
164
+ "severity": severity,
165
+ "timestamp": time.time(),
166
+ }))
167
+ except Exception:
168
+ logger.debug("Failed to send error report to dashboard")
169
+
170
+ # =========================================================================
171
+ # Org ID Resolution
172
+ # =========================================================================
173
+
174
+ def _resolve_org_id(self) -> Optional[str]:
175
+ """Resolve org_id from the API key via the verify-key endpoint.
176
+
177
+ Caches the result in config so subsequent runs don't need the request.
178
+ """
179
+ if not self.api_key:
180
+ return None
181
+ try:
182
+ import requests
183
+ resp = requests.get(
184
+ f"{self.endpoint}/api/auth/verify-key",
185
+ headers={"x-api-key": self.api_key},
186
+ timeout=10,
187
+ )
188
+ if resp.status_code == 200:
189
+ org_id = resp.json().get("org_id")
190
+ if org_id:
191
+ # Cache in config for future runs
192
+ from plexus.config import load_config, save_config
193
+ config = load_config()
194
+ config["org_id"] = org_id
195
+ save_config(config)
196
+ return org_id
197
+ except Exception:
198
+ pass
199
+ return None
200
+
201
+ @staticmethod
202
+ def _get_local_ip() -> str:
203
+ """Get the local IP address of this device."""
204
+ try:
205
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
206
+ s.connect(("8.8.8.8", 80))
207
+ ip = s.getsockname()[0]
208
+ s.close()
209
+ return ip
210
+ except Exception:
211
+ return "unknown"
212
+
213
+ # =========================================================================
214
+ # Connection URLs
215
+ # =========================================================================
216
+
217
+ def _get_ws_url(self) -> str:
218
+ """Get WebSocket URL for the gateway."""
219
+ # 1. Explicit env var
220
+ ws_endpoint = os.environ.get("PLEXUS_WS_URL")
221
+ if ws_endpoint:
222
+ base = ws_endpoint.rstrip("/")
223
+ if "/ws/" in base:
224
+ return base
225
+ return f"{base}/ws/device"
226
+
227
+ # 2. Discover from API
228
+ try:
229
+ import requests
230
+ resp = requests.get(f"{self.endpoint}/api/config", timeout=5.0)
231
+ if resp.status_code == 200:
232
+ config = resp.json()
233
+ ws_url = config.get("ws_url")
234
+ if ws_url:
235
+ base = ws_url.rstrip("/")
236
+ if "/ws/" in base:
237
+ return base
238
+ return f"{base}/ws/device"
239
+ except Exception:
240
+ pass
241
+
242
+ # 3. Fallback: local dev gateway
243
+ return "ws://127.0.0.1:8787/ws/device"
244
+
245
+ # =========================================================================
246
+ # HTTP Persistence (for recording)
247
+ # =========================================================================
248
+
249
+ def _get_http_session(self):
250
+ """Get HTTP session for data persistence."""
251
+ if self._http_session is None:
252
+ import requests
253
+ self._http_session = requests.Session()
254
+ if self.api_key:
255
+ self._http_session.headers["x-api-key"] = self.api_key
256
+ self._http_session.headers["Content-Type"] = "application/json"
257
+ from plexus import __version__
258
+ self._http_session.headers["User-Agent"] = f"plexus-python/{__version__}"
259
+ return self._http_session
260
+
261
+ def _register_device(self, source_id: Optional[str] = None):
262
+ """Send sensor readings to /api/ingest to register the device and capture schema.
263
+
264
+ The AI dashboard generator needs 3+ metrics in the schema table.
265
+ Sensor data normally goes WebSocket-only, so we POST the first
266
+ reading to /api/ingest to seed the schema.
267
+ """
268
+ if not source_id or not self.api_key:
269
+ return
270
+ try:
271
+ points = []
272
+
273
+ # Read actual sensor data if available
274
+ if self.sensor_hub:
275
+ try:
276
+ readings = self.sensor_hub.read_all()
277
+ for r in readings:
278
+ if isinstance(r.value, (int, float)):
279
+ points.append({
280
+ "source_id": source_id,
281
+ "metric": r.metric,
282
+ "value": r.value,
283
+ })
284
+ except Exception as e:
285
+ logger.debug(f"Sensor read for registration failed: {e}")
286
+
287
+ # Always include a heartbeat
288
+ if not points:
289
+ points.append({
290
+ "source_id": source_id,
291
+ "metric": "_heartbeat",
292
+ "value": 1,
293
+ })
294
+
295
+ session = self._get_http_session()
296
+ session.post(
297
+ f"{self.gateway_url}/ingest",
298
+ json={"points": points},
299
+ timeout=5.0,
300
+ )
301
+ except Exception as e:
302
+ logger.debug(f"Device registration failed: {e}")
303
+
304
+ def _persist_points(self, points: List[Dict[str, Any]]) -> Tuple[bool, float]:
305
+ """Persist data points to ClickHouse via HTTP. Gzip-compressed for large payloads.
306
+
307
+ Returns (success, retry_after_seconds). retry_after is 0 on success,
308
+ >0 when rate-limited (429), or -1 on non-retryable failure.
309
+ """
310
+ if not self.api_key:
311
+ return False, -1
312
+
313
+ try:
314
+ formatted = [
315
+ {
316
+ "metric": p["metric"],
317
+ "value": p["value"],
318
+ "source_id": self.source_id,
319
+ "timestamp": p.get("timestamp", int(time.time() * 1000)),
320
+ "tags": p.get("tags", {}),
321
+ }
322
+ for p in points
323
+ ]
324
+
325
+ payload = json.dumps({"points": formatted}).encode("utf-8")
326
+ session = self._get_http_session()
327
+ url = f"{self.gateway_url}/ingest"
328
+
329
+ # Gzip compress payloads > 1KB
330
+ if len(payload) > 1024:
331
+ body = gzip.compress(payload, compresslevel=6)
332
+ response = session.post(
333
+ url, data=body, timeout=10.0,
334
+ headers={
335
+ **session.headers,
336
+ "Content-Encoding": "gzip",
337
+ },
338
+ )
339
+ else:
340
+ response = session.post(url, data=payload, timeout=5.0)
341
+
342
+ if response.status_code == 429:
343
+ retry_after = float(response.headers.get("Retry-After", 30))
344
+ return False, retry_after
345
+ return response.status_code < 400, 0
346
+ except Exception as e:
347
+ logger.debug(f"Persist failed: {e}")
348
+ return False, 0
349
+
350
+ async def _persist_async(self, points: List[Dict[str, Any]]):
351
+ """Async wrapper - runs HTTP in thread pool."""
352
+ loop = asyncio.get_event_loop()
353
+ await loop.run_in_executor(None, self._persist_points, points)
354
+ # Return value (success, retry_after) intentionally ignored for
355
+ # fire-and-forget recording persistence
356
+
357
+ # =========================================================================
358
+ # Backlog Drain (Store-and-Forward)
359
+ # =========================================================================
360
+
361
+ async def _drain_backlog(self):
362
+ """Drain buffered telemetry to /api/ingest via HTTP.
363
+
364
+ Called on reconnect when the buffer has data from a disconnection period.
365
+ Runs as a background task alongside live WebSocket streaming.
366
+ Handles rate limits (429), re-buffers on failure, and stops on disconnect.
367
+ """
368
+ if not self._buffer:
369
+ return
370
+
371
+ total_drained = 0
372
+ initial_size = self._buffer.size()
373
+ if initial_size == 0:
374
+ return
375
+
376
+ self.on_status(f"Draining {initial_size:,} buffered points...")
377
+ loop = asyncio.get_event_loop()
378
+ consecutive_failures = 0
379
+
380
+ while self._running and self._authenticated:
381
+ try:
382
+ batch, remaining = await loop.run_in_executor(
383
+ None, self._buffer.drain, 5000
384
+ )
385
+ except Exception as e:
386
+ logger.debug(f"Buffer drain read failed: {e}")
387
+ break
388
+
389
+ if not batch:
390
+ break
391
+
392
+ success, retry_after = await loop.run_in_executor(
393
+ None, self._persist_points, batch
394
+ )
395
+
396
+ if not success:
397
+ # Re-buffer the failed batch
398
+ try:
399
+ self._buffer.add(batch)
400
+ except Exception:
401
+ pass
402
+
403
+ if retry_after > 0:
404
+ # Rate-limited — pause and retry
405
+ self.on_status(f"Backlog drain rate-limited, waiting {retry_after:.0f}s...")
406
+ await asyncio.sleep(retry_after)
407
+ consecutive_failures = 0
408
+ continue
409
+ elif retry_after < 0:
410
+ # Non-retryable (auth error, etc.) — stop completely
411
+ self.on_status("Backlog drain stopped (non-retryable error)")
412
+ break
413
+ else:
414
+ # Transient failure — retry with backoff
415
+ consecutive_failures += 1
416
+ if consecutive_failures >= 5:
417
+ self.on_status("Backlog drain paused (5 consecutive failures)")
418
+ break
419
+ backoff = min(2 ** consecutive_failures, 30)
420
+ self.on_status(f"Backlog upload failed, retrying in {backoff}s...")
421
+ await asyncio.sleep(backoff)
422
+ continue
423
+
424
+ consecutive_failures = 0
425
+ total_drained += len(batch)
426
+
427
+ if remaining > 0:
428
+ self.on_status(f"Backlog: {total_drained:,}/{initial_size:,} sent, {remaining:,} remaining")
429
+ else:
430
+ self.on_status(f"Backlog drained: {total_drained:,} points uploaded")
431
+
432
+ # Brief yield to avoid starving the event loop
433
+ await asyncio.sleep(0.05)
434
+
435
+ def _start_drain(self):
436
+ """Start backlog drain as a background task if buffer has data."""
437
+ if not self._buffer or self._buffer.size() == 0:
438
+ return
439
+ # Cancel any existing drain
440
+ if self._drain_task and not self._drain_task.done():
441
+ self._drain_task.cancel()
442
+ self._drain_task = asyncio.create_task(self._drain_backlog())
443
+
444
+ # =========================================================================
445
+ # WebSocket Connection
446
+ # =========================================================================
447
+
448
+ async def connect(self):
449
+ """Connect to Plexus and stream data.
450
+
451
+ Uses exponential backoff with jitter on reconnection:
452
+ 1s → 2s → 4s → 8s → ... → 60s max, with ±25% jitter.
453
+ Backoff resets after a successful connection that lasts >30s.
454
+ """
455
+ if not self.api_key:
456
+ raise ValueError("No API key. Run 'plexus start' first.")
457
+
458
+ ws_url = self._get_ws_url()
459
+ self.on_status(f"Connecting to {ws_url}...")
460
+
461
+ self._running = True
462
+ self._reconnect_count = 0
463
+ backoff = 1.0
464
+ max_backoff = 60.0
465
+
466
+ while self._running:
467
+ connected_at = time.monotonic()
468
+ try:
469
+ async with websockets.connect(ws_url, ping_interval=30, ping_timeout=10) as ws:
470
+ self._ws = ws
471
+ self._authenticated = False
472
+ self._connect_time = time.time()
473
+
474
+ # Reset backoff and reconnect counter after stable connection (>30s)
475
+ backoff = 1.0
476
+
477
+ # Build auth message with device metadata
478
+ from plexus import __version__
479
+ auth_msg = {
480
+ "type": "device_auth",
481
+ "api_key": self.api_key,
482
+ "source_id": self.source_id,
483
+ "source_name": self.source_name,
484
+ "platform": platform.system(),
485
+ "agent_version": __version__,
486
+ "hostname": socket.gethostname(),
487
+ "ip_addresses": [self._get_local_ip()],
488
+ "os_detail": f"{platform.system()} {platform.release()}",
489
+ "python_version": platform.python_version(),
490
+ "sensors": self.sensor_hub.get_info() if self.sensor_hub else [],
491
+ "cameras": self.camera_hub.get_info() if self.camera_hub else [],
492
+ "can": [
493
+ {"interface": c.interface, "channel": c.channel, "bitrate": c.bitrate}
494
+ for c in self.can_adapters
495
+ ] if self.can_adapters else [],
496
+ "mavlink": [
497
+ {"connection_string": m.connection_string, "transport": m.transport}
498
+ for m in self.mavlink_connections
499
+ ] if self.mavlink_connections else [],
500
+ }
501
+
502
+ await ws.send(json.dumps(auth_msg))
503
+ self.on_status("Authenticating...")
504
+
505
+ # Launch heartbeat alongside message listener
506
+ heartbeat_task = asyncio.create_task(self._heartbeat_loop(ws))
507
+ try:
508
+ async for message in ws:
509
+ await self._handle_message(message)
510
+ finally:
511
+ heartbeat_task.cancel()
512
+ try:
513
+ await heartbeat_task
514
+ except asyncio.CancelledError:
515
+ pass
516
+
517
+ except ConnectionClosed as e:
518
+ self.on_status(f"Disconnected: {e.reason}")
519
+ except Exception as e:
520
+ self.on_status(f"Error: {e}")
521
+
522
+ if self._running:
523
+ # Don't escalate backoff if connection was stable (>30s)
524
+ if time.monotonic() - connected_at < 30:
525
+ backoff = min(backoff * 2, max_backoff)
526
+ self._reconnect_count += 1
527
+ else:
528
+ backoff = 1.0
529
+ self._reconnect_count = 0
530
+
531
+ # Check max reconnect attempts
532
+ if self.max_reconnect_attempts is not None and self._reconnect_count >= self.max_reconnect_attempts:
533
+ self.on_status(f"Max reconnect attempts ({self.max_reconnect_attempts}) reached, giving up")
534
+ break
535
+
536
+ # Add ±25% jitter to prevent thundering herd
537
+ jitter = backoff * random.uniform(0.75, 1.25)
538
+ delay = min(jitter, max_backoff)
539
+ self.on_status(f"Reconnecting in {delay:.1f}s...")
540
+ await asyncio.sleep(delay)
541
+
542
+ async def _handle_message(self, message: str):
543
+ """Handle incoming WebSocket message."""
544
+ try:
545
+ data = json.loads(message)
546
+ msg_type = data.get("type")
547
+
548
+ if msg_type == "authenticated":
549
+ self._authenticated = True
550
+ self.on_status(f"Connected as {data.get('source_id')}")
551
+ # Register device via /api/ingest so it appears in the UI
552
+ self._register_device(data.get("source_id"))
553
+ # Drain any buffered telemetry from disconnection period
554
+ self._start_drain()
555
+ return
556
+
557
+ if msg_type == "error":
558
+ err_msg = data.get("message")
559
+ if err_msg:
560
+ self.on_status(f"Error: {err_msg}")
561
+ return
562
+
563
+ if not self._authenticated:
564
+ return
565
+
566
+ # Command handlers - delegate to focused modules
567
+ handlers = {
568
+ "start_stream": lambda d: self._streams.start_stream(d, self._ws),
569
+ "stop_stream": lambda d: self._streams.stop_stream(d),
570
+ "start_camera": lambda d: self._streams.start_camera(d, self._ws),
571
+ "stop_camera": lambda d: self._streams.stop_camera(d),
572
+ "start_can": lambda d: self._streams.start_can_stream(d, self._ws),
573
+ "stop_can": lambda d: self._streams.stop_can_stream(d),
574
+ "start_mavlink": lambda d: self._streams.start_mavlink_stream(d, self._ws),
575
+ "stop_mavlink": lambda d: self._streams.stop_mavlink_stream(d),
576
+ "mavlink_command": lambda d: self._streams.send_mavlink_command(d, self._ws),
577
+ "configure": lambda d: self._streams.configure_sensor(d),
578
+ "configure_camera": lambda d: self._streams.configure_camera(d),
579
+ "ping": lambda _: self._ws.send(json.dumps({"type": "pong"})),
580
+ }
581
+
582
+ handler = handlers.get(msg_type)
583
+ if handler:
584
+ result = handler(data)
585
+ if asyncio.iscoroutine(result):
586
+ await result
587
+
588
+ except json.JSONDecodeError:
589
+ self.on_status(f"Invalid message: {message}")
590
+
591
+ # =========================================================================
592
+ # Cleanup
593
+ # =========================================================================
594
+
595
+ def disconnect(self):
596
+ """Disconnect and cleanup, flushing any buffered telemetry."""
597
+ self._running = False
598
+ self._streams.cancel_all()
599
+
600
+ # Cancel any in-progress backlog drain
601
+ if self._drain_task and not self._drain_task.done():
602
+ self._drain_task.cancel()
603
+
604
+ # Flush any buffered points before closing
605
+ if self._px:
606
+ try:
607
+ self._px.flush_buffer()
608
+ except Exception:
609
+ logger.warning("Failed to flush buffer on shutdown")
610
+
611
+ self._ws = None
612
+
613
+ if self._http_session:
614
+ self._http_session.close()
615
+ self._http_session = None
616
+
617
+ # Log remaining buffer size on shutdown (don't close — data persists)
618
+ if self._buffer:
619
+ remaining = self._buffer.size()
620
+ if remaining > 0:
621
+ logger.info("Shutting down with %d points buffered (will drain on next start)", remaining)
622
+
623
+
624
def run_connector(
    api_key: Optional[str] = None,
    endpoint: Optional[str] = None,
    source_id: Optional[str] = None,
    source_name: Optional[str] = None,
    on_status: Optional[Callable[[str], None]] = None,
    sensor_hub: Optional["SensorHub"] = None,
    camera_hub: Optional["CameraHub"] = None,
    can_adapters: Optional[List["DetectedCAN"]] = None,
    mavlink_connections: Optional[List["DetectedMAVLink"]] = None,
    max_reconnect_attempts: Optional[int] = None,
    persistent_buffer: Optional[bool] = None,
):
    """Run the connector until interrupted (blocking).

    Builds a PlexusConnector from the given options, installs a SIGTERM
    handler for graceful shutdown, and drives the asyncio event loop.
    Ctrl-C (KeyboardInterrupt) also triggers a clean disconnect.
    """
    import signal
    import threading

    connector = PlexusConnector(
        api_key=api_key,
        endpoint=endpoint,
        source_id=source_id,
        source_name=source_name,
        on_status=on_status,
        sensor_hub=sensor_hub,
        camera_hub=camera_hub,
        can_adapters=can_adapters,
        mavlink_connections=mavlink_connections,
        max_reconnect_attempts=max_reconnect_attempts,
        persistent_buffer=persistent_buffer,
    )

    def _on_sigterm(signum, frame):
        connector.disconnect()

    # Signal handlers can only be set in the main thread (the TUI runs
    # the connector in a background thread, so skip it there).
    if threading.current_thread() is threading.main_thread():
        signal.signal(signal.SIGTERM, _on_sigterm)

    try:
        asyncio.run(connector.connect())
    except KeyboardInterrupt:
        connector.disconnect()