plexus-python 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- plexus/__init__.py +31 -0
- plexus/__main__.py +4 -0
- plexus/adapters/__init__.py +122 -0
- plexus/adapters/base.py +409 -0
- plexus/adapters/ble.py +257 -0
- plexus/adapters/can.py +439 -0
- plexus/adapters/can_detect.py +174 -0
- plexus/adapters/mavlink.py +642 -0
- plexus/adapters/mavlink_detect.py +192 -0
- plexus/adapters/modbus.py +622 -0
- plexus/adapters/mqtt.py +350 -0
- plexus/adapters/opcua.py +607 -0
- plexus/adapters/registry.py +206 -0
- plexus/adapters/serial_adapter.py +547 -0
- plexus/buffer.py +257 -0
- plexus/cameras/__init__.py +57 -0
- plexus/cameras/auto.py +239 -0
- plexus/cameras/base.py +189 -0
- plexus/cameras/picamera.py +171 -0
- plexus/cameras/usb.py +143 -0
- plexus/cli.py +783 -0
- plexus/client.py +465 -0
- plexus/config.py +169 -0
- plexus/connector.py +666 -0
- plexus/deps.py +246 -0
- plexus/detect.py +1238 -0
- plexus/importers/__init__.py +25 -0
- plexus/importers/rosbag.py +778 -0
- plexus/sensors/__init__.py +118 -0
- plexus/sensors/ads1115.py +164 -0
- plexus/sensors/adxl345.py +179 -0
- plexus/sensors/auto.py +290 -0
- plexus/sensors/base.py +412 -0
- plexus/sensors/bh1750.py +102 -0
- plexus/sensors/bme280.py +241 -0
- plexus/sensors/gps.py +317 -0
- plexus/sensors/ina219.py +149 -0
- plexus/sensors/magnetometer.py +239 -0
- plexus/sensors/mpu6050.py +162 -0
- plexus/sensors/sht3x.py +139 -0
- plexus/sensors/spi_scan.py +164 -0
- plexus/sensors/system.py +261 -0
- plexus/sensors/vl53l0x.py +109 -0
- plexus/streaming.py +743 -0
- plexus/tui.py +642 -0
- plexus_python-0.1.0.dist-info/METADATA +470 -0
- plexus_python-0.1.0.dist-info/RECORD +50 -0
- plexus_python-0.1.0.dist-info/WHEEL +4 -0
- plexus_python-0.1.0.dist-info/entry_points.txt +2 -0
- plexus_python-0.1.0.dist-info/licenses/LICENSE +190 -0
plexus/buffer.py
ADDED
|
@@ -0,0 +1,257 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Pluggable buffer backends for local point storage.
|
|
3
|
+
|
|
4
|
+
Two implementations:
|
|
5
|
+
- MemoryBuffer: In-memory list (default, matches original behavior)
|
|
6
|
+
- SqliteBuffer: WAL-mode SQLite for persistence across restarts
|
|
7
|
+
|
|
8
|
+
Store-and-forward:
|
|
9
|
+
Both backends support drain(batch_size) for incremental backlog upload.
|
|
10
|
+
SqliteBuffer can run uncapped (max_size=None) for intermittently connected
|
|
11
|
+
devices like satellites or field robots that buffer for hours/days.
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import json
|
|
15
|
+
import logging
|
|
16
|
+
import os
|
|
17
|
+
import sqlite3
|
|
18
|
+
import threading
|
|
19
|
+
from abc import ABC, abstractmethod
|
|
20
|
+
from typing import Any, Callable, Dict, List, Optional, Tuple
|
|
21
|
+
|
|
22
|
+
logger = logging.getLogger(__name__)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class BufferBackend(ABC):
    """Interface for local storage of telemetry points awaiting upload."""

    @abstractmethod
    def add(self, points: List[Dict[str, Any]]) -> None:
        """Append points, evicting the oldest entries if capacity is exceeded."""

    @abstractmethod
    def get_all(self) -> List[Dict[str, Any]]:
        """Return a snapshot copy of every buffered point (buffer unchanged)."""

    @abstractmethod
    def clear(self) -> None:
        """Discard every buffered point."""

    @abstractmethod
    def size(self) -> int:
        """Return how many points are currently buffered."""

    def drain(self, batch_size: int = 5000) -> Tuple[List[Dict[str, Any]], int]:
        """Remove and return the oldest ``batch_size`` points.

        Returns ``(points, remaining_count)``. The returned points are
        deleted from the buffer by this call — if the caller fails to
        upload them, they are lost. Intended for backlog drain where each
        batch is uploaded and then confirmed.

        This default implementation is built on get_all/clear and is NOT
        atomic; subclasses should override it with an atomic version.
        """
        snapshot = self.get_all()
        head = snapshot[:batch_size]
        if not head:
            # Nothing to drain (empty buffer, or batch_size <= 0).
            return [], 0
        tail = snapshot[batch_size:]
        # Swap the buffer contents for the un-drained remainder.
        self.clear()
        if tail:
            self.add(tail)
        return head, len(tail)
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
class MemoryBuffer(BufferBackend):
    """In-memory FIFO buffer with oldest-first eviction. Thread-safe.

    Preserves the original behavior of Plexus client._failed_buffer.
    """

    def __init__(self, max_size: int = 10_000, on_overflow: Optional[Callable[[int], None]] = None):
        # Hard cap on buffered points; anything beyond it is evicted oldest-first.
        self._max_size = max_size
        # Optional callback receiving the number of points dropped on overflow.
        self._on_overflow = on_overflow
        self._buffer: List[Dict[str, Any]] = []
        self._lock = threading.Lock()

    def add(self, points: List[Dict[str, Any]]) -> None:
        """Append points, dropping the oldest entries past max_size."""
        with self._lock:
            self._buffer.extend(points)
            excess = len(self._buffer) - self._max_size
            if excess > 0:
                logger.warning("Buffer full, dropped %d oldest points", excess)
                del self._buffer[:excess]
                if self._on_overflow:
                    self._on_overflow(excess)

    def get_all(self) -> List[Dict[str, Any]]:
        """Return a snapshot copy of the buffered points."""
        with self._lock:
            return list(self._buffer)

    def clear(self) -> None:
        """Discard every buffered point."""
        with self._lock:
            del self._buffer[:]

    def size(self) -> int:
        """Return the number of points currently buffered."""
        with self._lock:
            return len(self._buffer)

    def drain(self, batch_size: int = 5000) -> Tuple[List[Dict[str, Any]], int]:
        """Atomically pop the oldest batch_size points; returns (batch, remaining)."""
        with self._lock:
            head = self._buffer[:batch_size]
            del self._buffer[:batch_size]
            return head, len(self._buffer)
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
class SqliteBuffer(BufferBackend):
    """SQLite-backed persistent buffer using WAL mode. Thread-safe.

    Survives process restarts. Points are stored as JSON blobs in a single
    table with auto-incrementing rowid for FIFO ordering.

    Args:
        path: Path to the SQLite database file.
            Defaults to ~/.plexus/buffer.db
        max_size: Maximum number of points to retain. None = unlimited
            (disk-bound, suitable for store-and-forward).
        max_bytes: Maximum database file size in bytes. None = no limit.
            Safety valve to prevent filling the disk.
        on_overflow: Callback invoked with the number of points dropped
            whenever eviction occurs.
    """

    def __init__(
        self,
        path: Optional[str] = None,
        max_size: Optional[int] = 100_000,
        max_bytes: Optional[int] = None,
        on_overflow: Optional[Callable[[int], None]] = None,
    ):
        self._max_size = max_size
        self._max_bytes = max_bytes
        self._on_overflow = on_overflow
        self._lock = threading.Lock()

        if path is None:
            plexus_dir = os.path.join(os.path.expanduser("~"), ".plexus")
            os.makedirs(plexus_dir, exist_ok=True)
            try:
                os.chmod(plexus_dir, 0o700)  # buffered telemetry may be sensitive
            except OSError:
                pass  # Windows or restricted filesystem
            path = os.path.join(plexus_dir, "buffer.db")

        self._path = path
        # check_same_thread=False is safe here: all access is serialized
        # through self._lock.
        self._conn = sqlite3.connect(path, check_same_thread=False)
        self._conn.execute("PRAGMA journal_mode=WAL")
        self._conn.execute(
            "CREATE TABLE IF NOT EXISTS points ("
            " id INTEGER PRIMARY KEY AUTOINCREMENT,"
            " data TEXT NOT NULL"
            ")"
        )
        self._conn.commit()

    def add(self, points: List[Dict[str, Any]]) -> None:
        """Insert points, then evict oldest rows if over max_size/max_bytes."""
        if not points:
            return
        with self._lock:
            # Disk-space safety valve: checked before inserting so a full
            # disk sheds old data instead of growing further.
            if self._max_bytes is not None:
                try:
                    if os.path.getsize(self._path) >= self._max_bytes:
                        logger.warning("Buffer at disk limit (%d bytes), dropping oldest", self._max_bytes)
                        self._evict_pct(10)  # Drop 10% to make room
                except OSError:
                    pass
            self._conn.executemany(
                "INSERT INTO points (data) VALUES (?)",
                [(json.dumps(p),) for p in points],
            )
            self._conn.commit()
            self._evict()

    def get_all(self) -> List[Dict[str, Any]]:
        """Return all buffered points in FIFO order without removing them."""
        with self._lock:
            cursor = self._conn.execute("SELECT data FROM points ORDER BY id")
            return [json.loads(row[0]) for row in cursor.fetchall()]

    def clear(self) -> None:
        """Delete every buffered point."""
        with self._lock:
            self._conn.execute("DELETE FROM points")
            self._conn.commit()

    def size(self) -> int:
        """Return the number of buffered points."""
        with self._lock:
            cursor = self._conn.execute("SELECT COUNT(*) FROM points")
            return cursor.fetchone()[0]

    def drain(self, batch_size: int = 5000) -> Tuple[List[Dict[str, Any]], int]:
        """Remove and return the oldest batch_size points atomically.

        Uses a single SELECT-then-DELETE under the lock. If the process
        crashes after drain() but before the caller uploads, points are
        gone — this is the intended trade-off for simplicity. For
        crash-safe drain, the caller should use smaller batch sizes.

        The DELETE matches ``id <= last_selected_id`` rather than an
        ``IN (?, ?, ...)`` list. Because the SELECT returns the batch_size
        smallest ids, that range is exactly the set of rows read; it also
        avoids SQLite's bound-parameter limit (SQLITE_MAX_VARIABLE_NUMBER
        is 999 before SQLite 3.32), which an IN-list would exceed at the
        default batch_size of 5000.
        """
        with self._lock:
            cursor = self._conn.execute(
                "SELECT id, data FROM points ORDER BY id LIMIT ?",
                (batch_size,),
            )
            rows = cursor.fetchall()
            if not rows:
                return [], 0

            points = [json.loads(row[1]) for row in rows]

            # Every row with id <= the last selected id was necessarily part
            # of the SELECT above, so this deletes exactly the rows we read.
            # New inserts always receive larger ids and are untouched.
            self._conn.execute(
                "DELETE FROM points WHERE id <= ?",
                (rows[-1][0],),
            )
            self._conn.commit()

            remaining = self._conn.execute("SELECT COUNT(*) FROM points").fetchone()[0]
            return points, remaining

    def close(self) -> None:
        """Close the underlying database connection."""
        with self._lock:
            self._conn.close()

    def _evict(self) -> None:
        """Remove oldest rows if over max_size. Must be called with lock held."""
        if self._max_size is None:
            return
        cursor = self._conn.execute("SELECT COUNT(*) FROM points")
        count = cursor.fetchone()[0]
        if count > self._max_size:
            overflow = count - self._max_size
            self._conn.execute(
                "DELETE FROM points WHERE id IN ("
                " SELECT id FROM points ORDER BY id LIMIT ?"
                ")",
                (overflow,),
            )
            self._conn.commit()
            logger.warning("Buffer full, dropped %d oldest points", overflow)
            if self._on_overflow:
                self._on_overflow(overflow)

    def _evict_pct(self, pct: int) -> None:
        """Evict a percentage of buffered points. Must be called with lock held."""
        cursor = self._conn.execute("SELECT COUNT(*) FROM points")
        count = cursor.fetchone()[0]
        to_drop = max(1, count * pct // 100)
        self._conn.execute(
            "DELETE FROM points WHERE id IN ("
            " SELECT id FROM points ORDER BY id LIMIT ?"
            ")",
            (to_drop,),
        )
        self._conn.commit()
        logger.warning("Disk safety: dropped %d oldest points", to_drop)
        if self._on_overflow:
            self._on_overflow(to_drop)
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Plexus camera drivers for streaming video from hardware devices.
|
|
3
|
+
|
|
4
|
+
Supports USB webcams and Raspberry Pi Camera Modules.
|
|
5
|
+
|
|
6
|
+
Usage:
|
|
7
|
+
# USB webcam
|
|
8
|
+
from plexus.cameras import USBCamera
|
|
9
|
+
camera = USBCamera(device_index=0)
|
|
10
|
+
frame = camera.capture()
|
|
11
|
+
|
|
12
|
+
# Auto-detect cameras
|
|
13
|
+
from plexus.cameras import scan_cameras, auto_cameras
|
|
14
|
+
detected = scan_cameras()
|
|
15
|
+
hub = auto_cameras()
|
|
16
|
+
|
|
17
|
+
Install camera support:
|
|
18
|
+
pip install plexus-python[camera] # USB webcams (OpenCV)
|
|
19
|
+
pip install plexus-python[picamera] # Raspberry Pi Camera
|
|
20
|
+
"""
|
|
21
|
+
|
|
22
|
+
from plexus.cameras.base import BaseCamera, CameraFrame, CameraHub
|
|
23
|
+
from plexus.cameras.auto import (
|
|
24
|
+
DetectedCamera,
|
|
25
|
+
scan_cameras,
|
|
26
|
+
scan_usb_cameras,
|
|
27
|
+
scan_pi_cameras,
|
|
28
|
+
auto_cameras,
|
|
29
|
+
)
|
|
30
|
+
|
|
31
|
+
# Optional imports - only available if dependencies installed
|
|
32
|
+
try:
|
|
33
|
+
from plexus.cameras.usb import USBCamera
|
|
34
|
+
except ImportError:
|
|
35
|
+
USBCamera = None
|
|
36
|
+
|
|
37
|
+
try:
|
|
38
|
+
from plexus.cameras.picamera import PiCamera
|
|
39
|
+
except ImportError:
|
|
40
|
+
PiCamera = None
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
__all__ = [
|
|
44
|
+
# Base classes
|
|
45
|
+
"BaseCamera",
|
|
46
|
+
"CameraFrame",
|
|
47
|
+
"CameraHub",
|
|
48
|
+
# Drivers (may be None if dependencies not installed)
|
|
49
|
+
"USBCamera",
|
|
50
|
+
"PiCamera",
|
|
51
|
+
# Auto-detection
|
|
52
|
+
"DetectedCamera",
|
|
53
|
+
"scan_cameras",
|
|
54
|
+
"scan_usb_cameras",
|
|
55
|
+
"scan_pi_cameras",
|
|
56
|
+
"auto_cameras",
|
|
57
|
+
]
|
plexus/cameras/auto.py
ADDED
|
@@ -0,0 +1,239 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Camera auto-detection utilities.
|
|
3
|
+
|
|
4
|
+
Scans for available cameras and creates appropriate drivers.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
import os
|
|
9
|
+
from dataclasses import dataclass
|
|
10
|
+
from typing import List, Optional, Set, Tuple, Type
|
|
11
|
+
|
|
12
|
+
from plexus.cameras.base import BaseCamera, CameraHub
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@dataclass
class DetectedCamera:
    """Information about a detected camera."""

    # Human-readable label, e.g. "USB Camera 0" or "Pi Camera 1".
    name: str
    # Stable identifier of the form "usb:<index>" or "picam:<num>".
    device_id: str
    # Driver class (not an instance) to instantiate for this camera.
    driver: Type[BaseCamera]
    # Free-form description, e.g. model or resolution details.
    description: str
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def _get_pi_camera_v4l2_indices() -> Set[int]:
|
|
27
|
+
"""Get V4L2 device indices that belong to the Pi Camera pipeline.
|
|
28
|
+
|
|
29
|
+
On Raspberry Pi, the camera module exposes multiple /dev/video* devices
|
|
30
|
+
through the Unicam/ISP pipeline. These should not be opened by OpenCV
|
|
31
|
+
as they conflict with picamera2/libcamera access.
|
|
32
|
+
"""
|
|
33
|
+
pi_indices: Set[int] = set()
|
|
34
|
+
sysfs = "/sys/class/video4linux"
|
|
35
|
+
if not os.path.isdir(sysfs):
|
|
36
|
+
return pi_indices
|
|
37
|
+
for entry in os.listdir(sysfs):
|
|
38
|
+
if not entry.startswith("video"):
|
|
39
|
+
continue
|
|
40
|
+
try:
|
|
41
|
+
with open(os.path.join(sysfs, entry, "name")) as f:
|
|
42
|
+
name = f.read().strip().lower()
|
|
43
|
+
if "unicam" in name or "bcm2835" in name or "rp1" in name:
|
|
44
|
+
pi_indices.add(int(entry[5:])) # strip "video" prefix
|
|
45
|
+
except (IOError, OSError, ValueError):
|
|
46
|
+
pass
|
|
47
|
+
return pi_indices
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def scan_usb_cameras(
|
|
51
|
+
max_cameras: int = 10, skip_indices: Optional[Set[int]] = None
|
|
52
|
+
) -> List[DetectedCamera]:
|
|
53
|
+
"""
|
|
54
|
+
Scan for USB cameras using OpenCV.
|
|
55
|
+
|
|
56
|
+
Args:
|
|
57
|
+
max_cameras: Maximum number of device indices to check.
|
|
58
|
+
skip_indices: V4L2 device indices to skip (e.g. Pi Camera devices).
|
|
59
|
+
|
|
60
|
+
Returns:
|
|
61
|
+
List of detected USB cameras.
|
|
62
|
+
"""
|
|
63
|
+
try:
|
|
64
|
+
import cv2
|
|
65
|
+
except ImportError:
|
|
66
|
+
return []
|
|
67
|
+
|
|
68
|
+
from plexus.cameras.usb import USBCamera
|
|
69
|
+
|
|
70
|
+
detected = []
|
|
71
|
+
|
|
72
|
+
for i in range(max_cameras):
|
|
73
|
+
if skip_indices and i in skip_indices:
|
|
74
|
+
continue
|
|
75
|
+
cap = cv2.VideoCapture(i)
|
|
76
|
+
if cap.isOpened():
|
|
77
|
+
# Get camera info if available
|
|
78
|
+
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
|
79
|
+
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
|
80
|
+
cap.release()
|
|
81
|
+
|
|
82
|
+
detected.append(DetectedCamera(
|
|
83
|
+
name=f"USB Camera {i}",
|
|
84
|
+
device_id=f"usb:{i}",
|
|
85
|
+
driver=USBCamera,
|
|
86
|
+
description=f"USB webcam at index {i} ({width}x{height})",
|
|
87
|
+
))
|
|
88
|
+
else:
|
|
89
|
+
cap.release()
|
|
90
|
+
# Stop after first failed index (cameras are typically sequential)
|
|
91
|
+
if i > 0:
|
|
92
|
+
break
|
|
93
|
+
|
|
94
|
+
return detected
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def scan_pi_cameras() -> List[DetectedCamera]:
|
|
98
|
+
"""
|
|
99
|
+
Scan for Raspberry Pi cameras using picamera2.
|
|
100
|
+
|
|
101
|
+
Returns:
|
|
102
|
+
List of detected Pi cameras.
|
|
103
|
+
"""
|
|
104
|
+
try:
|
|
105
|
+
from picamera2 import Picamera2
|
|
106
|
+
except ImportError:
|
|
107
|
+
return []
|
|
108
|
+
|
|
109
|
+
from plexus.cameras.picamera import PiCamera
|
|
110
|
+
|
|
111
|
+
detected = []
|
|
112
|
+
|
|
113
|
+
try:
|
|
114
|
+
# Get list of available cameras
|
|
115
|
+
camera_info = Picamera2.global_camera_info()
|
|
116
|
+
|
|
117
|
+
for i, info in enumerate(camera_info):
|
|
118
|
+
model = info.get("Model", "Unknown")
|
|
119
|
+
detected.append(DetectedCamera(
|
|
120
|
+
name=f"Pi Camera {i}",
|
|
121
|
+
device_id=f"picam:{i}",
|
|
122
|
+
driver=PiCamera,
|
|
123
|
+
description=f"Raspberry Pi Camera: {model}",
|
|
124
|
+
))
|
|
125
|
+
except Exception as e:
|
|
126
|
+
logger.debug(f"Pi camera scan failed: {e}")
|
|
127
|
+
|
|
128
|
+
return detected
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
def scan_cameras() -> List[DetectedCamera]:
|
|
132
|
+
"""
|
|
133
|
+
Scan for all available cameras (USB and Pi).
|
|
134
|
+
|
|
135
|
+
Pi cameras are scanned first so their V4L2 devices can be excluded
|
|
136
|
+
from the USB scan, preventing duplicate detection and device conflicts.
|
|
137
|
+
|
|
138
|
+
Returns:
|
|
139
|
+
List of all detected cameras.
|
|
140
|
+
|
|
141
|
+
Raises:
|
|
142
|
+
ImportError: If neither picamera2 nor opencv-python is installed.
|
|
143
|
+
"""
|
|
144
|
+
has_picamera2 = True
|
|
145
|
+
has_cv2 = True
|
|
146
|
+
|
|
147
|
+
try:
|
|
148
|
+
import picamera2 # noqa: F401
|
|
149
|
+
except ImportError:
|
|
150
|
+
has_picamera2 = False
|
|
151
|
+
|
|
152
|
+
try:
|
|
153
|
+
import cv2 # noqa: F401
|
|
154
|
+
except ImportError:
|
|
155
|
+
has_cv2 = False
|
|
156
|
+
|
|
157
|
+
if not has_picamera2 and not has_cv2:
|
|
158
|
+
raise ImportError("No camera library installed (picamera2 or opencv-python)")
|
|
159
|
+
|
|
160
|
+
cameras = []
|
|
161
|
+
|
|
162
|
+
pi_cameras = scan_pi_cameras()
|
|
163
|
+
cameras.extend(pi_cameras)
|
|
164
|
+
|
|
165
|
+
# If Pi cameras exist, skip their V4L2 device indices so OpenCV
|
|
166
|
+
# doesn't open them (which blocks picamera2/libcamera access).
|
|
167
|
+
skip = _get_pi_camera_v4l2_indices() if pi_cameras else set()
|
|
168
|
+
cameras.extend(scan_usb_cameras(skip_indices=skip))
|
|
169
|
+
|
|
170
|
+
return cameras
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
def auto_cameras(
    frame_rate: Optional[float] = None,
    resolution: Optional[Tuple[int, int]] = None,
    quality: Optional[int] = None,
) -> CameraHub:
    """
    Auto-detect cameras and create a CameraHub.

    Args:
        frame_rate: Override frame rate for all cameras.
        resolution: Override resolution for all cameras.
        quality: Override JPEG quality for all cameras.

    Returns:
        CameraHub with every successfully created camera added.
    """
    hub = CameraHub()

    for info in scan_cameras():
        options = {"camera_id": info.device_id}

        # Only forward overrides the caller actually supplied.
        for key, value in (
            ("frame_rate", frame_rate),
            ("resolution", resolution),
            ("quality", quality),
        ):
            if value is not None:
                options[key] = value

        # Driver-specific constructor argument derived from the device_id
        # scheme ("usb:<index>" or "picam:<num>").
        scheme, _, suffix = info.device_id.partition(":")
        if scheme == "usb":
            options["device_index"] = int(suffix)
        elif scheme == "picam":
            options["camera_num"] = int(suffix)

        try:
            hub.add(info.driver(**options))
        except Exception as e:
            # A camera that fails to open is skipped, not fatal.
            logger.debug(f"Failed to create {info.name}: {e}")

    return hub
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
def get_camera_info() -> List[dict]:
    """
    Get information about supported camera types.

    Returns:
        List of camera type info dicts (name, description, requires, install).
    """
    usb_entry = {
        "name": "USB Camera",
        "description": "USB webcams and built-in cameras via OpenCV",
        "requires": "opencv-python",
        "install": "pip install plexus-python[camera]",
    }
    pi_entry = {
        "name": "Pi Camera",
        "description": "Raspberry Pi Camera Module via picamera2",
        "requires": "picamera2",
        "install": "pip install plexus-python[picamera]",
    }
    return [usb_entry, pi_entry]
|