matrice-streaming 0.1.14__py3-none-any.whl → 0.1.65__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. matrice_streaming/__init__.py +44 -32
  2. matrice_streaming/streaming_gateway/camera_streamer/__init__.py +68 -1
  3. matrice_streaming/streaming_gateway/camera_streamer/async_camera_worker.py +1388 -0
  4. matrice_streaming/streaming_gateway/camera_streamer/async_ffmpeg_worker.py +966 -0
  5. matrice_streaming/streaming_gateway/camera_streamer/camera_streamer.py +188 -24
  6. matrice_streaming/streaming_gateway/camera_streamer/device_detection.py +507 -0
  7. matrice_streaming/streaming_gateway/camera_streamer/encoding_pool_manager.py +136 -0
  8. matrice_streaming/streaming_gateway/camera_streamer/ffmpeg_camera_streamer.py +1048 -0
  9. matrice_streaming/streaming_gateway/camera_streamer/ffmpeg_config.py +192 -0
  10. matrice_streaming/streaming_gateway/camera_streamer/ffmpeg_worker_manager.py +470 -0
  11. matrice_streaming/streaming_gateway/camera_streamer/gstreamer_camera_streamer.py +1368 -0
  12. matrice_streaming/streaming_gateway/camera_streamer/gstreamer_worker.py +1063 -0
  13. matrice_streaming/streaming_gateway/camera_streamer/gstreamer_worker_manager.py +546 -0
  14. matrice_streaming/streaming_gateway/camera_streamer/message_builder.py +60 -15
  15. matrice_streaming/streaming_gateway/camera_streamer/nvdec.py +1330 -0
  16. matrice_streaming/streaming_gateway/camera_streamer/nvdec_worker_manager.py +412 -0
  17. matrice_streaming/streaming_gateway/camera_streamer/platform_pipelines.py +680 -0
  18. matrice_streaming/streaming_gateway/camera_streamer/stream_statistics.py +111 -4
  19. matrice_streaming/streaming_gateway/camera_streamer/video_capture_manager.py +223 -27
  20. matrice_streaming/streaming_gateway/camera_streamer/worker_manager.py +694 -0
  21. matrice_streaming/streaming_gateway/debug/__init__.py +27 -2
  22. matrice_streaming/streaming_gateway/debug/benchmark.py +727 -0
  23. matrice_streaming/streaming_gateway/debug/debug_gstreamer_gateway.py +599 -0
  24. matrice_streaming/streaming_gateway/debug/debug_streaming_gateway.py +245 -95
  25. matrice_streaming/streaming_gateway/debug/debug_utils.py +29 -0
  26. matrice_streaming/streaming_gateway/debug/test_videoplayback.py +318 -0
  27. matrice_streaming/streaming_gateway/dynamic_camera_manager.py +656 -39
  28. matrice_streaming/streaming_gateway/metrics_reporter.py +676 -139
  29. matrice_streaming/streaming_gateway/streaming_action.py +71 -20
  30. matrice_streaming/streaming_gateway/streaming_gateway.py +1026 -78
  31. matrice_streaming/streaming_gateway/streaming_gateway_utils.py +175 -20
  32. matrice_streaming/streaming_gateway/streaming_status_listener.py +89 -0
  33. {matrice_streaming-0.1.14.dist-info → matrice_streaming-0.1.65.dist-info}/METADATA +1 -1
  34. matrice_streaming-0.1.65.dist-info/RECORD +56 -0
  35. matrice_streaming-0.1.14.dist-info/RECORD +0 -38
  36. {matrice_streaming-0.1.14.dist-info → matrice_streaming-0.1.65.dist-info}/WHEEL +0 -0
  37. {matrice_streaming-0.1.14.dist-info → matrice_streaming-0.1.65.dist-info}/licenses/LICENSE.txt +0 -0
  38. {matrice_streaming-0.1.14.dist-info → matrice_streaming-0.1.65.dist-info}/top_level.txt +0 -0
matrice_streaming/streaming_gateway/camera_streamer/nvdec_worker_manager.py (new file)
@@ -0,0 +1,412 @@
+"""NVDEC Worker Manager for StreamingGateway integration.
+
+This module provides a simplified manager for the NVDEC hardware decoding backend.
+Unlike other backends, NVDEC uses static camera configuration at startup and outputs
+to CUDA IPC ring buffers (NV12 format) for zero-copy GPU inference pipelines.
+"""
+
+import logging
+import multiprocessing as mp
+import time
+from typing import Dict, List, Optional, Any
+
+from .nvdec import (
+    nvdec_pool_process,
+    StreamConfig,
+    CUPY_AVAILABLE,
+    PYNVCODEC_AVAILABLE,
+    RING_BUFFER_AVAILABLE,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def is_nvdec_available() -> bool:
+    """Check if NVDEC backend is available.
+
+    Requires:
+        - CuPy with CUDA support
+        - PyNvVideoCodec for NVDEC hardware decode
+        - cuda_shm_ring_buffer module for CUDA IPC
+    """
+    return CUPY_AVAILABLE and PYNVCODEC_AVAILABLE and RING_BUFFER_AVAILABLE
+
+
+def get_available_gpu_count() -> int:
+    """Detect the number of available CUDA GPUs.
+
+    Returns:
+        Number of available GPUs, or 1 if detection fails.
+    """
+    if not CUPY_AVAILABLE:
+        return 1
+
+    try:
+        import cupy as cp
+        return cp.cuda.runtime.getDeviceCount()
+    except Exception as e:
+        logger.warning(f"Failed to detect GPU count: {e}, defaulting to 1")
+        return 1
+
+
+class NVDECWorkerManager:
+    """Manager for NVDEC worker processes - static camera configuration.
+
+    This manager wraps the existing nvdec_pool_process function to integrate
+    with StreamingGateway. Key differences from other worker managers:
+
+    - Static camera configuration (no dynamic add/remove)
+    - Outputs to CUDA IPC ring buffers (not Redis/Kafka)
+    - NV12 format output (50% smaller than RGB)
+    - One worker process per GPU
+    """
+
+    def __init__(
+        self,
+        camera_configs: List[Dict[str, Any]],
+        stream_config: Dict[str, Any],  # Unused but kept for interface consistency
+        gpu_id: int = 0,
+        num_gpus: int = 0,  # 0 = auto-detect all available GPUs
+        nvdec_pool_size: int = 8,
+        nvdec_burst_size: int = 4,
+        frame_width: int = 640,
+        frame_height: int = 640,
+        num_slots: int = 32,
+        target_fps: int = 0,  # 0 = use per-camera FPS from config
+        duration_sec: float = 0,  # 0 = infinite
+    ):
+        """Initialize NVDEC Worker Manager.
+
+        Args:
+            camera_configs: List of camera configuration dicts with keys:
+                - camera_id or stream_key: Unique identifier (used for ring buffer naming)
+                - source: Video file path or RTSP URL
+                - width: Optional frame width (default: frame_width)
+                - height: Optional frame height (default: frame_height)
+                - fps: FPS limit for this camera (used by default)
+            stream_config: Stream configuration (unused, for interface consistency)
+            gpu_id: Primary GPU device ID (starting GPU for round-robin assignment)
+            num_gpus: Number of GPUs to use (0 = auto-detect all available GPUs)
+            nvdec_pool_size: Number of NVDEC decoders per GPU
+            nvdec_burst_size: Frames per stream before rotating to next
+            frame_width: Default output frame width (used if camera config doesn't specify)
+            frame_height: Default output frame height (used if camera config doesn't specify)
+            num_slots: Ring buffer slots per camera
+            target_fps: Global FPS override (0 = use per-camera FPS from config)
+            duration_sec: Duration to run (0 = infinite until stop)
+        """
+        if not is_nvdec_available():
+            raise RuntimeError(
+                "NVDEC not available. Requires CuPy, PyNvVideoCodec, and cuda_shm_ring_buffer"
+            )
+
+        self.camera_configs = camera_configs
+        self.stream_config = stream_config
+        self.gpu_id = gpu_id
+
+        # Auto-detect GPUs if num_gpus is 0
+        if num_gpus <= 0:
+            detected_gpus = get_available_gpu_count()
+            self.num_gpus = min(detected_gpus, 8)  # Max 8 GPUs
+            logger.info(f"Auto-detected {detected_gpus} GPU(s), using {self.num_gpus}")
+        else:
+            self.num_gpus = min(num_gpus, 8)  # Max 8 GPUs
+        self.nvdec_pool_size = nvdec_pool_size
+        self.nvdec_burst_size = nvdec_burst_size
+        self.frame_width = frame_width
+        self.frame_height = frame_height
+        self.num_slots = num_slots
+        self.target_fps = target_fps
+        self.duration_sec = duration_sec if duration_sec > 0 else float('inf')
+
+        self._workers: List[mp.Process] = []
+        self._stop_event: Optional[mp.Event] = None
+        self._result_queue: Optional[mp.Queue] = None
+        self._shared_frame_count: Optional[mp.Value] = None
+        self._gpu_frame_counts: Dict[int, mp.Value] = {}  # Per-GPU counters
+        self._start_time: Optional[float] = None
+        self._is_running = False
+
+        # Convert camera configs to StreamConfig objects and assign to GPUs
+        self._stream_configs: List[StreamConfig] = []
+        self._gpu_camera_assignments: Dict[int, List[StreamConfig]] = {
+            i: [] for i in range(self.num_gpus)
+        }
+        self._camera_to_gpu: Dict[str, int] = {}
+
+        self._prepare_camera_configs()
+
+        logger.info(
+            f"NVDECWorkerManager initialized: {len(camera_configs)} cameras, "
+            f"{self.num_gpus} GPU(s), pool_size={nvdec_pool_size}"
+        )
+
+    def _prepare_camera_configs(self):
+        """Convert dict configs to StreamConfig and distribute across GPUs.
+
+        Ring buffers are named using camera_id for SHM identification.
+        Per-camera FPS from config is used by default (target_fps=0 means use config FPS).
+        """
+        for i, config in enumerate(self.camera_configs):
+            # Extract camera ID (support both camera_id and stream_key)
+            # This ID is used for naming the CUDA IPC ring buffer
+            camera_id = config.get('camera_id') or config.get('stream_key') or f"cam_{i:04d}"
+
+            # Extract video source
+            source = config.get('source') or config.get('video_path')
+            if not source:
+                logger.warning(f"Camera {camera_id} has no source, skipping")
+                continue
+
+            # Extract dimensions (use per-camera config or fallback to defaults)
+            width = config.get('width') or self.frame_width
+            height = config.get('height') or self.frame_height
+
+            # Determine FPS: use global override if set, otherwise per-camera FPS from config
+            if self.target_fps > 0:
+                # Global FPS override is set
+                fps = self.target_fps
+            else:
+                # Use per-camera FPS from config (default streaming FPS)
+                fps = config.get('fps', 10)  # Default to 10 FPS if not specified
+
+            # Assign to GPU (round-robin starting from gpu_id)
+            gpu_id = (self.gpu_id + i) % self.num_gpus
+
+            stream_config = StreamConfig(
+                camera_id=camera_id,
+                video_path=source,
+                width=width,
+                height=height,
+                target_fps=fps,
+                gpu_id=gpu_id,
+            )
+
+            self._stream_configs.append(stream_config)
+            self._gpu_camera_assignments[gpu_id].append(stream_config)
+            self._camera_to_gpu[camera_id] = gpu_id
+
+            logger.debug(f"Camera {camera_id}: source={source}, {width}x{height}@{fps}fps, GPU{gpu_id}")
+
+    def start(self) -> None:
+        """Start NVDEC worker processes (one per GPU)."""
+        if self._is_running:
+            logger.warning("NVDECWorkerManager is already running")
+            return
+
+        if not self._stream_configs:
+            logger.warning("No cameras configured, nothing to start")
+            return
+
+        ctx = mp.get_context("spawn")
+        self._stop_event = ctx.Event()
+        self._result_queue = ctx.Queue()
+        self._shared_frame_count = ctx.Value('L', 0)  # Global counter (all GPUs)
+        self._start_time = time.perf_counter()
+
+        # Create per-GPU frame counters
+        self._gpu_frame_counts = {}
+        for gpu_id in range(self.num_gpus):
+            if self._gpu_camera_assignments[gpu_id]:  # Only if GPU has cameras
+                self._gpu_frame_counts[gpu_id] = ctx.Value('L', 0)
+
+        total_num_streams = len(self._stream_configs)
+        total_num_gpus = len([g for g in range(self.num_gpus) if self._gpu_camera_assignments[g]])
+
+        logger.info(f"Starting NVDEC: {total_num_streams} cameras across {total_num_gpus} GPUs")
+
+        # Start one process per GPU that has cameras
+        for gpu_id in range(self.num_gpus):
+            gpu_cameras = self._gpu_camera_assignments[gpu_id]
+            if not gpu_cameras:
+                continue
+
+            p = ctx.Process(
+                target=nvdec_pool_process,
+                args=(
+                    gpu_id,  # process_id
+                    gpu_cameras,  # camera_configs (List[StreamConfig])
+                    self.nvdec_pool_size,  # pool_size
+                    self.duration_sec,  # duration_sec
+                    self._result_queue,  # result_queue
+                    self._stop_event,  # stop_event
+                    self.nvdec_burst_size,  # burst_size
+                    self.num_slots,  # num_slots
+                    self.target_fps,  # target_fps
+                    self._shared_frame_count,  # shared_frame_count (global)
+                    self._gpu_frame_counts,  # gpu_frame_counts (per-GPU dict)
+                    total_num_streams,  # total_num_streams
+                    total_num_gpus,  # total_num_gpus
+                ),
+                name=f"NVDECWorker-GPU{gpu_id}",
+                daemon=False,
+            )
+            p.start()
+            self._workers.append(p)
+            logger.info(f"Started NVDEC worker on GPU {gpu_id} with {len(gpu_cameras)} cameras")
+
+        self._is_running = True
+        logger.info(f"NVDECWorkerManager started: {len(self._workers)} workers")
+
+    def stop(self, timeout: float = 15.0) -> None:
+        """Stop all worker processes.
+
+        Args:
+            timeout: Maximum time to wait for each worker to stop gracefully
+        """
+        if not self._is_running:
+            logger.warning("NVDECWorkerManager is not running")
+            return
+
+        logger.info("Stopping NVDECWorkerManager...")
+
+        # Signal workers to stop
+        if self._stop_event:
+            self._stop_event.set()
+
+        # Wait for workers to finish
+        for p in self._workers:
+            p.join(timeout=timeout)
+            if p.is_alive():
+                logger.warning(f"Worker {p.name} did not stop gracefully, terminating")
+                p.terminate()
+                p.join(timeout=2.0)
+
+        self._workers.clear()
+        self._is_running = False
+        logger.info("NVDECWorkerManager stopped")
+
+    def get_worker_statistics(self) -> Dict[str, Any]:
+        """Return statistics from workers.
+
+        Returns:
+            Dict with keys:
+            - num_workers: Number of worker processes
+            - running_workers: Number of currently running workers
+            - total_cameras: Total cameras across all workers
+            - gpu_assignments: Cameras per GPU
+            - total_frames: Total frames processed (from shared counter)
+            - elapsed_sec: Time since start
+            - aggregate_fps: Overall FPS
+            - per_stream_fps: Average FPS per camera
+            - backend: 'nvdec'
+            - gpu_results: Per-GPU results from result queue
+        """
+        stats = {
+            'backend': 'nvdec',
+            'num_workers': len(self._workers),
+            'running_workers': sum(1 for p in self._workers if p.is_alive()),
+            'total_cameras': len(self._stream_configs),
+            'gpu_assignments': {
+                gpu_id: len(cameras)
+                for gpu_id, cameras in self._gpu_camera_assignments.items()
+            },
+            'nvdec_config': {
+                'gpu_id': self.gpu_id,
+                'num_gpus': self.num_gpus,
+                'pool_size': self.nvdec_pool_size,
+                'burst_size': self.nvdec_burst_size,
+                'frame_size': f"{self.frame_width}x{self.frame_height}",
+                'num_slots': self.num_slots,
+                'target_fps': self.target_fps,
+            },
+        }
+
+        # Add frame count and FPS
+        if self._shared_frame_count:
+            total_frames = self._shared_frame_count.value
+            stats['total_frames'] = total_frames
+
+            if self._start_time:
+                elapsed = time.perf_counter() - self._start_time
+                stats['elapsed_sec'] = elapsed
+                stats['aggregate_fps'] = total_frames / elapsed if elapsed > 0 else 0
+                stats['per_stream_fps'] = (
+                    stats['aggregate_fps'] / len(self._stream_configs)
+                    if self._stream_configs else 0
+                )
+
+        # Add per-GPU frame counts and FPS
+        if self._gpu_frame_counts and self._start_time:
+            elapsed = time.perf_counter() - self._start_time
+            gpu_stats = {}
+            for gpu_id, counter in self._gpu_frame_counts.items():
+                gpu_frames = counter.value
+                num_cams = len(self._gpu_camera_assignments.get(gpu_id, []))
+                gpu_fps = gpu_frames / elapsed if elapsed > 0 else 0
+                gpu_per_cam = gpu_fps / num_cams if num_cams > 0 else 0
+                gpu_stats[f'GPU{gpu_id}'] = {
+                    'frames': gpu_frames,
+                    'cameras': num_cams,
+                    'fps': gpu_fps,
+                    'fps_per_cam': gpu_per_cam,
+                }
+            stats['per_gpu_stats'] = gpu_stats
+
+        # Collect any available results from queue (non-blocking)
+        gpu_results = []
+        if self._result_queue:
+            while True:
+                try:
+                    result = self._result_queue.get_nowait()
+                    gpu_results.append(result)
+                except:
+                    break
+        stats['gpu_results'] = gpu_results
+
+        return stats
+
+    def get_camera_assignments(self) -> Dict[str, int]:
+        """Return mapping of camera_id to GPU ID.
+
+        Returns:
+            Dict mapping camera_id -> gpu_id
+        """
+        return self._camera_to_gpu.copy()
+
+    def add_camera(self, camera_config: Dict[str, Any]) -> bool:
+        """Not supported - NVDEC uses static camera configuration.
+
+        Raises:
+            NotImplementedError: Always raised
+        """
+        raise NotImplementedError(
+            "NVDEC backend uses static camera configuration. "
+            "Cameras must be configured at initialization."
+        )
+
+    def remove_camera(self, stream_key: str) -> bool:
+        """Not supported - NVDEC uses static camera configuration.
+
+        Raises:
+            NotImplementedError: Always raised
+        """
+        raise NotImplementedError(
+            "NVDEC backend uses static camera configuration. "
+            "Cameras cannot be removed at runtime."
+        )
+
+    def update_camera(self, camera_config: Dict[str, Any]) -> bool:
+        """Not supported - NVDEC uses static camera configuration.
+
+        Raises:
+            NotImplementedError: Always raised
+        """
+        raise NotImplementedError(
+            "NVDEC backend uses static camera configuration. "
+            "Cameras cannot be updated at runtime."
+        )
+
+    @property
+    def is_running(self) -> bool:
+        """Check if the manager is currently running."""
+        return self._is_running
+
+    def __enter__(self):
+        """Context manager entry."""
+        self.start()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        """Context manager exit."""
+        self.stop()
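
For orientation, here is a minimal usage sketch of the new NVDECWorkerManager, based only on the constructor and methods shown in the hunk above. It assumes the 0.1.65 wheel is installed along with the NVDEC prerequisites (CuPy, PyNvVideoCodec, cuda_shm_ring_buffer); the camera IDs, RTSP URLs, and polling loop are placeholders, and the consumer that reads the per-camera CUDA IPC ring buffers (named by camera_id, NV12 frames) is not shown here.

# Hypothetical usage sketch for NVDECWorkerManager; sources and IDs are placeholders.
import time

from matrice_streaming.streaming_gateway.camera_streamer.nvdec_worker_manager import (
    NVDECWorkerManager,
    is_nvdec_available,
)

camera_configs = [
    {"camera_id": "cam_0001", "source": "rtsp://example.invalid/stream1", "fps": 10},
    {"camera_id": "cam_0002", "source": "rtsp://example.invalid/stream2", "fps": 10},
]

if is_nvdec_available():
    # stream_config is unused by this backend but required by the shared interface;
    # num_gpus=0 auto-detects and round-robins cameras across available GPUs.
    with NVDECWorkerManager(camera_configs, stream_config={}, num_gpus=0) as manager:
        print(manager.get_camera_assignments())  # camera_id -> GPU id
        for _ in range(3):
            time.sleep(10)
            stats = manager.get_worker_statistics()
            print(stats.get("aggregate_fps"), stats.get("per_gpu_stats"))
    # Leaving the "with" block calls stop(), which signals and joins the per-GPU workers.
else:
    print("NVDEC backend unavailable (CuPy / PyNvVideoCodec / cuda_shm_ring_buffer missing)")

Because add_camera, remove_camera, and update_camera all raise NotImplementedError, the full camera set has to be known before the manager is constructed; dynamic camera changes remain the domain of the other worker managers in this release.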