matrice-inference 0.1.0__py3-none-manylinux_2_17_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of matrice-inference might be problematic.

Files changed (80)
  1. matrice_inference/deploy/aggregator/aggregator.cpython-312-x86_64-linux-gnu.so +0 -0
  2. matrice_inference/deploy/aggregator/aggregator.pyi +55 -0
  3. matrice_inference/deploy/aggregator/analytics.cpython-312-x86_64-linux-gnu.so +0 -0
  4. matrice_inference/deploy/aggregator/analytics.pyi +63 -0
  5. matrice_inference/deploy/aggregator/ingestor.cpython-312-x86_64-linux-gnu.so +0 -0
  6. matrice_inference/deploy/aggregator/ingestor.pyi +79 -0
  7. matrice_inference/deploy/aggregator/pipeline.cpython-312-x86_64-linux-gnu.so +0 -0
  8. matrice_inference/deploy/aggregator/pipeline.pyi +139 -0
  9. matrice_inference/deploy/aggregator/publisher.cpython-312-x86_64-linux-gnu.so +0 -0
  10. matrice_inference/deploy/aggregator/publisher.pyi +59 -0
  11. matrice_inference/deploy/aggregator/synchronizer.cpython-312-x86_64-linux-gnu.so +0 -0
  12. matrice_inference/deploy/aggregator/synchronizer.pyi +58 -0
  13. matrice_inference/deploy/client/auto_streaming/auto_streaming.cpython-312-x86_64-linux-gnu.so +0 -0
  14. matrice_inference/deploy/client/auto_streaming/auto_streaming.pyi +145 -0
  15. matrice_inference/deploy/client/auto_streaming/auto_streaming_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  16. matrice_inference/deploy/client/auto_streaming/auto_streaming_utils.pyi +126 -0
  17. matrice_inference/deploy/client/client.cpython-312-x86_64-linux-gnu.so +0 -0
  18. matrice_inference/deploy/client/client.pyi +337 -0
  19. matrice_inference/deploy/client/client_stream_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  20. matrice_inference/deploy/client/client_stream_utils.pyi +83 -0
  21. matrice_inference/deploy/client/client_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  22. matrice_inference/deploy/client/client_utils.pyi +77 -0
  23. matrice_inference/deploy/client/streaming_gateway/streaming_gateway.cpython-312-x86_64-linux-gnu.so +0 -0
  24. matrice_inference/deploy/client/streaming_gateway/streaming_gateway.pyi +120 -0
  25. matrice_inference/deploy/client/streaming_gateway/streaming_gateway_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  26. matrice_inference/deploy/client/streaming_gateway/streaming_gateway_utils.pyi +442 -0
  27. matrice_inference/deploy/client/streaming_gateway/streaming_results_handler.cpython-312-x86_64-linux-gnu.so +0 -0
  28. matrice_inference/deploy/client/streaming_gateway/streaming_results_handler.pyi +19 -0
  29. matrice_inference/deploy/optimize/cache_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  30. matrice_inference/deploy/optimize/cache_manager.pyi +15 -0
  31. matrice_inference/deploy/optimize/frame_comparators.cpython-312-x86_64-linux-gnu.so +0 -0
  32. matrice_inference/deploy/optimize/frame_comparators.pyi +203 -0
  33. matrice_inference/deploy/optimize/frame_difference.cpython-312-x86_64-linux-gnu.so +0 -0
  34. matrice_inference/deploy/optimize/frame_difference.pyi +165 -0
  35. matrice_inference/deploy/optimize/transmission.cpython-312-x86_64-linux-gnu.so +0 -0
  36. matrice_inference/deploy/optimize/transmission.pyi +97 -0
  37. matrice_inference/deploy/server/inference/batch_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  38. matrice_inference/deploy/server/inference/batch_manager.pyi +50 -0
  39. matrice_inference/deploy/server/inference/inference_interface.cpython-312-x86_64-linux-gnu.so +0 -0
  40. matrice_inference/deploy/server/inference/inference_interface.pyi +114 -0
  41. matrice_inference/deploy/server/inference/model_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  42. matrice_inference/deploy/server/inference/model_manager.pyi +80 -0
  43. matrice_inference/deploy/server/inference/triton_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  44. matrice_inference/deploy/server/inference/triton_utils.pyi +115 -0
  45. matrice_inference/deploy/server/proxy/proxy_interface.cpython-312-x86_64-linux-gnu.so +0 -0
  46. matrice_inference/deploy/server/proxy/proxy_interface.pyi +90 -0
  47. matrice_inference/deploy/server/proxy/proxy_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  48. matrice_inference/deploy/server/proxy/proxy_utils.pyi +113 -0
  49. matrice_inference/deploy/server/server.cpython-312-x86_64-linux-gnu.so +0 -0
  50. matrice_inference/deploy/server/server.pyi +155 -0
  51. matrice_inference/deploy/server/stream/inference_worker.cpython-312-x86_64-linux-gnu.so +0 -0
  52. matrice_inference/deploy/server/stream/inference_worker.pyi +56 -0
  53. matrice_inference/deploy/server/stream/kafka_consumer_worker.cpython-312-x86_64-linux-gnu.so +0 -0
  54. matrice_inference/deploy/server/stream/kafka_consumer_worker.pyi +51 -0
  55. matrice_inference/deploy/server/stream/kafka_producer_worker.cpython-312-x86_64-linux-gnu.so +0 -0
  56. matrice_inference/deploy/server/stream/kafka_producer_worker.pyi +50 -0
  57. matrice_inference/deploy/server/stream/stream_debug_logger.cpython-312-x86_64-linux-gnu.so +0 -0
  58. matrice_inference/deploy/server/stream/stream_debug_logger.pyi +47 -0
  59. matrice_inference/deploy/server/stream/stream_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  60. matrice_inference/deploy/server/stream/stream_manager.pyi +69 -0
  61. matrice_inference/deploy/server/stream/video_buffer.cpython-312-x86_64-linux-gnu.so +0 -0
  62. matrice_inference/deploy/server/stream/video_buffer.pyi +120 -0
  63. matrice_inference/deploy/stream/kafka_stream.cpython-312-x86_64-linux-gnu.so +0 -0
  64. matrice_inference/deploy/stream/kafka_stream.pyi +444 -0
  65. matrice_inference/deploy/stream/redis_stream.cpython-312-x86_64-linux-gnu.so +0 -0
  66. matrice_inference/deploy/stream/redis_stream.pyi +447 -0
  67. matrice_inference/deployment/camera_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  68. matrice_inference/deployment/camera_manager.pyi +669 -0
  69. matrice_inference/deployment/deployment.cpython-312-x86_64-linux-gnu.so +0 -0
  70. matrice_inference/deployment/deployment.pyi +736 -0
  71. matrice_inference/deployment/inference_pipeline.cpython-312-x86_64-linux-gnu.so +0 -0
  72. matrice_inference/deployment/inference_pipeline.pyi +527 -0
  73. matrice_inference/deployment/streaming_gateway_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  74. matrice_inference/deployment/streaming_gateway_manager.pyi +275 -0
  75. matrice_inference/py.typed +0 -0
  76. matrice_inference-0.1.0.dist-info/METADATA +26 -0
  77. matrice_inference-0.1.0.dist-info/RECORD +80 -0
  78. matrice_inference-0.1.0.dist-info/WHEEL +5 -0
  79. matrice_inference-0.1.0.dist-info/licenses/LICENSE.txt +21 -0
  80. matrice_inference-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,120 @@
+ """Auto-generated stub for module: streaming_gateway."""
+ from typing import Any, Callable, Dict, List, Optional
+
+ from matrice_inference.deploy.client.client import MatriceDeployClient
+ from matrice_inference.deploy.client.client_stream_utils import ClientStreamUtils
+ from matrice_inference.deploy.client.streaming_gateway.streaming_gateway_utils import InputConfig, OutputConfig, InputType, ModelInputType, _RealTimeJsonEventPicker
+ from matrice_inference.deploy.client.streaming_gateway.streaming_results_handler import StreamingResultsHandler
+ import json
+ import logging
+ import threading
+ import time
+
+ # Classes
+ class StreamingGateway:
+     """
+     Simplified streaming gateway that leverages MatriceDeployClient's capabilities.
+
+     Supports both frame-based streaming (sending individual images) and video-based
+     streaming (sending video chunks) based on the model_input_type configuration.
+
+     Now includes optional post-processing capabilities for model results.
+
+     Prevents multiple deployments or background streams from being started simultaneously
+     using simple class-level tracking.
+
+     Example usage:
+         # Traditional usage with manual input config
+         frame_input = create_camera_frame_input(camera_index=0, fps=30)
+         video_input = create_camera_video_input(
+             camera_index=0,
+             fps=30,
+             video_duration=5.0, # 5-second chunks
+             video_format="mp4"
+         )
+
+         gateway = StreamingGateway(
+             session=session,
+             service_id="your_service_id",
+             inputs_config=[video_input],
+             output_config=output_config
+         )
+
+         gateway.start_streaming()
+
+         # To stop all streams from any instance:
+         StreamingGateway.stop_all_active_streams()
+     """
+
+     def __init__(self: Any, session: Any, service_id: str = None, inputs_config: List[InputConfig] = None, output_config: Any = None, json_event_picker: Any = _RealTimeJsonEventPicker(), create_deployment_config: Dict = None, auth_key: str = None, consumer_group_id: str = None, result_callback: Optional[Callable] = None, strip_input_from_result: bool = True, force_restart: bool = False) -> None: ...
+     """
+     Initialize StreamingGateway.
+
+     Args:
+         session: Session object for authentication
+         service_id: ID of existing deployment (optional if create_deployment_config provided)
+         inputs_config: Multiple input configurations (alternative to input_config)
+         output_config: Output configuration
+         create_deployment_config: Configuration for creating new deployment
+         auth_key: Authentication key for deployment
+         consumer_group_id: Kafka consumer group ID
+         result_callback: Optional callback function for processing results
+         strip_input_from_result: Whether to remove 'input' field from results to save space
+         force_restart: Whether to force stop existing streams and restart (use with caution)
+     """
+
+     def get_config(self: Any) -> Dict: ...
+     """
+     Get current configuration.
+
+     Returns:
+         Dict with current configuration
+     """
+
+     def get_statistics(self: Any) -> Dict: ...
+     """
+     Get streaming statistics.
+
+     Returns:
+         Dict with streaming statistics
+     """
+
+     def load_config(cls: Any, filepath: str, session: Any = None, auth_key: str = None) -> Any: ...
+     """
+     Load configuration from file and create StreamingGateway.
+
+     Args:
+         filepath: Path to configuration file
+         session: Session object (required)
+         auth_key: Authentication key
+
+     Returns:
+         StreamingGateway instance
+     """
+
+     def save_config(self: Any, filepath: str) -> Any: ...
+     """
+     Save current configuration to file.
+
+     Args:
+         filepath: Path to save configuration
+     """
+
+     def start_streaming(self: Any, send_to_api: bool = False) -> bool: ...
+     """
+     Start streaming using MatriceDeployClient's built-in capabilities.
+
+     Returns:
+         bool: True if streaming started successfully, False otherwise
+     """
+
+     def stop_all_active_streams(self: Any) -> Any: ...
+     """
+     Stop all active streams across all deployments.
+     """
+
+     def stop_streaming(self: Any) -> None: ...
+     """
+     Stop all streaming operations.
+     """
+
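The stub above only declares signatures. As a point of reference, here is a minimal usage sketch assembled solely from the signatures in this stub and the streaming_gateway_utils stub shown below; the session object, service ID, and paths are illustrative placeholders, not values documented by the package.

# Hypothetical sketch: wire a camera input to a file output through StreamingGateway.
from matrice_inference.deploy.client.streaming_gateway.streaming_gateway import StreamingGateway
from matrice_inference.deploy.client.streaming_gateway.streaming_gateway_utils import (
    create_camera_frame_input,
    create_file_output,
)

session = ...  # placeholder: the authenticated session object the SDK expects

camera_input = create_camera_frame_input(camera_index=0, fps=15, stream_key="cam-0")
file_output = create_file_output(directory="./results", max_files=100)

gateway = StreamingGateway(
    session=session,
    service_id="your_service_id",   # placeholder: existing deployment ID
    inputs_config=[camera_input],
    output_config=file_output,
)

if gateway.start_streaming():       # documented to return True on success
    ...                             # streaming runs in background threads
    gateway.stop_streaming()        # stop this gateway's streams when finished
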
@@ -0,0 +1,442 @@
+ """Auto-generated stub for module: streaming_gateway_utils."""
+ from typing import Any, Dict, List, Optional, Tuple, Union
+
+ from collections import deque
+ from dataclasses import dataclass, asdict
+ from enum import Enum
+ from matrice_inference.deploy.utils.post_processing.core.config import BaseConfig, AlertConfig, TrackingConfig, ZoneConfig
+ from urllib.parse import urlparse
+ import json
+ import os
+ import os
+ import re
+ import requests
+ import time
+ import urllib3
+ import uuid
+ import warnings
+
+ # Functions
+ def create_camera_frame_input(camera_index: int = 0, fps: int = 30, quality: int = 95, stream_key: str = None, width: int = None, height: int = None) -> Any: ...
+ """
+ Create a camera input for frame-based streaming.
+
+ Args:
+     camera_index: Camera device index
+     fps: Frames per second
+     quality: Image quality (1-100)
+     stream_key: Stream identifier
+     width: Frame width
+     height: Frame height
+
+ Returns:
+     InputConfig: Camera input configured for frame streaming
+ """
+ def create_camera_input(camera_index: int = 0, fps: int = 30, quality: int = 95, stream_key: str = None, width: int = None, height: int = None, model_input_type: Any = ModelInputType.FRAMES, video_duration: float = None, max_frames: int = None, video_format: str = 'mp4') -> Any: ...
+ """
+ Create a camera input configuration.
+
+ Args:
+     camera_index: Camera device index (0 for default camera)
+     fps: Frames per second to capture
+     quality: Video/image quality (1-100)
+     stream_key: Unique identifier for the stream
+     width: Frame width in pixels
+     height: Frame height in pixels
+     model_input_type: FRAMES for individual images, VIDEO for video chunks
+     video_duration: Duration of video chunks in seconds (only for VIDEO mode)
+     max_frames: Maximum frames per video chunk (only for VIDEO mode)
+     video_format: Video format for encoding (mp4, avi, webm)
+
+ Returns:
+     InputConfig: Configured input for camera
+
+ Raises:
+     ValueError: If parameters are invalid
+ """
+ def create_camera_video_input(camera_index: int = 0, fps: int = 30, quality: int = 95, stream_key: str = None, width: int = None, height: int = None, video_duration: float = 5.0, video_format: str = 'mp4') -> Any: ...
+ """
+ Create a camera input for video-based streaming with duration limit.
+
+ Args:
+     camera_index: Camera device index
+     fps: Frames per second
+     quality: Video quality (1-100)
+     stream_key: Stream identifier
+     width: Frame width
+     height: Frame height
+     video_duration: Duration of video chunks in seconds
+     video_format: Video format (mp4, avi, webm)
+
+ Returns:
+     InputConfig: Camera input configured for video streaming
+ """
+ def create_camera_video_input_by_frames(camera_index: int = 0, fps: int = 30, quality: int = 95, stream_key: str = None, width: int = None, height: int = None, max_frames: int = 150, video_format: str = 'mp4') -> Any: ...
+ """
+ Create a camera input for video-based streaming with frame count limit.
+
+ Args:
+     camera_index: Camera device index
+     fps: Frames per second
+     quality: Video quality (1-100)
+     stream_key: Stream identifier
+     width: Frame width
+     height: Frame height
+     max_frames: Maximum frames per video chunk
+     video_format: Video format (mp4, avi, webm)
+
+ Returns:
+     InputConfig: Camera input configured for video streaming
+ """
+ def create_detection_post_processing_config(confidence_threshold: float = 0.6, enable_counting: bool = True, enable_alerting: bool = True, map_index_to_category: bool = False, index_to_category: Dict[int, str] = None, category_triggers: List[str] = None, count_threshold: int = None) -> Any: ...
+ """
+ Create a post-processing configuration optimized for object detection.
+
+ Args:
+     confidence_threshold: Global confidence threshold for filtering detections
+     enable_counting: Whether to enable object counting features
+     enable_alerting: Whether to enable alerting features
+     map_index_to_category: Whether to map category indices to names
+     index_to_category: Mapping from category indices to category names
+     category_triggers: List of categories that should trigger alerts
+     count_threshold: Threshold for triggering count-based alerts
+
+ Returns:
+     PostProcessingConfig optimized for detection models
+ """
+ def create_dual_output(file_directory: str, kafka_topic: str, kafka_bootstrap_servers: str, filename_pattern: str = None, max_files: int = None, kafka_key_field: str = 'stream_key', producer_config: Dict = None, post_processing_config: Any = None, apply_post_processing: bool = False, save_original_results: bool = True) -> Any: ...
+ """
+ Create a dual output configuration (both file and Kafka).
+
+ Args:
+     file_directory: Directory for file output
+     kafka_topic: Kafka topic name
+     kafka_bootstrap_servers: Kafka bootstrap servers
+     filename_pattern: Pattern for output filenames
+     max_files: Maximum number of files to keep
+     kafka_key_field: Field to use as Kafka message key
+     producer_config: Additional Kafka producer configuration
+
+ Returns:
+     OutputConfig instance for dual output
+ """
+ def create_file_output(directory: str, filename_pattern: str = None, max_files: int = None, post_processing_config: Any = None, apply_post_processing: bool = False, save_original_results: bool = True) -> Any: ...
+ """
+ Create a file output configuration.
+
+ Args:
+     directory: Output directory path
+     filename_pattern: Pattern for output filenames
+     max_files: Maximum number of files to keep
+     post_processing_config: Post-processing configuration (optional)
+     apply_post_processing: Whether to apply post-processing (default: False)
+     save_original_results: Whether to save original results alongside processed ones (default: True)
+
+ Returns:
+     OutputConfig instance for file output
+ """
+ def create_http_video_frame_input(video_url: str, fps: int = 30, quality: int = 95, stream_key: str = None, width: int = None, height: int = None) -> Any: ...
+ """
+ Create an HTTP video file input for frame-based streaming.
+
+ Args:
+     video_url: HTTP/HTTPS video file URL
+     fps: Frames per second
+     quality: Image quality (1-100)
+     stream_key: Stream identifier
+     width: Frame width
+     height: Frame height
+
+ Returns:
+     InputConfig: HTTP video input configured for frame streaming
+ """
+ def create_http_video_input(video_url: str, fps: int = 30, quality: int = 95, stream_key: str = None, width: int = None, height: int = None, model_input_type: Any = ModelInputType.FRAMES, video_duration: float = None, max_frames: int = None, video_format: str = 'mp4') -> Any: ...
+ """
+ Create an HTTP video file input configuration.
+
+ Args:
+     video_url: HTTP/HTTPS video file URL
+     fps: Frames per second to process
+     quality: Video/image quality (1-100)
+     stream_key: Unique identifier for the stream
+     width: Frame width in pixels
+     height: Frame height in pixels
+     model_input_type: FRAMES for individual images, VIDEO for video chunks
+     video_duration: Duration of video chunks in seconds (only for VIDEO mode)
+     max_frames: Maximum frames per video chunk (only for VIDEO mode)
+     video_format: Video format for encoding (mp4, avi, webm)
+
+ Returns:
+     InputConfig: Configured input for HTTP video file
+
+ Raises:
+     ValueError: If parameters are invalid
+ """
+ def create_http_video_video_input(video_url: str, fps: int = 30, quality: int = 95, stream_key: str = None, width: int = None, height: int = None, video_duration: float = 5.0, video_format: str = 'mp4') -> Any: ...
+ """
+ Create an HTTP video file input for video-based streaming with duration limit.
+
+ Args:
+     video_url: HTTP/HTTPS video file URL
+     fps: Frames per second
+     quality: Video quality (1-100)
+     stream_key: Stream identifier
+     width: Frame width
+     height: Frame height
+     video_duration: Duration of video chunks in seconds
+     video_format: Video format (mp4, avi, webm)
+
+ Returns:
+     InputConfig: HTTP video input configured for video streaming
+ """
+ def create_http_video_video_input_by_frames(video_url: str, fps: int = 30, quality: int = 95, stream_key: str = None, width: int = None, height: int = None, max_frames: int = 150, video_format: str = 'mp4') -> Any: ...
+ """
+ Create an HTTP video file input for video-based streaming with frame count limit.
+
+ Args:
+     video_url: HTTP/HTTPS video file URL
+     fps: Frames per second
+     quality: Video quality (1-100)
+     stream_key: Stream identifier
+     width: Frame width
+     height: Frame height
+     max_frames: Maximum frames per video chunk
+     video_format: Video format (mp4, avi, webm)
+
+ Returns:
+     InputConfig: HTTP video input configured for video streaming
+ """
+ def create_kafka_output(topic: str, bootstrap_servers: str, key_field: str = 'stream_key', producer_config: Dict = None) -> Any: ...
+ """
+ Create a Kafka output configuration.
+
+ Args:
+     topic: Kafka topic name
+     bootstrap_servers: Kafka bootstrap servers
+     key_field: Field to use as message key
+     producer_config: Additional Kafka producer configuration
+
+ Returns:
+     OutputConfig instance for Kafka output
+ """
+ def create_rtsp_input(rtsp_url: str, fps: int = 30, quality: int = 95, stream_key: str = None, width: int = None, height: int = None, model_input_type: Any = ModelInputType.FRAMES, video_duration: float = None, max_frames: int = None, video_format: str = 'mp4') -> Any: ...
+ """
+ Create an RTSP stream input configuration.
+
+ Args:
+     rtsp_url: RTSP stream URL
+     fps: Frames per second to capture
+     quality: Video/image quality (1-100)
+     stream_key: Unique identifier for the stream
+     width: Frame width in pixels
+     height: Frame height in pixels
+     model_input_type: FRAMES for individual images, VIDEO for video chunks
+     video_duration: Duration of video chunks in seconds (only for VIDEO mode)
+     max_frames: Maximum frames per video chunk (only for VIDEO mode)
+     video_format: Video format for encoding (mp4, avi, webm)
+
+ Returns:
+     InputConfig: Configured input for RTSP stream
+
+ Raises:
+     ValueError: If parameters are invalid
+ """
+ def create_security_post_processing_config(person_confidence_threshold: float = 0.8, vehicle_confidence_threshold: float = 0.7, restricted_zones: Dict[str, List[Tuple[int, int]]] = None, entrance_lines: Dict[str, List[Tuple[int, int]]] = None, alert_on_person: bool = True, max_person_count: int = 5) -> Any: ...
+ """
+ Create a post-processing configuration optimized for security monitoring.
+
+ Args:
+     person_confidence_threshold: Confidence threshold for person detection
+     vehicle_confidence_threshold: Confidence threshold for vehicle detection
+     restricted_zones: Dictionary of restricted zone names to polygon coordinates
+     entrance_lines: Dictionary of entrance line names to line coordinates
+     alert_on_person: Whether to alert whenever a person is detected
+     max_person_count: Maximum allowed person count before triggering alert
+
+ Returns:
+     PostProcessingConfig optimized for security monitoring
+ """
+ def create_tracking_post_processing_config(confidence_threshold: float = 0.6, enable_tracking: bool = True, enable_counting: bool = True, enable_alerting: bool = True, tracking_zones: Dict[str, List[Tuple[int, int]]] = None, crossing_lines: Dict[str, List[Tuple[int, int]]] = None, map_index_to_category: bool = False, index_to_category: Dict[int, str] = None, category_triggers: List[str] = None) -> Any: ...
+ """
+ Create a post-processing configuration optimized for object tracking.
+
+ Args:
+     confidence_threshold: Global confidence threshold for filtering detections
+     enable_tracking: Whether to enable tracking features
+     enable_counting: Whether to enable object counting features
+     enable_alerting: Whether to enable alerting features
+     tracking_zones: Dictionary of zone names to polygon coordinates
+     crossing_lines: Dictionary of line names to line coordinates
+     map_index_to_category: Whether to map category indices to names
+     index_to_category: Mapping from category indices to category names
+     category_triggers: List of categories that should trigger alerts
+
+ Returns:
+     PostProcessingConfig optimized for tracking models
+ """
+ def create_video_frame_input(video_path: str, fps: int = 30, quality: int = 95, stream_key: str = None, width: int = None, height: int = None) -> Any: ...
+ """
+ Create a video file input for frame-based streaming.
+
+ Args:
+     video_path: Path to video file
+     fps: Frames per second
+     quality: Image quality (1-100)
+     stream_key: Stream identifier
+     width: Frame width
+     height: Frame height
+
+ Returns:
+     InputConfig: Video input configured for frame streaming
+ """
+ def create_video_input(video_path: str, fps: int = 30, quality: int = 95, stream_key: str = None, width: int = None, height: int = None, model_input_type: Any = ModelInputType.FRAMES, video_duration: float = None, max_frames: int = None, video_format: str = 'mp4') -> Any: ...
+ """
+ Create a video file input configuration.
+
+ Args:
+     video_path: Path to the video file
+     fps: Frames per second to process
+     quality: Video/image quality (1-100)
+     stream_key: Unique identifier for the stream
+     width: Frame width in pixels
+     height: Frame height in pixels
+     model_input_type: FRAMES for individual images, VIDEO for video chunks
+     video_duration: Duration of video chunks in seconds (only for VIDEO mode)
+     max_frames: Maximum frames per video chunk (only for VIDEO mode)
+     video_format: Video format for encoding (mp4, avi, webm)
+
+ Returns:
+     InputConfig: Configured input for video file
+
+ Raises:
+     ValueError: If parameters are invalid
+     FileNotFoundError: If video file doesn't exist
+ """
+ def create_video_video_input(video_path: str, fps: int = 30, quality: int = 95, stream_key: str = None, width: int = None, height: int = None, video_duration: float = 5.0, video_format: str = 'mp4') -> Any: ...
+ """
+ Create a video file input for video-based streaming with duration limit.
+
+ Args:
+     video_path: Path to video file
+     fps: Frames per second
+     quality: Video quality (1-100)
+     stream_key: Stream identifier
+     width: Frame width
+     height: Frame height
+     video_duration: Duration of video chunks in seconds
+     video_format: Video format (mp4, avi, webm)
+
+ Returns:
+     InputConfig: Video input configured for video streaming
+ """
+ def create_video_video_input_by_frames(video_path: str, fps: int = 30, quality: int = 95, stream_key: str = None, width: int = None, height: int = None, max_frames: int = 150, video_format: str = 'mp4') -> Any: ...
+ """
+ Create a video file input for video-based streaming with frame count limit.
+
+ Args:
+     video_path: Path to video file
+     fps: Frames per second
+     quality: Video quality (1-100)
+     stream_key: Stream identifier
+     width: Frame width
+     height: Frame height
+     max_frames: Maximum frames per video chunk
+     video_format: Video format (mp4, avi, webm)
+
+ Returns:
+     InputConfig: Video input configured for video streaming
+ """
+
+ # Classes
+ class FileOutputConfig:
+     """
+     Configuration for file output.
+     """
+
+     def from_dict(cls: Any, data: Dict) -> Any: ...
+     """
+     Create from dictionary.
+     """
+
+     def to_dict(self: Any) -> Dict: ...
+     """
+     Convert to dictionary.
+     """
+
+ class InputConfig:
+     """
+     Configuration for input sources.
+     """
+
+     def from_dict(cls: Any, data: Dict) -> Any: ...
+     """
+     Create from dictionary.
+     """
+
+     def to_dict(self: Any) -> Dict: ...
+     """
+     Convert to dictionary.
+     """
+
+ class InputType(Enum):
+     """
+     Supported input types.
+     """
+
+     AUTO: str
+     CAMERA: str
+     HTTP_STREAM: str
+     HTTP_VIDEO_FILE: str
+     RTSP_STREAM: str
+     VIDEO_FILE: str
+
+     pass
+ class KafkaOutputConfig:
+     """
+     Configuration for Kafka output.
+     """
+
+     def from_dict(cls: Any, data: Dict) -> Any: ...
+     """
+     Create from dictionary.
+     """
+
+     def to_dict(self: Any) -> Dict: ...
+     """
+     Convert to dictionary.
+     """
+
+ class ModelInputType(Enum):
+     """
+     Supported model input types.
+     """
+
+     FRAMES: str
+     VIDEO: str
+
+     pass
+ class OutputConfig:
+     """
+     Configuration for output destinations.
+     """
+
+     def from_dict(cls: Any, data: Dict) -> Any: ...
+     """
+     Create from dictionary.
+     """
+
+     def to_dict(self: Any) -> Dict: ...
+     """
+     Convert to dictionary.
+     """
+
+ class OutputType(Enum):
+     """
+     Supported output types.
+     """
+
+     BOTH: str
+     FILE: str
+     KAFKA: str
+
+     pass
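The factory functions above produce InputConfig/OutputConfig instances and post-processing configurations that plug into StreamingGateway. A brief, hypothetical sketch of combining them follows; all values are illustrative, and the internal fields of the returned PostProcessingConfig are not shown by this stub.

# Hypothetical sketch: detection post-processing routed to both file and Kafka outputs.
from matrice_inference.deploy.client.streaming_gateway.streaming_gateway_utils import (
    create_detection_post_processing_config,
    create_dual_output,
    create_rtsp_input,
)

post_cfg = create_detection_post_processing_config(
    confidence_threshold=0.5,
    category_triggers=["person"],    # alert when a person is detected
    count_threshold=5,               # alert when the object count exceeds the threshold
)

output = create_dual_output(
    file_directory="./results",
    kafka_topic="inference-results",
    kafka_bootstrap_servers="localhost:9092",
    post_processing_config=post_cfg,
    apply_post_processing=True,
)

rtsp_input = create_rtsp_input(rtsp_url="rtsp://example.local/stream", fps=10)

# InputConfig exposes to_dict()/from_dict(), so configurations can be persisted as JSON.
serialized = rtsp_input.to_dict()
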
@@ -0,0 +1,19 @@
+ """Auto-generated stub for module: streaming_results_handler."""
+ from typing import Any, Callable, Dict, Optional
+
+ from confluent_kafka import Producer
+ from datetime import datetime
+ from matrice_inference.deploy.client.client_stream_utils import ClientStreamUtils
+ from matrice_inference.deploy.client.streaming_gateway.streaming_gateway_utils import OutputType, OutputConfig, _RealTimeJsonEventPicker
+ from matrice_inference.deploy.utils.post_processing import PostProcessor
+ import json
+ import logging
+ import os
+ import threading
+ import time
+
+ # Classes
+ class StreamingResultsHandler:
+     def __init__(self: Any, client_stream_utils: Any, output_config: Any, json_event_picker: Any, service_id: str = None, strip_input_from_result: bool = True, result_callback: Optional[Callable] = None) -> None: ...
+
+     pass
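StreamingResultsHandler appears to be constructed by StreamingGateway (which imports it); the user-facing hook in its signature is result_callback, which StreamingGateway also declares. A hypothetical callback sketch follows; the exact shape of the result payload is an assumption, not documented in this stub.

# Hypothetical result_callback; assumes each result arrives as a dict with a stream_key field.
import logging

def on_result(result: dict) -> None:
    # Log a compact summary of each streamed inference result.
    logging.info("stream=%s keys=%s", result.get("stream_key"), sorted(result.keys()))
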
@@ -0,0 +1,15 @@
+ """Auto-generated stub for module: cache_manager."""
+ from typing import Any, Dict
+
+ from collections import OrderedDict
+
+ # Classes
+ class CacheManager:
+     def __init__(self: Any, max_cache_size: int = 5) -> None: ...
+
+     def clear_cache(self: Any, stream_key: str = None) -> Any: ...
+
+     def get_cached_result(self: Any, input_hash: str, stream_key: str = None) -> Any: ...
+
+     def set_cached_result(self: Any, input_hash: str, value: dict, stream_key: str = None) -> Any: ...
+
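CacheManager looks like a small per-stream result cache keyed by an input hash; the OrderedDict import suggests LRU-style eviction once max_cache_size is reached, though that is an inference from the stub rather than documented behavior. A hypothetical usage sketch:

# Hypothetical sketch: memoize inference results by content hash, per stream.
# Assumes get_cached_result returns None on a cache miss (not documented in the stub).
import hashlib

from matrice_inference.deploy.optimize.cache_manager import CacheManager

cache = CacheManager(max_cache_size=5)

frame_bytes = b"..."  # placeholder frame payload
input_hash = hashlib.sha256(frame_bytes).hexdigest()

result = cache.get_cached_result(input_hash, stream_key="cam-0")
if result is None:
    result = {"detections": []}  # placeholder: run the model here
    cache.set_cached_result(input_hash, result, stream_key="cam-0")

cache.clear_cache(stream_key="cam-0")  # drop cached entries for this stream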