matrice-inference 0.1.0 (py3-none-manylinux_2_17_x86_64 wheel)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of matrice-inference might be problematic.
- matrice_inference/deploy/aggregator/aggregator.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/aggregator/aggregator.pyi +55 -0
- matrice_inference/deploy/aggregator/analytics.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/aggregator/analytics.pyi +63 -0
- matrice_inference/deploy/aggregator/ingestor.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/aggregator/ingestor.pyi +79 -0
- matrice_inference/deploy/aggregator/pipeline.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/aggregator/pipeline.pyi +139 -0
- matrice_inference/deploy/aggregator/publisher.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/aggregator/publisher.pyi +59 -0
- matrice_inference/deploy/aggregator/synchronizer.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/aggregator/synchronizer.pyi +58 -0
- matrice_inference/deploy/client/auto_streaming/auto_streaming.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/client/auto_streaming/auto_streaming.pyi +145 -0
- matrice_inference/deploy/client/auto_streaming/auto_streaming_utils.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/client/auto_streaming/auto_streaming_utils.pyi +126 -0
- matrice_inference/deploy/client/client.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/client/client.pyi +337 -0
- matrice_inference/deploy/client/client_stream_utils.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/client/client_stream_utils.pyi +83 -0
- matrice_inference/deploy/client/client_utils.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/client/client_utils.pyi +77 -0
- matrice_inference/deploy/client/streaming_gateway/streaming_gateway.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/client/streaming_gateway/streaming_gateway.pyi +120 -0
- matrice_inference/deploy/client/streaming_gateway/streaming_gateway_utils.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/client/streaming_gateway/streaming_gateway_utils.pyi +442 -0
- matrice_inference/deploy/client/streaming_gateway/streaming_results_handler.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/client/streaming_gateway/streaming_results_handler.pyi +19 -0
- matrice_inference/deploy/optimize/cache_manager.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/optimize/cache_manager.pyi +15 -0
- matrice_inference/deploy/optimize/frame_comparators.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/optimize/frame_comparators.pyi +203 -0
- matrice_inference/deploy/optimize/frame_difference.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/optimize/frame_difference.pyi +165 -0
- matrice_inference/deploy/optimize/transmission.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/optimize/transmission.pyi +97 -0
- matrice_inference/deploy/server/inference/batch_manager.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/server/inference/batch_manager.pyi +50 -0
- matrice_inference/deploy/server/inference/inference_interface.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/server/inference/inference_interface.pyi +114 -0
- matrice_inference/deploy/server/inference/model_manager.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/server/inference/model_manager.pyi +80 -0
- matrice_inference/deploy/server/inference/triton_utils.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/server/inference/triton_utils.pyi +115 -0
- matrice_inference/deploy/server/proxy/proxy_interface.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/server/proxy/proxy_interface.pyi +90 -0
- matrice_inference/deploy/server/proxy/proxy_utils.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/server/proxy/proxy_utils.pyi +113 -0
- matrice_inference/deploy/server/server.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/server/server.pyi +155 -0
- matrice_inference/deploy/server/stream/inference_worker.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/server/stream/inference_worker.pyi +56 -0
- matrice_inference/deploy/server/stream/kafka_consumer_worker.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/server/stream/kafka_consumer_worker.pyi +51 -0
- matrice_inference/deploy/server/stream/kafka_producer_worker.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/server/stream/kafka_producer_worker.pyi +50 -0
- matrice_inference/deploy/server/stream/stream_debug_logger.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/server/stream/stream_debug_logger.pyi +47 -0
- matrice_inference/deploy/server/stream/stream_manager.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/server/stream/stream_manager.pyi +69 -0
- matrice_inference/deploy/server/stream/video_buffer.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/server/stream/video_buffer.pyi +120 -0
- matrice_inference/deploy/stream/kafka_stream.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/stream/kafka_stream.pyi +444 -0
- matrice_inference/deploy/stream/redis_stream.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deploy/stream/redis_stream.pyi +447 -0
- matrice_inference/deployment/camera_manager.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deployment/camera_manager.pyi +669 -0
- matrice_inference/deployment/deployment.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deployment/deployment.pyi +736 -0
- matrice_inference/deployment/inference_pipeline.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deployment/inference_pipeline.pyi +527 -0
- matrice_inference/deployment/streaming_gateway_manager.cpython-312-x86_64-linux-gnu.so +0 -0
- matrice_inference/deployment/streaming_gateway_manager.pyi +275 -0
- matrice_inference/py.typed +0 -0
- matrice_inference-0.1.0.dist-info/METADATA +26 -0
- matrice_inference-0.1.0.dist-info/RECORD +80 -0
- matrice_inference-0.1.0.dist-info/WHEEL +5 -0
- matrice_inference-0.1.0.dist-info/licenses/LICENSE.txt +21 -0
- matrice_inference-0.1.0.dist-info/top_level.txt +1 -0
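Every module in the listing above ships as a compiled CPython 3.12 extension (`.so`) paired with an auto-generated `.pyi` stub, and `py.typed` marks the package as typed, so the stubs shown below are the package's only readable API surface. A minimal sketch for verifying that pairing locally, assuming the wheel has already been downloaded under its standard filename (used verbatim in the code):

```python
import zipfile

# Assumed local filename: the standard wheel name for this release.
WHEEL = "matrice_inference-0.1.0-py3-none-manylinux_2_17_x86_64.whl"

with zipfile.ZipFile(WHEEL) as whl:
    names = whl.namelist()
    # Strip the ABI suffix from extension modules and the .pyi suffix from stubs.
    extensions = {n.split(".cpython-")[0] for n in names if n.endswith(".so")}
    stubs = {n[:-len(".pyi")] for n in names if n.endswith(".pyi")}
    # Every compiled extension module should ship a matching stub, and vice versa.
    print("extensions without stubs:", sorted(extensions - stubs))
    print("stubs without extensions:", sorted(stubs - extensions))
```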
@@ -0,0 +1,126 @@
"""Auto-generated stub for module: auto_streaming_utils."""
from typing import Any, Dict, List, Optional, Tuple

from matrice_inference.deploy.client.streaming_gateway import ModelInputType, InputConfig, InputType
from matrice_inference.deployment.camera_manager import Camera, CameraGroup, CameraManager
from matrice_inference.deployment.streaming_gateway_manager import StreamingGateway
import logging
import time

# Classes
class AutoStreamingUtils:
    """
    Utility class for auto-streaming camera configuration and input conversion.

    This class provides methods for converting camera configurations to input configurations,
    managing streaming statistics, and validating gateway configurations.
    """

    def __init__(self: Any, default_fps: int = 30, default_quality: int = 80, default_video_chunk_duration: int = 10, default_video_format: str = 'mp4', simulate_video_file_stream: bool = False) -> None:
        """
        Initialize AutoStreamingUtils with default configuration values.

        Args:
            default_fps: Default FPS for camera streams
            default_quality: Default quality for camera streams
            default_video_chunk_duration: Default video chunk duration for the video input type
            default_video_format: Default video format for the video input type
            simulate_video_file_stream: Whether to simulate a video file stream
        """

    def calculate_runtime_stats(stats: Dict) -> Dict:
        """
        Calculate runtime statistics.

        Args:
            stats: Statistics dictionary

        Returns:
            Updated statistics dictionary with runtime information
        """

    def convert_camera_configs_to_inputs(self: Any, camera_configs: List[Camera], camera_groups: Dict[str, CameraGroup], deployment_id: str, model_input_type: Any = ModelInputType.FRAMES) -> List[InputConfig]:
        """
        Convert camera configurations to input configurations for streaming.

        Args:
            camera_configs: List of Camera objects
            camera_groups: Dictionary mapping group IDs to CameraGroup objects
            deployment_id: Deployment ID for logging
            model_input_type: Model input type (FRAMES or VIDEO)

        Returns:
            List of InputConfig objects
        """

    def create_auto_streaming_stats(streaming_gateway_ids: List[str]) -> Dict:
        """
        Create the initial statistics dictionary for auto streaming.

        Args:
            streaming_gateway_ids: List of streaming gateway IDs

        Returns:
            Dictionary with initial statistics
        """

    def get_camera_configs_as_inputs(self: Any, camera_manager: Any, deployment_id: str, model_input_type: Any = ModelInputType.FRAMES) -> Tuple[Optional[List[InputConfig]], Optional[str], str]:
        """
        Get camera configurations for a deployment and convert them to input configurations.

        This method fetches both camera groups and camera configs, then converts them
        to input configs using effective stream settings.

        Args:
            camera_manager: CameraManager instance
            deployment_id: The ID of the deployment to get camera configs for
            model_input_type: Model input type (FRAMES or VIDEO)

        Returns:
            tuple: (input_configs, error, message)
        """

    def get_gateway_cameras_as_inputs(self: Any, camera_manager: Any, streaming_gateway_config_instance: Any, model_input_type: Any = ModelInputType.FRAMES) -> Tuple[Optional[List[InputConfig]], Optional[str], str]:
        """
        Get camera configurations for a specific streaming gateway and convert them to input configs.

        Args:
            camera_manager: CameraManager instance to use for camera operations
            streaming_gateway_config_instance: StreamingGateway instance to use for gateway operations
            model_input_type: Model input type (FRAMES or VIDEO)

        Returns:
            tuple: (input_configs, error, message)
        """

    def record_error(stats: Dict, error_message: str) -> Any:
        """
        Record an error in statistics.

        Args:
            stats: Statistics dictionary
            error_message: Error message to record
        """

    def update_stream_status(stats: Dict, gateway_id: str, status: str, camera_count: Optional[int] = None) -> Any:
        """
        Update the status of a streaming gateway in statistics.

        Args:
            stats: Statistics dictionary
            gateway_id: ID of the streaming gateway
            status: New status (starting, running, stopped, failed)
            camera_count: Number of cameras (optional)
        """

    def validate_streaming_gateway_config(gateway_config: Any) -> Tuple[bool, str]:
        """
        Validate a streaming gateway configuration.

        Args:
            gateway_config: Streaming gateway configuration object to validate

        Returns:
            tuple: (is_valid, error_message)
        """
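The statistics helpers in this stub take a plain `stats` dict and no `self`, so they read as static utilities; below is a minimal lifecycle sketch inferred from the signatures alone. The gateway IDs and error text are illustrative, the status strings come from the docstring above, and the helpers are called on the class because the stub shows no `self` parameter:

```python
from matrice_inference.deploy.client.auto_streaming.auto_streaming_utils import AutoStreamingUtils

# Seed stats for two illustrative gateways, then walk them through the
# statuses the docstring names (starting, running, stopped, failed).
stats = AutoStreamingUtils.create_auto_streaming_stats(["gw-1", "gw-2"])
AutoStreamingUtils.update_stream_status(stats, "gw-1", "running", camera_count=4)
AutoStreamingUtils.update_stream_status(stats, "gw-2", "failed")
AutoStreamingUtils.record_error(stats, "gw-2 failed to start")  # illustrative message
print(AutoStreamingUtils.calculate_runtime_stats(stats))
```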
Binary file (compiled extension module; diff not shown)
@@ -0,0 +1,337 @@
"""Auto-generated stub for module: client."""
from typing import Any, Dict, Optional, Union

from matrice.projects import Projects
from matrice_inference.deploy.client.client_stream_utils import ClientStreamUtils
from matrice_inference.deploy.client.client_utils import ClientUtils
import logging
import time

# Classes
class MatriceDeployClient:
    """
    Client for interacting with Matrice model deployments.

    This client provides both synchronous and asynchronous methods for making
    predictions and streaming video data to deployed models.

    Example:
        Basic usage:
        ```python
        from matrice import Session
        from matrice_inference.deploy.client import MatriceDeployClient

        session = Session(account_number="...", access_key="...", secret_key="...")
        client = MatriceDeployClient(
            session=session,
            deployment_id="your_deployment_id",
            auth_key="your_auth_key"
        )

        # Check if the client is healthy
        if client.is_healthy():
            # Make a prediction
            result = client.get_prediction(input_path="image.jpg")
            print(result)

        # Clean up resources
        client.close()
        ```

        Streaming example:
        ```python
        # Start streaming frames from the webcam
        success = client.start_stream(input=0, fps=30, quality=80)
        if success:
            # Consume results
            while True:
                result = client.consume_result(timeout=10.0)
                if result:
                    print(f"Received result: {result}")
                else:
                    break

            # Stop streaming
            client.stop_streaming()
        ```

        Video streaming example:
        ```python
        # Start streaming video chunks from the webcam (5 second chunks)
        success = client.start_video_stream(
            input=0,
            fps=30,
            video_duration=5.0,  # 5 second chunks
            video_format="mp4"
        )

        # Or stream video with a frame count limit (150 frames per chunk)
        success = client.start_video_stream(
            input=0,
            fps=30,
            max_frames=150,  # 150 frames per chunk
            video_format="mp4"
        )

        if success:
            # Consume video results
            while True:
                result = client.consume_result(timeout=30.0)
                if result:
                    print(f"Received video result: {result}")
                else:
                    break

            # Stop streaming
            client.stop_streaming()
        ```
    """

    def __init__(self: Any, session: Any, deployment_id: str, auth_key: Optional[str] = None, create_deployment_config: Optional[Dict] = None, consumer_group_id: Optional[str] = None, consumer_group_instance_id: Optional[str] = None) -> None:
        """
        Initialize MatriceDeployClient.

        Args:
            session: Session object for making RPC calls
            deployment_id: ID of the deployment
            auth_key: Authentication key
            create_deployment_config: Deployment configuration
            consumer_group_id: Kafka consumer group ID
            consumer_group_instance_id: Unique consumer group instance ID to prevent rebalancing

        Raises:
            ValueError: If required parameters are missing or invalid
            RuntimeError: If deployment info cannot be retrieved
        """

    async def aclose(self: Any) -> None:
        """
        Close all client connections asynchronously and clean up resources.

        This method should be called when you're done using the client,
        to properly clean up HTTP connections and other resources.
        """

    def close(self: Any) -> None:
        """
        Close all client connections and clean up resources.

        This method should be called when you're done using the client,
        to properly clean up HTTP connections and other resources.
        """

    async def close_stream(self: Any) -> None:
        """
        Close streaming connections asynchronously.
        """

    def consume_result(self: Any, timeout: float = 60.0) -> Optional[Dict]:
        """
        Consume a result from the streaming session.

        Args:
            timeout: Maximum time to wait for a result, in seconds

        Returns:
            Result dictionary if available, None on timeout
        """

    async def consume_result_async(self: Any, timeout: float = 60.0) -> Optional[Dict]:
        """
        Consume a result from the streaming session asynchronously.

        Args:
            timeout: Maximum time to wait for a result, in seconds

        Returns:
            Result dictionary if available, None on timeout
        """

    def create_auth_key_if_not_exists(self: Any, expiry_days: int = 30) -> str:
        """
        Create an authentication key if one doesn't exist.

        Args:
            expiry_days: Number of days until the key expires

        Returns:
            str: The created authentication key

        Raises:
            ValueError: If expiry_days is invalid
            RuntimeError: If key creation fails
        """

    def create_deployment(self: Any, deployment_name: Any, model_id: Any = '', gpu_required: Any = True, auto_scale: Any = False, auto_shutdown: Any = True, shutdown_threshold: Any = 5, compute_alias: Any = '', model_type: Any = 'trained', deployment_type: Any = 'regular', checkpoint_type: Any = 'pretrained', checkpoint_value: Any = '', checkpoint_dataset: Any = 'COCO', runtime_framework: Any = 'Pytorch', server_type: Any = 'fastapi', deployment_params: Any = {}, model_input: Any = 'image', model_output: Any = 'classification', suggested_classes: Any = [], model_family: Any = '', model_key: Any = '', is_kafka_enabled: Any = False, is_optimized: Any = False, instance_range: Any = [1, 1], custom_schedule: Any = False, schedule_deployment: Any = [], post_processing_config: Any = None, create_deployment_config: Dict = {}, wait_for_deployment: bool = True, max_wait_time: int = 1200) -> Any: ...

    def get_deployment_info(self: Any) -> Dict:
        """
        Get deployment information.

        Returns:
            Dict containing deployment information

        Raises:
            RuntimeError: If deployment info cannot be retrieved
        """

    def get_index_to_category(self: Any) -> Dict:
        """
        Get the index-to-category mapping.

        Returns:
            Dict mapping indices to category names

        Raises:
            RuntimeError: If the category mapping cannot be retrieved
        """

    def get_prediction(self: Any, input_path: Optional[str] = None, input_bytes: Optional[bytes] = None, input_url: Optional[str] = None, extra_params: Optional[Dict] = None, auth_key: Optional[str] = None, apply_post_processing: bool = False) -> Union[Dict, str]:
        """
        Get a prediction from the deployed model.

        Args:
            input_path: Path to the input file
            input_bytes: Input data as bytes
            input_url: URL of the input data
            extra_params: Additional parameters for the prediction
            auth_key: Authentication key (uses the instance auth_key if not provided)
            apply_post_processing: Whether to apply post-processing

        Returns:
            Prediction result from the model

        Raises:
            ValueError: If no input is provided or the auth key is missing
            Exception: If the prediction request fails
        """

    async def get_prediction_async(self: Any, input_path: Optional[str] = None, input_bytes: Optional[bytes] = None, input_url: Optional[str] = None, extra_params: Optional[Dict] = None, auth_key: Optional[str] = None, apply_post_processing: bool = False) -> Union[Dict, str]:
        """
        Get a prediction from the deployed model asynchronously.

        Args:
            input_path: Path to the input file
            input_bytes: Input data as bytes
            input_url: URL of the input data
            extra_params: Additional parameters for the prediction
            auth_key: Authentication key (uses the instance auth_key if not provided)
            apply_post_processing: Whether to apply post-processing

        Returns:
            Prediction result from the model

        Raises:
            ValueError: If no input is provided or the auth key is missing
            Exception: If the prediction request fails
        """

    def get_status(self: Any) -> Dict:
        """
        Get comprehensive status information about the client and deployment.

        Returns:
            Dict containing status information
        """

    def is_healthy(self: Any) -> bool:
        """
        Check whether the deployment is healthy and ready to serve requests.

        Returns:
            bool: True if the deployment is healthy, False otherwise
        """

    def refresh_instances_info(self: Any, force: bool = False) -> Any:
        """
        Refresh instance information from the deployment.

        Args:
            force: Whether to force a refresh regardless of the time elapsed
        """

    def start_background_stream(self: Any, input: Union[str, int], fps: int = 10, stream_key: Optional[str] = None, stream_group_key: Optional[str] = None, quality: int = 95, width: Optional[int] = None, height: Optional[int] = None, simulate_video_file_stream: bool = False, is_video_chunk: bool = False, chunk_duration_seconds: Optional[float] = None, chunk_frames: Optional[int] = None) -> bool:
        """
        Start a background streaming session.

        Args:
            input: Video source (camera index, file path, or URL)
            fps: Frames per second to stream
            stream_key: Unique identifier for the stream
            quality: JPEG compression quality (1-100)
            width: Target frame width
            height: Target frame height

        Returns:
            bool: True if streaming started successfully, False otherwise
        """

    def start_background_video_stream(self: Any, input: Union[str, int], fps: int = 10, stream_key: Optional[str] = None, stream_group_key: Optional[str] = None, quality: int = 95, width: Optional[int] = None, height: Optional[int] = None, video_duration: Optional[float] = None, max_frames: Optional[int] = None, video_format: str = 'mp4') -> bool:
        """
        Start a background video streaming session that sends video chunks.

        Args:
            input: Video source (camera index, file path, or URL)
            fps: Frames per second to capture and encode
            stream_key: Unique identifier for the stream
            quality: Video compression quality (1-100)
            width: Target frame width
            height: Target frame height
            video_duration: Duration of each video chunk in seconds (optional)
            max_frames: Maximum number of frames per video chunk (optional)
            video_format: Video format for encoding ('mp4', 'avi', 'webm')

        Returns:
            bool: True if streaming started successfully, False otherwise

        Note:
            Either video_duration or max_frames should be specified to control chunk size.
            If neither is provided, defaults to 5 second chunks.
        """

    def start_stream(self: Any, input: Union[str, int], fps: int = 10, stream_key: Optional[str] = None, stream_group_key: Optional[str] = None, quality: int = 95, width: Optional[int] = None, height: Optional[int] = None, simulate_video_file_stream: bool = False, is_video_chunk: bool = False, chunk_duration_seconds: Optional[float] = None, chunk_frames: Optional[int] = None) -> bool:
        """
        Start a streaming session (blocking).

        Args:
            input: Video source (camera index, file path, or URL)
            fps: Frames per second to stream
            stream_key: Unique identifier for the stream
            quality: JPEG compression quality (1-100)
            width: Target frame width
            height: Target frame height

        Returns:
            bool: True if streaming started successfully, False otherwise
        """

    def start_video_stream(self: Any, input: Union[str, int], fps: int = 10, stream_key: Optional[str] = None, stream_group_key: Optional[str] = None, quality: int = 95, width: Optional[int] = None, height: Optional[int] = None, video_duration: Optional[float] = None, max_frames: Optional[int] = None, video_format: str = 'mp4') -> bool:
        """
        Start a video streaming session (blocking) that sends video chunks.

        Args:
            input: Video source (camera index, file path, or URL)
            fps: Frames per second to capture and encode
            stream_key: Unique identifier for the stream
            quality: Video compression quality (1-100)
            width: Target frame width
            height: Target frame height
            video_duration: Duration of each video chunk in seconds (optional)
            max_frames: Maximum number of frames per video chunk (optional)
            video_format: Video format for encoding ('mp4', 'avi', 'webm')

        Returns:
            bool: True if streaming started successfully, False otherwise

        Note:
            Either video_duration or max_frames should be specified to control chunk size.
            If neither is provided, defaults to 5 second chunks.
        """

    def stop_streaming(self: Any) -> None:
        """
        Stop all streaming sessions.
        """

    def wait_for_deployment(self: Any, timeout: Any = 1200) -> Any: ...
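The docstring examples above are all synchronous, but the stub also exposes async counterparts (`get_prediction_async`, `consume_result_async`, `aclose`). A sketch of the async path under the same assumptions as the docstring examples (placeholder credentials, deployment ID, and input file):

```python
import asyncio

from matrice import Session
from matrice_inference.deploy.client import MatriceDeployClient

async def main() -> None:
    session = Session(account_number="...", access_key="...", secret_key="...")
    client = MatriceDeployClient(
        session=session,
        deployment_id="your_deployment_id",
        auth_key="your_auth_key",
    )
    try:
        # Async counterpart of get_prediction; accepts the same input kinds
        # (input_path, input_bytes, or input_url).
        result = await client.get_prediction_async(input_path="image.jpg")
        print(result)
    finally:
        await client.aclose()  # async counterpart of close()

asyncio.run(main())
```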
Binary file (compiled extension module; diff not shown)
@@ -0,0 +1,83 @@
"""Auto-generated stub for module: client_stream_utils."""
from typing import Any, Dict, Optional, Tuple, Union

from datetime import datetime, timezone
from matrice_inference.deploy.optimize.transmission import ClientTransmissionHandler
from matrice_inference.deploy.stream.kafka_stream import MatriceKafkaDeployment
import base64
import cv2
import hashlib
import logging
import numpy as np
import threading
import time

# Classes
class ClientStreamUtils:
    def __init__(self: Any, session: Any, service_id: str, consumer_group_id: Optional[str] = None, consumer_group_instance_id: Optional[str] = None, threshold_a: float = 0.95, threshold_b: float = 0.85, enable_intelligent_transmission: bool = True) -> None:
        """
        Initialize ClientStreamUtils.

        Args:
            session: Session object for making RPC calls
            service_id: ID of the deployment
            consumer_group_id: Kafka consumer group ID
            consumer_group_instance_id: Unique consumer group instance ID to prevent rebalancing
            threshold_a: High similarity threshold for skipping transmission (default: 0.95)
            threshold_b: Medium similarity threshold for difference transmission (default: 0.85)
            enable_intelligent_transmission: Whether to enable intelligent frame transmission
        """

    async def async_consume_result(self: Any, timeout: float = 60.0) -> Optional[Dict]:
        """
        Consume a Kafka stream result asynchronously.
        """

    async def async_produce_request(self: Any, input_data: Any, stream_key: Optional[str] = None, stream_group_key: Optional[str] = None, metadata: Optional[Dict] = None, timeout: float = 60.0) -> bool:
        """
        Produce a unified stream request to Kafka asynchronously.
        """

    async def close(self: Any) -> None:
        """
        Close all client connections, including the Kafka stream.
        """

    def consume_result(self: Any, timeout: float = 60.0) -> Optional[Dict]:
        """
        Consume a Kafka stream result.
        """

    def get_transmission_stats(self: Any) -> Dict[str, Any]:
        """
        Get intelligent transmission statistics.

        Returns:
            Dictionary with transmission statistics
        """

    def produce_request(self: Any, input_data: Any, stream_key: Optional[str] = None, stream_group_key: Optional[str] = None, metadata: Optional[Dict] = None, timeout: float = 60.0) -> bool:
        """
        Produce a stream request to Kafka.
        """

    def reset_transmission_stats(self: Any) -> None:
        """
        Reset transmission statistics.
        """

    def start_background_stream(self: Any, input: Union[str, int], fps: int = 10, stream_key: Optional[str] = None, stream_group_key: Optional[str] = None, quality: int = 95, width: Optional[int] = None, height: Optional[int] = None, simulate_video_file_stream: bool = False, is_video_chunk: bool = False, chunk_duration_seconds: Optional[float] = None, chunk_frames: Optional[int] = None) -> bool:
        """
        Start streaming an input to the Kafka stream in a background thread.
        """

    def start_stream(self: Any, input: Union[str, int], fps: int = 10, stream_key: Optional[str] = None, stream_group_key: Optional[str] = None, quality: int = 95, width: Optional[int] = None, height: Optional[int] = None, simulate_video_file_stream: bool = False, is_video_chunk: bool = False, chunk_duration_seconds: Optional[float] = None, chunk_frames: Optional[int] = None) -> bool:
        """
        Start streaming an input to the Kafka stream in the current thread.
        """

    def stop_streaming(self: Any) -> None:
        """
        Stop all streaming threads.
        """
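`MatriceDeployClient` imports this class and mirrors its `start_background_stream` / `consume_result` signatures, so it appears to be the streaming backend the client delegates to. A small sketch built only from the signatures above (the webcam index, fps, and quality settings are illustrative):

```python
from matrice import Session
from matrice_inference.deploy.client.client_stream_utils import ClientStreamUtils

session = Session(account_number="...", access_key="...", secret_key="...")
stream = ClientStreamUtils(session=session, service_id="your_deployment_id")

# Stream the default webcam in a background thread and read back one result.
if stream.start_background_stream(input=0, fps=10, quality=90):
    print(stream.consume_result(timeout=60.0))
    # Counters for frames skipped or sent as differences by the
    # intelligent-transmission thresholds (threshold_a / threshold_b).
    print(stream.get_transmission_stats())

stream.stop_streaming()
```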
@@ -0,0 +1,77 @@
"""Auto-generated stub for module: client_utils."""
from typing import Any, Dict, List, Optional, Tuple, Union

import httpx
import json
import logging

# Classes
class ClientUtils:
    """
    Utility class for making inference requests to model servers.
    """

    def __init__(self: Any, clients: Optional[List[Dict]] = None) -> None:
        """
        Initialize HTTP clients.
        """

    async def aclose(self: Any) -> None:
        """
        Asynchronously close HTTP clients and clean up resources.
        """

    async def async_inference(self: Any, auth_key: Optional[str] = None, input_path: Optional[str] = None, input_bytes: Optional[bytes] = None, input_url: Optional[str] = None, extra_params: Optional[Dict] = None, apply_post_processing: bool = False, max_retries: int = 2) -> Union[Dict, str]:
        """
        Make an asynchronous inference request with retry logic.

        Args:
            auth_key: Authentication key
            input_path: Path to the input file
            input_bytes: Input as bytes
            input_url: URL to fetch the input from
            extra_params: Additional parameters to pass to the model
            apply_post_processing: Whether to apply post-processing
            max_retries: Maximum number of retry attempts per client

        Returns:
            Model prediction result

        Raises:
            ValueError: If no input is provided
            httpx.HTTPError: If the HTTP request fails
            Exception: If the inference request fails
        """

    def close(self: Any) -> None:
        """
        Close HTTP clients and clean up resources.
        """

    def inference(self: Any, auth_key: Optional[str] = None, input_path: Optional[str] = None, input_bytes: Optional[bytes] = None, input_url: Optional[str] = None, extra_params: Optional[Dict] = None, apply_post_processing: bool = False, max_retries: int = 2) -> Union[Dict, str]:
        """
        Make a synchronous inference request with retry logic.

        Args:
            auth_key: Authentication key
            input_path: Path to the input file
            input_bytes: Input as bytes
            input_url: URL to fetch the input from
            extra_params: Additional parameters to pass to the model
            apply_post_processing: Whether to apply post-processing
            max_retries: Maximum number of retry attempts per client

        Returns:
            Model prediction result

        Raises:
            ValueError: If no input is provided
            httpx.HTTPError: If the HTTP request fails
            Exception: If the inference request fails
        """

    def refresh_instances_info(self: Any, instances_info: List[Dict]) -> None:
        """
        Update clients with new instance info.
        """
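The stub does not reveal the shape of the `clients` dicts; in practice `MatriceDeployClient` appears to supply them, since `client.pyi` imports this class and exposes a matching `refresh_instances_info`. The sketch below constructs `ClientUtils` bare purely to show the call shape, with placeholder auth key and input:

```python
from matrice_inference.deploy.client.client_utils import ClientUtils

# clients defaults to None; live instance info is normally supplied by the
# deployment client via __init__ or refresh_instances_info.
utils = ClientUtils()
try:
    result = utils.inference(
        auth_key="your_auth_key",  # placeholder
        input_path="image.jpg",    # placeholder; input_bytes / input_url also accepted
        max_retries=2,             # retry attempts per configured client
    )
    print(result)
finally:
    utils.close()
```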