matrice_inference-0.1.0-py3-none-manylinux_2_17_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of matrice-inference might be problematic.

Files changed (80)
  1. matrice_inference/deploy/aggregator/aggregator.cpython-312-x86_64-linux-gnu.so +0 -0
  2. matrice_inference/deploy/aggregator/aggregator.pyi +55 -0
  3. matrice_inference/deploy/aggregator/analytics.cpython-312-x86_64-linux-gnu.so +0 -0
  4. matrice_inference/deploy/aggregator/analytics.pyi +63 -0
  5. matrice_inference/deploy/aggregator/ingestor.cpython-312-x86_64-linux-gnu.so +0 -0
  6. matrice_inference/deploy/aggregator/ingestor.pyi +79 -0
  7. matrice_inference/deploy/aggregator/pipeline.cpython-312-x86_64-linux-gnu.so +0 -0
  8. matrice_inference/deploy/aggregator/pipeline.pyi +139 -0
  9. matrice_inference/deploy/aggregator/publisher.cpython-312-x86_64-linux-gnu.so +0 -0
  10. matrice_inference/deploy/aggregator/publisher.pyi +59 -0
  11. matrice_inference/deploy/aggregator/synchronizer.cpython-312-x86_64-linux-gnu.so +0 -0
  12. matrice_inference/deploy/aggregator/synchronizer.pyi +58 -0
  13. matrice_inference/deploy/client/auto_streaming/auto_streaming.cpython-312-x86_64-linux-gnu.so +0 -0
  14. matrice_inference/deploy/client/auto_streaming/auto_streaming.pyi +145 -0
  15. matrice_inference/deploy/client/auto_streaming/auto_streaming_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  16. matrice_inference/deploy/client/auto_streaming/auto_streaming_utils.pyi +126 -0
  17. matrice_inference/deploy/client/client.cpython-312-x86_64-linux-gnu.so +0 -0
  18. matrice_inference/deploy/client/client.pyi +337 -0
  19. matrice_inference/deploy/client/client_stream_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  20. matrice_inference/deploy/client/client_stream_utils.pyi +83 -0
  21. matrice_inference/deploy/client/client_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  22. matrice_inference/deploy/client/client_utils.pyi +77 -0
  23. matrice_inference/deploy/client/streaming_gateway/streaming_gateway.cpython-312-x86_64-linux-gnu.so +0 -0
  24. matrice_inference/deploy/client/streaming_gateway/streaming_gateway.pyi +120 -0
  25. matrice_inference/deploy/client/streaming_gateway/streaming_gateway_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  26. matrice_inference/deploy/client/streaming_gateway/streaming_gateway_utils.pyi +442 -0
  27. matrice_inference/deploy/client/streaming_gateway/streaming_results_handler.cpython-312-x86_64-linux-gnu.so +0 -0
  28. matrice_inference/deploy/client/streaming_gateway/streaming_results_handler.pyi +19 -0
  29. matrice_inference/deploy/optimize/cache_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  30. matrice_inference/deploy/optimize/cache_manager.pyi +15 -0
  31. matrice_inference/deploy/optimize/frame_comparators.cpython-312-x86_64-linux-gnu.so +0 -0
  32. matrice_inference/deploy/optimize/frame_comparators.pyi +203 -0
  33. matrice_inference/deploy/optimize/frame_difference.cpython-312-x86_64-linux-gnu.so +0 -0
  34. matrice_inference/deploy/optimize/frame_difference.pyi +165 -0
  35. matrice_inference/deploy/optimize/transmission.cpython-312-x86_64-linux-gnu.so +0 -0
  36. matrice_inference/deploy/optimize/transmission.pyi +97 -0
  37. matrice_inference/deploy/server/inference/batch_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  38. matrice_inference/deploy/server/inference/batch_manager.pyi +50 -0
  39. matrice_inference/deploy/server/inference/inference_interface.cpython-312-x86_64-linux-gnu.so +0 -0
  40. matrice_inference/deploy/server/inference/inference_interface.pyi +114 -0
  41. matrice_inference/deploy/server/inference/model_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  42. matrice_inference/deploy/server/inference/model_manager.pyi +80 -0
  43. matrice_inference/deploy/server/inference/triton_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  44. matrice_inference/deploy/server/inference/triton_utils.pyi +115 -0
  45. matrice_inference/deploy/server/proxy/proxy_interface.cpython-312-x86_64-linux-gnu.so +0 -0
  46. matrice_inference/deploy/server/proxy/proxy_interface.pyi +90 -0
  47. matrice_inference/deploy/server/proxy/proxy_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  48. matrice_inference/deploy/server/proxy/proxy_utils.pyi +113 -0
  49. matrice_inference/deploy/server/server.cpython-312-x86_64-linux-gnu.so +0 -0
  50. matrice_inference/deploy/server/server.pyi +155 -0
  51. matrice_inference/deploy/server/stream/inference_worker.cpython-312-x86_64-linux-gnu.so +0 -0
  52. matrice_inference/deploy/server/stream/inference_worker.pyi +56 -0
  53. matrice_inference/deploy/server/stream/kafka_consumer_worker.cpython-312-x86_64-linux-gnu.so +0 -0
  54. matrice_inference/deploy/server/stream/kafka_consumer_worker.pyi +51 -0
  55. matrice_inference/deploy/server/stream/kafka_producer_worker.cpython-312-x86_64-linux-gnu.so +0 -0
  56. matrice_inference/deploy/server/stream/kafka_producer_worker.pyi +50 -0
  57. matrice_inference/deploy/server/stream/stream_debug_logger.cpython-312-x86_64-linux-gnu.so +0 -0
  58. matrice_inference/deploy/server/stream/stream_debug_logger.pyi +47 -0
  59. matrice_inference/deploy/server/stream/stream_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  60. matrice_inference/deploy/server/stream/stream_manager.pyi +69 -0
  61. matrice_inference/deploy/server/stream/video_buffer.cpython-312-x86_64-linux-gnu.so +0 -0
  62. matrice_inference/deploy/server/stream/video_buffer.pyi +120 -0
  63. matrice_inference/deploy/stream/kafka_stream.cpython-312-x86_64-linux-gnu.so +0 -0
  64. matrice_inference/deploy/stream/kafka_stream.pyi +444 -0
  65. matrice_inference/deploy/stream/redis_stream.cpython-312-x86_64-linux-gnu.so +0 -0
  66. matrice_inference/deploy/stream/redis_stream.pyi +447 -0
  67. matrice_inference/deployment/camera_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  68. matrice_inference/deployment/camera_manager.pyi +669 -0
  69. matrice_inference/deployment/deployment.cpython-312-x86_64-linux-gnu.so +0 -0
  70. matrice_inference/deployment/deployment.pyi +736 -0
  71. matrice_inference/deployment/inference_pipeline.cpython-312-x86_64-linux-gnu.so +0 -0
  72. matrice_inference/deployment/inference_pipeline.pyi +527 -0
  73. matrice_inference/deployment/streaming_gateway_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  74. matrice_inference/deployment/streaming_gateway_manager.pyi +275 -0
  75. matrice_inference/py.typed +0 -0
  76. matrice_inference-0.1.0.dist-info/METADATA +26 -0
  77. matrice_inference-0.1.0.dist-info/RECORD +80 -0
  78. matrice_inference-0.1.0.dist-info/WHEEL +5 -0
  79. matrice_inference-0.1.0.dist-info/licenses/LICENSE.txt +21 -0
  80. matrice_inference-0.1.0.dist-info/top_level.txt +1 -0
matrice_inference/deploy/aggregator/aggregator.pyi
@@ -0,0 +1,55 @@
+ """Auto-generated stub for module: aggregator."""
+ from typing import Any, Dict, List, Optional
+
+ from collections import defaultdict
+ from queue import Queue, Empty
+ import copy
+ import logging
+ import threading
+ import time
+
+ # Classes
+ class ResultsAggregator:
+     """
+     Handles complex aggregation and combination of synchronized results from multiple deployments.
+     This component takes synchronized results and combines them into meaningful aggregated outputs
+     while maintaining a structure consistent with individual deployment results.
+     """
+
+     def __init__(self: Any, synchronized_results_queue: Any, aggregate_by_location: bool = False) -> None: ...
+     """
+     Initialize the results aggregator.
+
+     Args:
+         synchronized_results_queue: Queue containing synchronized results from the synchronizer
+         aggregate_by_location: Whether to additionally group aggregated results by location
+     """
+
+     def cleanup(self: Any) -> None: ...
+     """
+     Clean up resources.
+     """
+
+     def get_health_status(self: Any) -> Dict[str, Any]: ...
+     """
+     Get health status of the aggregator.
+     """
+
+     def get_stats(self: Any) -> Dict[str, Any]: ...
+     """
+     Get current aggregation statistics.
+     """
+
+     def start_aggregation(self: Any) -> bool: ...
+     """
+     Start the results aggregation process.
+
+     Returns:
+         bool: True if aggregation started successfully, False otherwise
+     """
+
+     def stop_aggregation(self: Any) -> Any: ...
+     """
+     Stop the results aggregation process.
+     """
+
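A minimal usage sketch for `ResultsAggregator`, based only on the stub above. The queue payload shape is not documented here, and the grouping behavior of `aggregate_by_location=True` is an assumption from the parameter name:

```python
from queue import Queue

from matrice_inference.deploy.aggregator.aggregator import ResultsAggregator

# Normally fed by ResultsSynchronizer; shown empty here.
synchronized_results_queue: Queue = Queue()

aggregator = ResultsAggregator(
    synchronized_results_queue,
    aggregate_by_location=True,  # assumed: group aggregated output per location
)

if aggregator.start_aggregation():
    try:
        print(aggregator.get_stats())          # aggregation counters
        print(aggregator.get_health_status())  # component health
    finally:
        aggregator.stop_aggregation()
        aggregator.cleanup()
```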
matrice_inference/deploy/aggregator/analytics.pyi
@@ -0,0 +1,63 @@
+ """Auto-generated stub for module: analytics."""
+ from typing import Any, Dict, List, Optional, Tuple
+
+ from confluent_kafka import Producer
+ from matrice_common.session import Session
+ import base64
+ import json
+ import logging
+ import threading
+ import time
+
+ # Classes
+ class AnalyticsSummarizer:
+     """
+     Buffers aggregated camera_results and emits 5-minute rollups per camera,
+     focusing on tracking_stats per application.
+
+     Output structure example per camera:
+         {
+             "camera_name": "camera_1",
+             "inferencePipelineId": "pipeline-xyz",
+             "camera_group": "group_a",
+             "location": "Lobby",
+             "agg_apps": [
+                 {
+                     "application_name": "People Counting",
+                     "application_key_name": "People_Counting",
+                     "application_version": "1.3",
+                     "tracking_stats": {
+                         "input_timestamp": "00:00:09.9",  # last seen
+                         "reset_timestamp": "00:00:00",  # earliest seen in window
+                         "current_counts": [{"category": "person", "count": 4}],  # last seen
+                         "total_counts": [{"category": "person", "count": 37}]  # max seen in window
+                     }
+                 }
+             ],
+             "summary_metadata": {
+                 "window_seconds": 300,
+                 "messages_aggregated": 123,
+                 "start_time": 1710000000.0,
+                 "end_time": 1710000300.0
+             }
+         }
+     """
+
+     def __init__(self: Any, session: Session, inference_pipeline_id: str, flush_interval_seconds: int = 300) -> None: ...
+
+     def cleanup(self: Any) -> None: ...
+
+     def get_health_status(self: Any) -> Dict[str, Any]: ...
+
+     def get_stats(self: Any) -> Dict[str, Any]: ...
+
+     def ingest_result(self: Any, aggregated_result: Dict[str, Any]) -> None: ...
+     """
+     Receive a single aggregated camera_results payload for buffering.
+     This is intended to be called by the publisher after a successful publish.
+     """
+
+     def start(self: Any) -> bool: ...
+
+     def stop(self: Any) -> None: ...
+
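A sketch of the intended call pattern, using a payload trimmed down from the docstring example; the session credentials and pipeline ID are placeholders:

```python
from matrice_common.session import Session
from matrice_inference.deploy.aggregator.analytics import AnalyticsSummarizer

session = Session(account_number="...", access_key="...", secret_key="...")
summarizer = AnalyticsSummarizer(session, inference_pipeline_id="pipeline-xyz",
                                 flush_interval_seconds=300)  # 5-minute rollup window

if summarizer.start():
    # In the full pipeline the publisher calls this after each successful publish.
    summarizer.ingest_result({
        "camera_name": "camera_1",
        "inferencePipelineId": "pipeline-xyz",
        "agg_apps": [],
    })
    summarizer.stop()
    summarizer.cleanup()
```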
matrice_inference/deploy/aggregator/ingestor.pyi
@@ -0,0 +1,79 @@
+ """Auto-generated stub for module: ingestor."""
+ from typing import Any, Dict, List, Optional, Tuple
+
+ from matrice_common.session import Session
+ from matrice_inference.deploy.stream.kafka_stream import MatriceKafkaDeployment
+ from queue import Empty, PriorityQueue, Full
+ import itertools
+ import logging
+ import threading
+ import time
+
+ # Classes
+ class ResultsIngestor:
+     """
+     Streams and manages results from multiple deployments.
+     Handles result collection, queuing, and distribution with enhanced structure consistency.
+     """
+
+     def __init__(self: Any, deployment_ids: List[str], session: Session, consumer_timeout: float = 60) -> None: ...
+     """
+     Initialize the results ingestor.
+
+     Args:
+         deployment_ids: List of deployment IDs
+         session: Session object for authentication
+         consumer_timeout: Timeout for consuming results from deployments
+     """
+
+     def cleanup(self: Any) -> None: ...
+     """
+     Clean up all resources.
+     """
+
+     def get_all_results(self: Any, timeout: float = 1.0) -> List[Dict]: ...
+     """
+     Get results from all deployment queues.
+
+     Args:
+         timeout: Timeout for getting results
+
+     Returns:
+         List[Dict]: List of result dictionaries
+     """
+
+     def get_health_status(self: Any) -> Dict: ...
+     """
+     Get health status of the results ingestor.
+     """
+
+     def get_results(self: Any, deployment_id: str, timeout: float = 1.0) -> Optional[Dict]: ...
+     """
+     Get a result from a specific deployment's priority queue.
+
+     Args:
+         deployment_id: ID of the deployment
+         timeout: Timeout for getting the result
+
+     Returns:
+         Optional[Dict]: Result dictionary, or None on timeout or if no result is available
+     """
+
+     def get_stats(self: Any) -> Dict: ...
+     """
+     Get current statistics.
+     """
+
+     def start_streaming(self: Any) -> bool: ...
+     """
+     Start streaming results from all deployments.
+
+     Returns:
+         bool: True if streaming started successfully, False otherwise
+     """
+
+     def stop_streaming(self: Any) -> None: ...
+     """
+     Stop all streaming operations.
+     """
+
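A polling sketch built from the signatures above; the deployment IDs are placeholders and the result dictionaries are left opaque since their schema is not part of this stub:

```python
from matrice_common.session import Session
from matrice_inference.deploy.aggregator.ingestor import ResultsIngestor

session = Session(account_number="...", access_key="...", secret_key="...")
ingestor = ResultsIngestor(
    deployment_ids=["deployment-a", "deployment-b"],  # placeholder IDs
    session=session,
    consumer_timeout=60,
)

if ingestor.start_streaming():
    try:
        # Drain whatever arrived across all deployment queues.
        for result in ingestor.get_all_results(timeout=1.0):
            print(result)
        # Or poll a single deployment's priority queue.
        single = ingestor.get_results("deployment-a", timeout=1.0)
        print(single)
    finally:
        ingestor.stop_streaming()
        ingestor.cleanup()
```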
matrice_inference/deploy/aggregator/pipeline.pyi
@@ -0,0 +1,139 @@
+ """Auto-generated stub for module: pipeline."""
+ from typing import Any, Dict
+
+ from matrice_common.session import Session
+ from matrice_inference.deploy.aggregator.aggregator import ResultsAggregator
+ from matrice_inference.deploy.aggregator.analytics import AnalyticsSummarizer
+ from matrice_inference.deploy.aggregator.ingestor import ResultsIngestor
+ from matrice_inference.deploy.aggregator.publisher import ResultsPublisher
+ from matrice_inference.deploy.aggregator.synchronizer import ResultsSynchronizer
+ from matrice_inference.deployment.inference_pipeline import InferencePipeline
+ from queue import Queue
+ import logging
+ import time
+
+ # Classes
+ class ResultsAggregationPipeline:
+     """
+     Enhanced deployments aggregator that handles multiple streams, synchronizes results,
+     and outputs aggregated results to Kafka topics with a consistent structure.
+
+     This class orchestrates the complete pipeline for collecting, synchronizing, and
+     publishing results from multiple ML model deployments in an inference pipeline,
+     ensuring all results follow the same structure as individual deployment results.
+
+     Usage Example:
+         ```python
+         from matrice import Session
+         from matrice_inference.deploy.aggregator import ResultsAggregationPipeline
+
+         # Initialize session
+         session = Session(account_number="...", access_key="...", secret_key="...")
+
+         # Create aggregator for an inference pipeline
+         aggregator = ResultsAggregationPipeline(session, "your-action-record-id")
+
+         # Setup the aggregation pipeline
+         if aggregator.setup_components():
+             print(f"Setup complete for {len(aggregator.deployment_ids)} deployments")
+
+             # Start streaming and run until keyboard interrupt
+             try:
+                 aggregator.start_streaming()
+             except KeyboardInterrupt:
+                 print("Pipeline stopped by user")
+             finally:
+                 aggregator.cleanup()
+         ```
+     """
+
+     def __init__(self: Any, session: Session, action_record_id: str) -> None: ...
+     """
+     Initialize the deployments aggregator.
+
+     Args:
+         session: Session object for authentication
+         action_record_id: Action record ID
+     """
+
+     def cleanup(self: Any) -> None: ...
+     """
+     Clean up all resources.
+     """
+
+     def force_sync_pending_results(self: Any) -> int: ...
+     """
+     Force synchronization of all pending results.
+
+     Returns:
+         int: Number of pending results that were synchronized
+     """
+
+     def get_deployment_info(self: Any) -> Dict: ...
+     """
+     Get information about the deployments in this aggregator.
+
+     Returns:
+         Dict: Deployment information including IDs, count, and status
+     """
+
+     def get_health_status(self: Any) -> Dict: ...
+     """
+     Get health status of all components.
+     """
+
+     def get_stats(self: Any) -> Dict: ...
+     """
+     Get current statistics from all components.
+     """
+
+     def setup_components(self: Any) -> bool: ...
+     """
+     Setup all components and initialize the aggregation pipeline.
+
+     Returns:
+         bool: True if all components initialized successfully, False otherwise
+     """
+
+     def start_logging(self: Any, status_interval: int = 30) -> None: ...
+     """
+     Start the pipeline logging and run until interrupted.
+     Args:
+         status_interval: Interval in seconds between status log messages
+     """
+
+     def start_streaming(self: Any, block: bool = True) -> bool: ...
+     """
+     Start the complete streaming pipeline: ingestion, synchronization, aggregation, and publishing.
+
+     Returns:
+         bool: True if streaming started successfully, False otherwise
+     """
+
+     def stop_streaming(self: Any) -> None: ...
+     """
+     Stop all streaming operations in reverse order.
+     """
+
+     def update_status(self: Any, step_code: str, status: str, status_description: str) -> None: ...
+     """
+     Update the status of the aggregation pipeline action.
+
+     Args:
+         step_code: Code indicating the current step
+         status: Status of the step
+         status_description: Description of the status
+     """
+
+     def wait_for_ready(self: Any, timeout: int = 300, poll_interval: int = 10) -> bool: ...
+     """
+     Wait for the aggregator to be ready and processing results.
+
+     Args:
+         timeout: Maximum time to wait in seconds
+         poll_interval: Time between checks in seconds
+
+     Returns:
+         bool: True if the aggregator is ready, False on timeout
+     """
+
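The stub also exposes a non-blocking mode via `start_streaming(block=False)` alongside readiness and logging helpers. A sketch of that flow, with the action record ID as a placeholder and the exact semantics of `block` assumed from its name:

```python
from matrice_common.session import Session
from matrice_inference.deploy.aggregator.pipeline import ResultsAggregationPipeline

session = Session(account_number="...", access_key="...", secret_key="...")
pipeline = ResultsAggregationPipeline(session, "your-action-record-id")

if pipeline.setup_components() and pipeline.start_streaming(block=False):
    # Poll until the components report ready, then log status periodically.
    if pipeline.wait_for_ready(timeout=300, poll_interval=10):
        pipeline.start_logging(status_interval=30)  # runs until interrupted
    pipeline.stop_streaming()
    pipeline.cleanup()
```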
matrice_inference/deploy/aggregator/publisher.pyi
@@ -0,0 +1,59 @@
+ """Auto-generated stub for module: publisher."""
+ from typing import Any, Dict, Optional
+
+ from matrice_common.session import Session
+ from matrice_inference.deploy.stream.kafka_stream import MatriceKafkaDeployment
+ from queue import Queue, Empty
+ import logging
+ import threading
+ import time
+
+ # Classes
+ class ResultsPublisher:
+     """
+     Streams final aggregated results from the inference pipeline to Kafka.
+     Handles result collection, queuing, and distribution with proper error handling
+     for the enhanced aggregated result structure.
+     """
+
+     def __init__(self: Any, inference_pipeline_id: str, session: Session, final_results_queue: Any, analytics_summarizer: Optional[Any] = None) -> None: ...
+     """
+     Initialize the final results publisher.
+
+     Args:
+         inference_pipeline_id: ID of the inference pipeline
+         session: Session object for authentication
+         final_results_queue: Queue containing final aggregated results
+     """
+
+     def get_health_status(self: Any) -> Dict[str, Any]: ...
+     """
+     Get health status of the publisher.
+     """
+
+     def get_stats(self: Any) -> Dict[str, Any]: ...
+     """
+     Get streaming statistics.
+
+     Returns:
+         Dict containing statistics
+     """
+
+     def is_running(self: Any) -> bool: ...
+     """
+     Check if the publisher is currently running.
+     """
+
+     def start_streaming(self: Any) -> bool: ...
+     """
+     Start streaming final results to Kafka.
+
+     Returns:
+         bool: True if streaming started successfully, False otherwise
+     """
+
+     def stop_streaming(self: Any) -> None: ...
+     """
+     Stop streaming final results.
+     """
+
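The optional `analytics_summarizer` argument is not described in the stub's Args. Wiring it is shown below on the assumption, taken from `AnalyticsSummarizer.ingest_result` above, that the publisher forwards each result to it after a successful publish; IDs and credentials are placeholders:

```python
from queue import Queue

from matrice_common.session import Session
from matrice_inference.deploy.aggregator.analytics import AnalyticsSummarizer
from matrice_inference.deploy.aggregator.publisher import ResultsPublisher

session = Session(account_number="...", access_key="...", secret_key="...")
final_results_queue: Queue = Queue()  # fed by ResultsAggregator in the full pipeline
summarizer = AnalyticsSummarizer(session, inference_pipeline_id="pipeline-xyz")

publisher = ResultsPublisher(
    inference_pipeline_id="pipeline-xyz",
    session=session,
    final_results_queue=final_results_queue,
    analytics_summarizer=summarizer,  # assumed: receives each successfully published result
)

if publisher.start_streaming():
    print(publisher.is_running())  # True while the publishing worker is active
    publisher.stop_streaming()
```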
matrice_inference/deploy/aggregator/synchronizer.pyi
@@ -0,0 +1,58 @@
+ """Auto-generated stub for module: synchronizer."""
+ from typing import Any, Dict, List, Tuple
+
+ from collections import defaultdict
+ from queue import Queue, Empty, PriorityQueue
+ import logging
+ import threading
+ import time
+
+ # Classes
+ class ResultsSynchronizer:
+     """
+     Handles synchronization of results from multiple deployments by stream_key and input_order.
+     Ensures consistent structure and proper error handling for the aggregation pipeline.
+     """
+
+     def __init__(self: Any, results_queues: Dict[str, PriorityQueue], sync_timeout: float = 60.0) -> None: ...
+     """
+     Initialize the results synchronizer.
+
+     Args:
+         results_queues: Dictionary of priority queues containing results from deployments
+         sync_timeout: Maximum time to wait for input_order synchronization (in seconds)
+     """
+
+     def cleanup(self: Any) -> None: ...
+     """
+     Clean up resources.
+     """
+
+     def force_sync_pending(self: Any) -> int: ...
+     """
+     Force synchronization of all pending results regardless of completeness.
+     """
+
+     def get_health_status(self: Any) -> Dict: ...
+     """
+     Get health status of the synchronizer.
+     """
+
+     def get_stats(self: Any) -> Dict: ...
+     """
+     Get current synchronization statistics.
+     """
+
+     def start_synchronization(self: Any) -> bool: ...
+     """
+     Start the results synchronization process.
+
+     Returns:
+         bool: True if synchronization started successfully, False otherwise
+     """
+
+     def stop_synchronization(self: Any) -> Any: ...
+     """
+     Stop the results synchronization process.
+     """
+
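A wiring sketch that follows the constructor signature directly; only the deployment IDs are placeholders:

```python
from queue import PriorityQueue

from matrice_inference.deploy.aggregator.synchronizer import ResultsSynchronizer

# One priority queue per deployment, keyed by deployment ID
# (these queues are normally populated by ResultsIngestor).
results_queues = {
    "deployment-a": PriorityQueue(),
    "deployment-b": PriorityQueue(),
}

synchronizer = ResultsSynchronizer(results_queues, sync_timeout=60.0)

if synchronizer.start_synchronization():
    try:
        print(synchronizer.get_stats())
        flushed = synchronizer.force_sync_pending()  # flush incomplete groups
        print(f"Force-synchronized {flushed} pending results")
    finally:
        synchronizer.stop_synchronization()
        synchronizer.cleanup()
```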
matrice_inference/deploy/client/auto_streaming/auto_streaming.pyi
@@ -0,0 +1,145 @@
+ """Auto-generated stub for module: auto_streaming."""
+ from typing import Any, Callable, Dict, List, Optional
+
+ from matrice_inference.deploy.client.auto_streaming.auto_streaming_utils import AutoStreamingUtils
+ from matrice_inference.deploy.client.streaming_gateway import StreamingGateway, ModelInputType, OutputConfig, InputConfig
+ from matrice_inference.deployment.camera_manager import CameraManager
+ from matrice_inference.deployment.streaming_gateway_manager import StreamingGateway
+ from matrice_inference.deployment.streaming_gateway_manager import StreamingGatewayManager
+ import logging
+ import threading
+ import time
+
+ # Classes
+ class AutoStreaming:
+     """
+     Handles automatic streaming setup and management using streaming gateway configurations.
+
+     This class manages multiple streaming gateways, automatically configures cameras
+     based on each gateway's camera group assignments, and handles the streaming lifecycle.
+
+     Example usage:
+         # Method 1: From service IDs (auto-discovers all gateways)
+         auto_streaming = AutoStreaming(
+             session=session,
+             service_ids=["service_id_123", "service_id_456"],
+             model_input_type=ModelInputType.FRAMES
+         )
+
+         # Method 2: From specific gateway IDs
+         auto_streaming = AutoStreaming(
+             session=session,
+             streaming_gateway_ids=["gateway1", "gateway2"],
+             model_input_type=ModelInputType.FRAMES
+         )
+
+         # Start auto streaming
+         success = auto_streaming.start()
+
+         # Stop auto streaming
+         auto_streaming.stop()
+
+         # Get statistics
+         stats = auto_streaming.get_statistics()
+     """
+
+     def __init__(self: Any, session: Any, service_ids: Optional[List[str]] = None, streaming_gateway_ids: Optional[List[str]] = None, model_input_type: Any = ModelInputType.FRAMES, output_configs: Optional[Dict[str, OutputConfig]] = None, result_callback: Optional[Callable] = None, strip_input_from_result: bool = True, default_fps: int = 30, default_quality: int = 80, default_video_chunk_duration: int = 10, default_video_format: str = 'mp4', simulate_video_file_stream: bool = False) -> None: ...
+     """
+     Initialize AutoStreaming with service IDs or streaming gateway IDs.
+
+     Args:
+         session: Session object for authentication
+         service_ids: List of service IDs (deployment or inference pipeline IDs); gateways are auto-discovered
+         streaming_gateway_ids: List of specific streaming gateway IDs to use
+         model_input_type: Model input type (FRAMES or VIDEO)
+         output_configs: Optional output configurations per streaming gateway
+         result_callback: Optional callback for processing results
+         strip_input_from_result: Whether to strip the input from results
+         default_fps: Default FPS for camera streams
+         default_quality: Default quality for camera streams
+         default_video_chunk_duration: Default video chunk duration for the video input type
+         default_video_format: Default video format for the video input type
+         simulate_video_file_stream: Whether to restream video files
+     Note:
+         Either service_ids OR streaming_gateway_ids must be provided, not both.
+         If service_ids is provided, all gateways for those services will be auto-discovered.
+     """
+
+     def add_streaming_gateway(self: Any, gateway_id: str) -> bool: ...
+     """
+     Add a new streaming gateway to auto streaming.
+
+     Args:
+         gateway_id: ID of the streaming gateway to add
+
+     Returns:
+         bool: True if the gateway was added successfully
+     """
+
+     def get_gateway_status(self: Any, gateway_id: str) -> Optional[Dict]: ...
+     """
+     Get status for a specific streaming gateway.
+
+     Args:
+         gateway_id: Streaming gateway ID
+
+     Returns:
+         Dict with gateway status, or None if not found
+     """
+
+     def get_statistics(self: Any) -> Dict: ...
+     """
+     Get auto streaming statistics.
+
+     Returns:
+         Dict with comprehensive statistics
+     """
+
+     def refresh_camera_configs(self: Any) -> bool: ...
+     """
+     Refresh camera configurations for all streaming gateways.
+
+     Returns:
+         bool: True if configurations were refreshed successfully
+     """
+
+     def remove_streaming_gateway(self: Any, gateway_id: str) -> bool: ...
+     """
+     Remove a streaming gateway from auto streaming.
+
+     Args:
+         gateway_id: ID of the streaming gateway to remove
+
+     Returns:
+         bool: True if the gateway was removed successfully
+     """
+
+     def setup_streaming_gateways(self: Any, gateway_input_configs: Optional[Dict[str, List[InputConfig]]] = None) -> Dict[str, StreamingGateway]: ...
+     """
+     Setup StreamingGateway instances for each streaming gateway ID.
+
+     Returns:
+         Dict[str, StreamingGateway]: Mapping of gateway ID to its configured StreamingGateway instance
+     """
+
+     def setup_streaming_gateways_input_configs(self: Any) -> Optional[Dict[str, List[InputConfig]]]: ...
+     """
+     Setup input configurations for each streaming gateway ID.
+
+     Returns:
+         Optional[Dict[str, List[InputConfig]]]: Input configurations per gateway ID, or None if setup failed
+     """
+
+     def start(self: Any, send_to_api: bool = False) -> bool: ...
+     """
+     Start auto streaming for all configured streaming gateways.
+
+     Returns:
+         bool: True if streaming started successfully, False otherwise
+     """
+
+     def stop(self: Any) -> Any: ...
+     """
+     Stop auto streaming for all gateways.
+     """
+
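Rounding out the class docstring's example, a sketch that adds a result callback; the service ID is a placeholder and the callback payload shape is an assumption, since the stub types `result_callback` only as `Optional[Callable]`:

```python
from matrice_common.session import Session
from matrice_inference.deploy.client.auto_streaming.auto_streaming import AutoStreaming
from matrice_inference.deploy.client.streaming_gateway import ModelInputType

session = Session(account_number="...", access_key="...", secret_key="...")

def on_result(result):
    # Assumed: called once per inference result; payload schema is not documented here.
    print(result)

auto_streaming = AutoStreaming(
    session=session,
    service_ids=["service_id_123"],  # gateways for this service are auto-discovered
    model_input_type=ModelInputType.FRAMES,
    result_callback=on_result,
    default_fps=15,
)

if auto_streaming.start():
    print(auto_streaming.get_statistics())
    auto_streaming.stop()
```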