matrice-inference 0.1.0 (matrice_inference-0.1.0-py3-none-manylinux_2_17_x86_64.whl)

This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of matrice-inference might be problematic.

Files changed (80)
  1. matrice_inference/deploy/aggregator/aggregator.cpython-312-x86_64-linux-gnu.so +0 -0
  2. matrice_inference/deploy/aggregator/aggregator.pyi +55 -0
  3. matrice_inference/deploy/aggregator/analytics.cpython-312-x86_64-linux-gnu.so +0 -0
  4. matrice_inference/deploy/aggregator/analytics.pyi +63 -0
  5. matrice_inference/deploy/aggregator/ingestor.cpython-312-x86_64-linux-gnu.so +0 -0
  6. matrice_inference/deploy/aggregator/ingestor.pyi +79 -0
  7. matrice_inference/deploy/aggregator/pipeline.cpython-312-x86_64-linux-gnu.so +0 -0
  8. matrice_inference/deploy/aggregator/pipeline.pyi +139 -0
  9. matrice_inference/deploy/aggregator/publisher.cpython-312-x86_64-linux-gnu.so +0 -0
  10. matrice_inference/deploy/aggregator/publisher.pyi +59 -0
  11. matrice_inference/deploy/aggregator/synchronizer.cpython-312-x86_64-linux-gnu.so +0 -0
  12. matrice_inference/deploy/aggregator/synchronizer.pyi +58 -0
  13. matrice_inference/deploy/client/auto_streaming/auto_streaming.cpython-312-x86_64-linux-gnu.so +0 -0
  14. matrice_inference/deploy/client/auto_streaming/auto_streaming.pyi +145 -0
  15. matrice_inference/deploy/client/auto_streaming/auto_streaming_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  16. matrice_inference/deploy/client/auto_streaming/auto_streaming_utils.pyi +126 -0
  17. matrice_inference/deploy/client/client.cpython-312-x86_64-linux-gnu.so +0 -0
  18. matrice_inference/deploy/client/client.pyi +337 -0
  19. matrice_inference/deploy/client/client_stream_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  20. matrice_inference/deploy/client/client_stream_utils.pyi +83 -0
  21. matrice_inference/deploy/client/client_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  22. matrice_inference/deploy/client/client_utils.pyi +77 -0
  23. matrice_inference/deploy/client/streaming_gateway/streaming_gateway.cpython-312-x86_64-linux-gnu.so +0 -0
  24. matrice_inference/deploy/client/streaming_gateway/streaming_gateway.pyi +120 -0
  25. matrice_inference/deploy/client/streaming_gateway/streaming_gateway_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  26. matrice_inference/deploy/client/streaming_gateway/streaming_gateway_utils.pyi +442 -0
  27. matrice_inference/deploy/client/streaming_gateway/streaming_results_handler.cpython-312-x86_64-linux-gnu.so +0 -0
  28. matrice_inference/deploy/client/streaming_gateway/streaming_results_handler.pyi +19 -0
  29. matrice_inference/deploy/optimize/cache_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  30. matrice_inference/deploy/optimize/cache_manager.pyi +15 -0
  31. matrice_inference/deploy/optimize/frame_comparators.cpython-312-x86_64-linux-gnu.so +0 -0
  32. matrice_inference/deploy/optimize/frame_comparators.pyi +203 -0
  33. matrice_inference/deploy/optimize/frame_difference.cpython-312-x86_64-linux-gnu.so +0 -0
  34. matrice_inference/deploy/optimize/frame_difference.pyi +165 -0
  35. matrice_inference/deploy/optimize/transmission.cpython-312-x86_64-linux-gnu.so +0 -0
  36. matrice_inference/deploy/optimize/transmission.pyi +97 -0
  37. matrice_inference/deploy/server/inference/batch_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  38. matrice_inference/deploy/server/inference/batch_manager.pyi +50 -0
  39. matrice_inference/deploy/server/inference/inference_interface.cpython-312-x86_64-linux-gnu.so +0 -0
  40. matrice_inference/deploy/server/inference/inference_interface.pyi +114 -0
  41. matrice_inference/deploy/server/inference/model_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  42. matrice_inference/deploy/server/inference/model_manager.pyi +80 -0
  43. matrice_inference/deploy/server/inference/triton_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  44. matrice_inference/deploy/server/inference/triton_utils.pyi +115 -0
  45. matrice_inference/deploy/server/proxy/proxy_interface.cpython-312-x86_64-linux-gnu.so +0 -0
  46. matrice_inference/deploy/server/proxy/proxy_interface.pyi +90 -0
  47. matrice_inference/deploy/server/proxy/proxy_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  48. matrice_inference/deploy/server/proxy/proxy_utils.pyi +113 -0
  49. matrice_inference/deploy/server/server.cpython-312-x86_64-linux-gnu.so +0 -0
  50. matrice_inference/deploy/server/server.pyi +155 -0
  51. matrice_inference/deploy/server/stream/inference_worker.cpython-312-x86_64-linux-gnu.so +0 -0
  52. matrice_inference/deploy/server/stream/inference_worker.pyi +56 -0
  53. matrice_inference/deploy/server/stream/kafka_consumer_worker.cpython-312-x86_64-linux-gnu.so +0 -0
  54. matrice_inference/deploy/server/stream/kafka_consumer_worker.pyi +51 -0
  55. matrice_inference/deploy/server/stream/kafka_producer_worker.cpython-312-x86_64-linux-gnu.so +0 -0
  56. matrice_inference/deploy/server/stream/kafka_producer_worker.pyi +50 -0
  57. matrice_inference/deploy/server/stream/stream_debug_logger.cpython-312-x86_64-linux-gnu.so +0 -0
  58. matrice_inference/deploy/server/stream/stream_debug_logger.pyi +47 -0
  59. matrice_inference/deploy/server/stream/stream_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  60. matrice_inference/deploy/server/stream/stream_manager.pyi +69 -0
  61. matrice_inference/deploy/server/stream/video_buffer.cpython-312-x86_64-linux-gnu.so +0 -0
  62. matrice_inference/deploy/server/stream/video_buffer.pyi +120 -0
  63. matrice_inference/deploy/stream/kafka_stream.cpython-312-x86_64-linux-gnu.so +0 -0
  64. matrice_inference/deploy/stream/kafka_stream.pyi +444 -0
  65. matrice_inference/deploy/stream/redis_stream.cpython-312-x86_64-linux-gnu.so +0 -0
  66. matrice_inference/deploy/stream/redis_stream.pyi +447 -0
  67. matrice_inference/deployment/camera_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  68. matrice_inference/deployment/camera_manager.pyi +669 -0
  69. matrice_inference/deployment/deployment.cpython-312-x86_64-linux-gnu.so +0 -0
  70. matrice_inference/deployment/deployment.pyi +736 -0
  71. matrice_inference/deployment/inference_pipeline.cpython-312-x86_64-linux-gnu.so +0 -0
  72. matrice_inference/deployment/inference_pipeline.pyi +527 -0
  73. matrice_inference/deployment/streaming_gateway_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  74. matrice_inference/deployment/streaming_gateway_manager.pyi +275 -0
  75. matrice_inference/py.typed +0 -0
  76. matrice_inference-0.1.0.dist-info/METADATA +26 -0
  77. matrice_inference-0.1.0.dist-info/RECORD +80 -0
  78. matrice_inference-0.1.0.dist-info/WHEEL +5 -0
  79. matrice_inference-0.1.0.dist-info/licenses/LICENSE.txt +21 -0
  80. matrice_inference-0.1.0.dist-info/top_level.txt +1 -0
matrice_inference/deploy/optimize/frame_comparators.pyi
@@ -0,0 +1,203 @@
+ """Auto-generated stub for module: frame_comparators."""
+ from typing import Any, Optional, Tuple
+
+ from PIL import Image
+ from imagehash import average_hash, phash, dhash
+ from skimage.metrics import structural_similarity
+ import cv2
+ import logging
+ import numpy as np
+
+ # Classes
+ class AbsDiffComparator(FrameComparator):
+     """
+     Compare frames using absolute difference.
+     """
+
+     def __init__(self: Any, threshold: float = 10.0) -> None: ...
+     """
+     Initialize with threshold for mean absolute difference.
+
+     Args:
+         threshold: Mean difference threshold (default: 10.0).
+
+     Raises:
+         ValueError: If threshold is negative.
+     """
+
+     def compare(self: Any, static_frame: Any, new_frame: Any, stream_key: Optional[str] = None) -> Tuple[bool, float]: ...
+     """
+     Compare frames using mean absolute difference.
+
+     Args:
+         static_frame: Reference frame (RGB, cv2 image as np.ndarray).
+         new_frame: New frame to compare (RGB, cv2 image as np.ndarray).
+         stream_key: Optional identifier for the video stream (e.g., camera ID).
+
+     Returns:
+         Tuple[bool, float]: (is_similar, mean_difference)
+     """
+
+ class AverageHashComparator(FrameComparator):
+     """
+     Compare frames using average hashing (aHash).
+     """
+
+     def __init__(self: Any, threshold: int = 5) -> None: ...
+     """
+     Initialize with threshold for hash difference.
+
+     Args:
+         threshold: Hash difference threshold (default: 5).
+
+     Raises:
+         ValueError: If threshold is negative.
+     """
+
+     def compare(self: Any, static_frame: Any, new_frame: Any, stream_key: Optional[str] = None) -> Tuple[bool, float]: ...
+     """
+     Compare frames using average hash difference.
+
+     Args:
+         static_frame: Reference frame (RGB, cv2 image as np.ndarray).
+         new_frame: New frame to compare (RGB, cv2 image as np.ndarray).
+         stream_key: Optional identifier for the video stream (e.g., camera ID).
+
+     Returns:
+         Tuple[bool, float]: (is_similar, hash_difference)
+     """
+
+ class DifferenceHashComparator(FrameComparator):
+     """
+     Compare frames using difference hashing (dHash).
+     """
+
+     def __init__(self: Any, threshold: int = 5) -> None: ...
+     """
+     Initialize with threshold for hash difference.
+
+     Args:
+         threshold: Hash difference threshold (default: 5).
+
+     Raises:
+         ValueError: If threshold is negative.
+     """
+
+     def compare(self: Any, static_frame: Any, new_frame: Any, stream_key: Optional[str] = None) -> Tuple[bool, float]: ...
+     """
+     Compare frames using dHash distance.
+
+     Args:
+         static_frame: Reference frame (RGB, cv2 image as np.ndarray).
+         new_frame: New frame to compare (RGB, cv2 image as np.ndarray).
+         stream_key: Optional identifier for the video stream (e.g., camera ID).
+
+     Returns:
+         Tuple[bool, float]: (is_similar, hash_difference)
+     """
+
+ class FrameComparator:
+     """
+     Base class for frame comparison methods.
+     """
+
+     def compare(self: Any, static_frame: Any, new_frame: Any, stream_key: Optional[str] = None) -> Tuple[bool, float]: ...
+     """
+     Compare frames and determine if they are similar.
+
+     Args:
+         static_frame: Reference frame (RGB, cv2 image as np.ndarray).
+         new_frame: New frame to compare (RGB, cv2 image as np.ndarray).
+         stream_key: Optional identifier for the video stream (e.g., camera ID).
+
+     Returns:
+         Tuple[bool, float]: (is_similar, similarity_score)
+     """
+
+ class HistogramComparator(FrameComparator):
+     """
+     Compare frames using histogram correlation.
+     """
+
+     def __init__(self: Any, threshold: float = 0.9) -> None: ...
+     """
+     Initialize with threshold for histogram correlation.
+
+     Args:
+         threshold: Correlation score threshold (default: 0.9).
+
+     Raises:
+         ValueError: If threshold is not in [0, 1].
+     """
+
+     def compare(self: Any, static_frame: Any, new_frame: Any, stream_key: Optional[str] = None) -> Tuple[bool, float]: ...
+     """
+     Compare frames using histogram correlation.
+
+     Args:
+         static_frame: Reference frame (RGB, cv2 image as np.ndarray).
+         new_frame: New frame to compare (RGB, cv2 image as np.ndarray).
+         stream_key: Optional identifier for the video stream (e.g., camera ID).
+
+     Returns:
+         Tuple[bool, float]: (is_similar, correlation_score)
+     """
+
+ class PerceptualHashComparator(FrameComparator):
+     """
+     Compare frames using perceptual hashing (pHash).
+     """
+
+     def __init__(self: Any, threshold: int = 6) -> None: ...
+     """
+     Initialize with threshold for hash difference.
+
+     Args:
+         threshold: Hash difference threshold (default: 6).
+
+     Raises:
+         ValueError: If threshold is negative.
+     """
+
+     def compare(self: Any, static_frame: Any, new_frame: Any, stream_key: Optional[str] = None) -> Tuple[bool, float]: ...
+     """
+     Compare frames using perceptual hash difference.
+
+     Args:
+         static_frame: Reference frame (RGB, cv2 image as np.ndarray).
+         new_frame: New frame to compare (RGB, cv2 image as np.ndarray).
+         stream_key: Optional identifier for the video stream (e.g., camera ID).
+
+     Returns:
+         Tuple[bool, float]: (is_similar, hash_difference)
+     """
+
+ class SSIMComparator(FrameComparator):
+     """
+     Compare frames using Structural Similarity Index (SSIM).
+     """
+
+     def __init__(self: Any, threshold: float = 0.9) -> None: ...
+     """
+     Initialize with threshold for SSIM score.
+
+     Args:
+         threshold: SSIM score threshold (default: 0.9).
+
+     Raises:
+         ValueError: If threshold is not in [0, 1].
+     """
+
+     def compare(self: Any, static_frame: Any, new_frame: Any, stream_key: Optional[str] = None) -> Tuple[bool, float]: ...
+     """
+     Compare frames using SSIM.
+
+     Args:
+         static_frame: Reference frame (RGB, cv2 image as np.ndarray).
+         new_frame: New frame to compare (RGB, cv2 image as np.ndarray).
+         stream_key: Optional identifier for the video stream (e.g., camera ID).
+
+     Returns:
+         Tuple[bool, float]: (is_similar, ssim_score)
+     """
+
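All comparators above share one compare() contract, so callers can swap methods behind a single call site. A minimal usage sketch, assuming the compiled module exposes these classes exactly as the stub declares (frames are RGB numpy arrays per the docstrings; the stream key is illustrative):

    import numpy as np
    from matrice_inference.deploy.optimize.frame_comparators import (
        PerceptualHashComparator,
        SSIMComparator,
    )

    rng = np.random.default_rng(0)
    reference = rng.integers(0, 255, (480, 640, 3), dtype=np.uint8)  # RGB reference frame
    candidate = reference.copy()
    candidate[:64, :64] = 255  # perturb one corner to simulate a scene change

    for comparator in (SSIMComparator(threshold=0.9), PerceptualHashComparator(threshold=6)):
        is_similar, score = comparator.compare(reference, candidate, stream_key="camera-01")
        print(type(comparator).__name__, is_similar, score)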
matrice_inference/deploy/optimize/frame_difference.pyi
@@ -0,0 +1,165 @@
+ """Auto-generated stub for module: frame_difference."""
+ from typing import Any, Dict, Optional, Tuple, Union
+
+ from PIL import Image
+ import base64
+ import cv2
+ import logging
+ import numpy as np
+
+ # Constants
+ logger: Any
+
+ # Classes
+ class FrameDifferenceProcessor:
+     """
+     Handles frame difference calculation and reconstruction for intelligent caching.
+     """
+
+     def __init__(self: Any) -> None: ...
+     """
+     Initialize frame difference processor.
+     """
+
+     def calculate_frame_difference(self: Any, reference_frame: Any, current_frame: Any) -> Tuple[np.ndarray, Dict[str, Any]]: ...
+     """
+     Calculate difference between reference and current frame.
+
+     Args:
+         reference_frame: Reference frame (RGB, cv2 image as np.ndarray)
+         current_frame: Current frame to compare (RGB, cv2 image as np.ndarray)
+
+     Returns:
+         Tuple of (difference_data, metadata)
+     """
+
+     def decode_base64_to_frame(self: Any, encoded_frame: str) -> Optional[np.ndarray]: ...
+     """
+     Decode base64 string to frame.
+
+     Args:
+         encoded_frame: Base64 encoded frame
+
+     Returns:
+         Decoded frame as numpy array or None if failed
+     """
+
+     def decode_frame_difference(self: Any, encoded_diff: str) -> Optional[np.ndarray]: ...
+     """
+     Decode base64 frame difference data.
+
+     Args:
+         encoded_diff: Base64 encoded difference data
+
+     Returns:
+         Decoded difference as numpy array or None if failed
+     """
+
+     def encode_frame_difference(self: Any, difference_data: Any, metadata: Dict[str, Any], compression_quality: int = 85) -> str: ...
+     """
+     Encode frame difference data to base64.
+
+     Args:
+         difference_data: Frame difference as numpy array
+         metadata: Difference metadata
+         compression_quality: JPEG compression quality (1-100)
+
+     Returns:
+         Base64 encoded difference data
+     """
+
+     def encode_frame_to_base64(self: Any, frame: Any, quality: int = 95) -> str: ...
+     """
+     Encode frame to base64 string.
+
+     Args:
+         frame: Frame as numpy array
+         quality: JPEG quality (1-100)
+
+     Returns:
+         Base64 encoded frame
+     """
+
+     def reconstruct_frame(self: Any, reference_frame: Any, difference_data: Any, metadata: Dict[str, Any]) -> Optional[np.ndarray]: ...
+     """
+     Reconstruct frame from reference frame and difference data.
+
+     Args:
+         reference_frame: Reference frame (RGB, cv2 image as np.ndarray)
+         difference_data: Frame difference data
+         metadata: Difference metadata
+
+     Returns:
+         Reconstructed frame or None if failed
+     """
+
+ class IntelligentFrameCache:
+     """
+     Intelligent frame cache with two-threshold logic.
+     """
+
+     def __init__(self: Any, threshold_a: float = 0.95, threshold_b: float = 0.85, max_cache_size: int = 50) -> None: ...
+     """
+     Initialize intelligent frame cache.
+
+     Args:
+         threshold_a: High similarity threshold for cache reuse
+         threshold_b: Medium similarity threshold for difference-based reconstruction
+         max_cache_size: Maximum number of cached frames per stream
+     """
+
+     def cache_frame_result(self: Any, stream_key: str, frame: Any, model_result: Any, input_hash: Optional[str] = None) -> None: ...
+     """
+     Cache frame and its model result.
+
+     Args:
+         stream_key: Stream identifier
+         frame: Frame that was processed
+         model_result: Result from model inference
+         input_hash: Optional input hash for additional indexing
+     """
+
+     def clear_cache(self: Any, stream_key: Optional[str] = None) -> None: ...
+     """
+     Clear cache for specific stream or all streams.
+
+     Args:
+         stream_key: Stream to clear, or None to clear all
+     """
+
+     def get_cache_stats(self: Any) -> Dict[str, Any]: ...
+     """
+     Get cache statistics.
+
+     Returns:
+         Dictionary with cache statistics
+     """
+
+     def get_cached_result(self: Any, stream_key: str, action_data: Dict[str, Any]) -> Any: ...
+     """
+     Get cached result based on action data.
+
+     Args:
+         stream_key: Stream identifier
+         action_data: Data from should_use_cache decision
+
+     Returns:
+         Cached model result or None
+     """
+
+     def should_use_cache(self: Any, current_frame: Any, stream_key: str, ssim_comparator: Any) -> Tuple[str, Dict[str, Any]]: ...
+     """
+     Determine caching strategy based on frame similarity.
+
+     Args:
+         current_frame: Current frame to analyze
+         stream_key: Stream identifier
+         ssim_comparator: SSIM comparator for similarity calculation
+
+     Returns:
+         Tuple of (action, data) where action is:
+         - "use_cache": Use cached result (Threshold A)
+         - "use_difference": Use difference-based reconstruction (Threshold B)
+         - "process_new": Process as new frame (exceeds both thresholds)
+     """
+
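The two-threshold decision in should_use_cache() maps onto a small dispatch loop in the caller. A sketch under the stub's documented behavior; model_infer is a hypothetical stand-in for the real inference call:

    from matrice_inference.deploy.optimize.frame_comparators import SSIMComparator
    from matrice_inference.deploy.optimize.frame_difference import IntelligentFrameCache

    cache = IntelligentFrameCache(threshold_a=0.95, threshold_b=0.85, max_cache_size=50)
    ssim = SSIMComparator(threshold=0.9)

    def handle_frame(frame, stream_key, model_infer):
        # Decide between cache reuse, difference-based handling, and full processing.
        action, data = cache.should_use_cache(frame, stream_key, ssim)
        if action == "use_cache":  # similarity cleared Threshold A
            return cache.get_cached_result(stream_key, data)
        # "use_difference" and "process_new" both end in fresh inference here;
        # a real pipeline would transmit only the encoded diff in the former case.
        result = model_infer(frame)
        cache.cache_frame_result(stream_key, frame, result)
        return result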
matrice_inference/deploy/optimize/transmission.pyi
@@ -0,0 +1,97 @@
+ """Auto-generated stub for module: transmission."""
+ from typing import Any, Dict, Optional, Tuple
+
+ from datetime import datetime, timezone
+ from frame_comparators import SSIMComparator
+ from frame_difference import FrameDifferenceProcessor
+ import base64
+ import cv2
+ import hashlib
+ import logging
+ import numpy as np
+
+ # Constants
+ logger: Any
+
+ # Classes
+ class ClientTransmissionHandler:
+     """
+     Client-side transmission handler implementing two-threshold logic.
+
+     Responsibilities:
+     - Maintain the last frame per stream as an SSIM reference
+     - Decide strategy: full, difference, or skip
+     - Produce difference payload (base64-encoded JPEG) and metadata
+     - Track the last full-frame input hash for server cache linking
+     """
+
+     def __init__(self: Any, threshold_a: float = 0.95, threshold_b: float = 0.85) -> None: ...
+
+     def compute_and_store_full_frame_hash(self: Any, stream_key: str, full_jpeg_bytes: Any) -> str: ...
+     """
+     Compute a deterministic MD5 hash (non-security use) and store it for reference.
+     """
+
+     def decide_transmission(self: Any, frame: Any, stream_key: str) -> Tuple[str, Dict[str, Any]]: ...
+     """
+     Determine the transmission strategy for a frame.
+
+     Returns: (strategy, data)
+         strategy in {"full", "difference", "skip"}
+         data contains similarity and optional diff payload metadata
+     """
+
+     def encode_difference(self: Any, difference_data: Any, difference_metadata: Dict[str, Any], quality: int) -> Any: ...
+     """
+     Encode a difference np.ndarray to raw bytes suitable for transport.
+     """
+
+     def prepare_transmission(self: Any, frame: Any) -> Tuple[bytes, Dict[str, Any], str]: ...
+     """
+     Prepare the bytes payload and metadata for transport.
+
+     Returns (input_bytes, metadata, strategy)
+     """
+
+ class ServerTransmissionHandler:
+     """
+     Server-side transmission handler for intelligent input handling.
+
+     Responsibilities:
+     - Interpret transmission_strategy from the client (skip/difference/full)
+     - Resolve cache hits for skip signals
+     - Reconstruct frames for difference payloads
+     - Perform SSIM similarity checks for optional skipping
+     """
+
+     def __init__(self: Any, ssim_threshold: float = 0.95) -> None: ...
+
+     def decide_action(self: Any, message: Dict[str, Any], cache_manager: Any, frame_cache: Dict[str, np.ndarray]) -> Tuple[str, Optional[Dict[str, Any]]]: ...
+     """
+     Decide how to handle an incoming message.
+
+     Returns (action, payload):
+     - ("cached", cached_result)
+     - ("similar", None)
+     - ("process_difference", None) -> call reconstruct() then process
+     - ("process", None)
+     """
+
+     def process_input_message(self: Any, raw_message_value: Dict[str, Any], message_key: Optional[str], consumer_worker_id: str) -> Dict[str, Any]: ...
+     """
+     Normalize a raw Kafka message 'value' into a processed message structure.
+
+     Handles transmission_strategy: 'skip', 'difference', 'full'.
+     Decodes content accordingly and carries through strategy metadata.
+     """
+
+     def reconstruct_from_difference(self: Any, message: Dict[str, Any], frame_cache: Dict[str, np.ndarray]) -> Tuple[Optional[bytes], Optional[str]]: ...
+     """
+     Reconstruct the full frame from a difference; returns (jpeg_bytes, effective_hash).
+     """
+
+     def update_frame_cache_from_message(self: Any, message: Dict[str, Any], frame_cache: Dict[str, np.ndarray]) -> None: ...
+     """
+     If the message has image bytes, decode and store them for SSIM reference.
+     """
+
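On the client side, prepare_transmission() folds the strategy decision and payload construction into one call. A hedged sketch of a send loop; the producer object, topic name, and header layout are illustrative, not part of this package:

    from matrice_inference.deploy.optimize.transmission import ClientTransmissionHandler

    handler = ClientTransmissionHandler(threshold_a=0.95, threshold_b=0.85)

    def send_frame(producer, frame, stream_key):
        # Returns the bytes to ship, metadata describing how to interpret
        # them, and the chosen strategy: "full", "difference", or "skip".
        input_bytes, metadata, strategy = handler.prepare_transmission(frame)
        if strategy == "skip":
            return  # server resolves this against its cached result for the stream
        producer.send(
            "inference-input",  # illustrative topic name
            key=stream_key.encode(),
            value=input_bytes,
            headers=[("transmission_strategy", strategy.encode())],
        )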
matrice_inference/deploy/server/inference/batch_manager.pyi
@@ -0,0 +1,50 @@
+ """Auto-generated stub for module: batch_manager."""
+ from typing import Any, Dict, List, Optional, Tuple, Union
+
+ from dataclasses import dataclass, field
+ from matrice_inference.deploy.utils.post_processing.core.config import BaseConfig
+ import asyncio
+ import logging
+ import time
+
+ # Classes
+ class BatchRequest:
+     """
+     Represents a single inference request in a batch
+     """
+
+     pass
+
+ class DynamicBatchManager:
+     """
+     Manages dynamic batching for inference requests
+     """
+
+     def __init__(self: Any, batch_size: int, max_batch_wait_time: float, model_manager: Any, post_processing_fn: Any) -> None: ...
+     """
+     Initialize the dynamic batch manager.
+
+     Args:
+         batch_size: Maximum batch size for processing
+         max_batch_wait_time: Maximum wait time for batching
+         model_manager: Model manager for inference
+         post_processing_fn: Function to apply post-processing
+     """
+
+     async def add_request(self: Any, batch_request: Any) -> Tuple[Any, Optional[Dict[str, Any]]]: ...
+     """
+     Add a request to the batch queue and process if needed
+     """
+
+     async def flush_queue(self: Any) -> int: ...
+     """
+     Force process all remaining items in the batch queue.
+
+     Returns:
+         Number of items processed
+     """
+
+     def get_stats(self: Any) -> Dict[str, Any]: ...
+     """
+     Get statistics about the current batching state.
+     """
+
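Because add_request() is a coroutine, concurrent callers can share one manager and let it coalesce requests into batches. A sketch; BatchRequest's fields are not spelled out in the stub, so its construction below is illustrative:

    import asyncio
    from matrice_inference.deploy.server.inference.batch_manager import (
        BatchRequest,
        DynamicBatchManager,
    )

    async def serve(model_manager, post_process, inputs):
        manager = DynamicBatchManager(
            batch_size=8,
            max_batch_wait_time=0.05,  # seconds to let a batch fill
            model_manager=model_manager,
            post_processing_fn=post_process,
        )
        # Fan requests in concurrently; the manager batches them internally.
        results = await asyncio.gather(
            *(manager.add_request(BatchRequest()) for _ in inputs)
        )
        await manager.flush_queue()  # drain anything still queued
        print(manager.get_stats())
        return results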
matrice_inference/deploy/server/inference/inference_interface.pyi
@@ -0,0 +1,114 @@
+ """Auto-generated stub for module: inference_interface."""
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+ from datetime import datetime, timezone
+ from matrice.action_tracker import ActionTracker
+ from matrice_inference.deploy.optimize.cache_manager import CacheManager
+ from matrice_inference.deploy.optimize.frame_comparators import SSIMComparator
+ from matrice_inference.deploy.optimize.frame_difference import IntelligentFrameCache
+ from matrice_inference.deploy.server.inference.batch_manager import DynamicBatchManager, BatchRequest
+ from matrice_inference.deploy.server.inference.model_manager import ModelManager
+ from matrice_inference.deploy.utils.post_processing import PostProcessor, create_config_from_template
+ from matrice_inference.deploy.utils.post_processing.config import get_usecase_from_app_name, get_category_from_app_name
+ from matrice_inference.deploy.utils.post_processing.core.config import BaseConfig
+ import base64
+ import cv2
+ import logging
+ import numpy as np
+
+ # Classes
+ class InferenceInterface:
+     """
+     Interface for proxying requests to model servers with optional post-processing.
+     """
+
+     def __init__(self: Any, action_tracker: Any, model_manager: Any, batch_size: int = 1, dynamic_batching: bool = False, post_processing_config: Optional[Union[Dict[str, Any], BaseConfig, str]] = None, custom_post_processing_fn: Optional[Callable] = None, max_batch_wait_time: float = 0.05, app_name: str = '') -> None: ...
+     """
+     Initialize the inference interface.
+
+     Args:
+         action_tracker: Action tracker for category mapping
+         model_manager: Model manager for inference
+         batch_size: Batch size for processing
+         dynamic_batching: Whether to enable dynamic batching
+         post_processing_config: Default post-processing configuration;
+             can be a dict, BaseConfig object, or use-case name string
+         custom_post_processing_fn: Custom post-processing function
+         max_batch_wait_time: Maximum wait time for batching
+         app_name: Application name for automatic config loading
+     """
+
+     async def batch_inference(self: Any, batch_input1: List[Any], batch_input2: Optional[List[Any]] = None, batch_extra_params: Optional[List[Dict[str, Any]]] = None, apply_post_processing: bool = False, post_processing_configs: Optional[List[Union[Dict[str, Any], BaseConfig, str]]] = None, stream_key: Optional[str] = None, stream_info: Optional[Dict[str, Any]] = None, input_hash: Optional[str] = None, camera_info: Optional[Dict[str, Any]] = None) -> List[Tuple[Any, Optional[Dict[str, Any]]]]: ...
+     """
+     Perform batch inference directly, without dynamic batching.
+
+     Args:
+         batch_input1: List of primary input data
+         batch_input2: List of secondary input data (optional)
+         batch_extra_params: List of additional parameters for each inference (optional)
+         apply_post_processing: Whether to apply post-processing
+         post_processing_configs: List of post-processing configurations for each input
+         stream_key: Stream key for the inference
+         stream_info: Stream info for the inference (optional)
+         input_hash: Input hash for the inference (optional)
+         camera_info: Camera info for the inference (optional)
+
+     Returns:
+         List of tuples containing (inference_result, post_processing_result) for each input.
+
+     Raises:
+         ValueError: If input data is invalid
+         RuntimeError: If inference fails
+     """
+
+     def clear_post_processing_cache(self: Any) -> None: ...
+     """
+     Clear the post-processing cache in the underlying processor.
+     """
+
+     async def flush_batch_queue(self: Any) -> int: ...
+     """
+     Force process all remaining items in the batch queue.
+
+     Returns:
+         Number of items processed
+     """
+
+     def get_batch_stats(self: Any) -> Dict[str, Any]: ...
+     """
+     Get statistics about the current batching state.
+     """
+
+     def get_latest_inference_time(self: Any) -> Any: ...
+     """
+     Get the latest inference time.
+     """
+
+     def get_post_processing_cache_stats(self: Any) -> Dict[str, Any]: ...
+     """
+     Get post-processing cache statistics from the underlying processor.
+
+     Returns:
+         Dict[str, Any]: Cache statistics including cached instances and keys
+     """
+
+     async def inference(self: Any, input1: Any, input2: Any = None, extra_params: Any = None, apply_post_processing: bool = False, post_processing_config: Optional[Union[Dict[str, Any], BaseConfig, str]] = None, stream_key: Optional[str] = None, stream_info: Optional[Dict[str, Any]] = None, input_hash: Optional[str] = None, camera_info: Optional[Dict[str, Any]] = None) -> Tuple[Any, Optional[Dict[str, Any]]]: ...
+     """
+     Perform inference using the appropriate client, with optional post-processing.
+
+     Args:
+         input1: Primary input data
+         input2: Secondary input data (optional)
+         extra_params: Additional parameters for inference (optional)
+         apply_post_processing: Whether to apply post-processing
+         post_processing_config: Post-processing configuration (overrides default)
+         stream_key: Stream key for the inference
+         stream_info: Stream info for the inference (optional)
+         input_hash: Input hash for the inference (optional)
+         camera_info: Camera info for the inference (optional)
+
+     Returns:
+         Tuple containing (inference_result, post_processing_result).
+         If post-processing is not applied, post_processing_result will be None.
+         If post-processing is applied, post_processing_result contains the full post-processing metadata.
+
+     Raises:
+         ValueError: If client is not set up
+         RuntimeError: If inference fails
+     """
+
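A typical caller goes through inference() for single inputs or batch_inference() for many; both accept an optional post-processing configuration. A sketch, assuming an ActionTracker and ModelManager built elsewhere; the use-case name string is illustrative:

    from matrice_inference.deploy.server.inference.inference_interface import InferenceInterface

    async def run(action_tracker, model_manager, jpeg_bytes):
        interface = InferenceInterface(
            action_tracker=action_tracker,
            model_manager=model_manager,
            batch_size=4,
            dynamic_batching=True,
            max_batch_wait_time=0.05,
        )
        result, post = await interface.inference(
            jpeg_bytes,
            apply_post_processing=True,
            post_processing_config="people_counting",  # illustrative use-case name
            stream_key="camera-01",
        )
        print(interface.get_batch_stats())
        return result, post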