matrice-1.0.99150-py3-none-any.whl → matrice-1.0.99152-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice/deploy/__init__.py +2 -1
- matrice/deploy/aggregator/aggregator.py +18 -0
- matrice/deploy/aggregator/ingestor.py +32 -50
- matrice/deploy/aggregator/synchronizer.py +5 -15
- matrice/deploy/server/proxy/proxy_utils.py +1 -1
- matrice/deploy/utils/kafka_utils.py +7 -5
- matrice/deploy/utils/post_processing/usecases/fire_detection.py +9 -1
- {matrice-1.0.99150.dist-info → matrice-1.0.99152.dist-info}/METADATA +1 -1
- {matrice-1.0.99150.dist-info → matrice-1.0.99152.dist-info}/RECORD +12 -12
- {matrice-1.0.99150.dist-info → matrice-1.0.99152.dist-info}/WHEEL +0 -0
- {matrice-1.0.99150.dist-info → matrice-1.0.99152.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice-1.0.99150.dist-info → matrice-1.0.99152.dist-info}/top_level.txt +0 -0
matrice/deploy/__init__.py
CHANGED
matrice/deploy/aggregator/aggregator.py
CHANGED
@@ -35,6 +35,7 @@ class ResultsAggregator:
         self._aggregation_thread: Optional[threading.Thread] = None
         self._is_running = False
         self._lock = threading.RLock()
+        self._sent_keys = set()

         # Statistics
         self.stats = {
@@ -149,6 +150,20 @@ class ResultsAggregator:
         stream_key = sync_result.get("stream_key")
         input_order = sync_result.get("input_order")
         stream_group_key = sync_result.get("stream_group_key")
+
+        key = (stream_group_key, stream_key, input_order)
+        if key in self._sent_keys:
+            logging.debug(f"Skipping duplicate result: {key}")
+            return None
+        self._sent_keys.add(key)
+
+        # Basic memory management - prevent unbounded growth
+        if len(self._sent_keys) > 10000:
+            # Remove oldest entries (this is a simple approach)
+            keys_to_remove = list(self._sent_keys)[:2000]
+            for old_key in keys_to_remove:
+                self._sent_keys.discard(old_key)
+            logging.debug(f"Cleaned up {len(keys_to_remove)} old keys from _sent_keys")

         if not stream_key or input_order is None:
             logging.warning("Missing stream_key or input_order in synchronized result")
@@ -265,5 +280,8 @@ class ResultsAggregator:
                 self.aggregated_results_queue.get_nowait()
             except Exception:
                 pass
+
+        # Clear tracking data
+        self._sent_keys.clear()

         logging.info("Results aggregator cleanup completed")
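In short, the aggregator now remembers which (stream_group_key, stream_key, input_order) triples it has already emitted and drops repeats, with a rough size cap on the tracking set. A minimal standalone sketch of that pattern (class and method names here are illustrative, not the package's API):

    import logging

    class DedupTracker:
        """Skip results whose key was already emitted; cap memory by evicting a batch of keys."""

        def __init__(self, max_keys: int = 10000, evict_batch: int = 2000):
            self._sent_keys = set()
            self.max_keys = max_keys
            self.evict_batch = evict_batch

        def should_emit(self, stream_group_key, stream_key, input_order) -> bool:
            key = (stream_group_key, stream_key, input_order)
            if key in self._sent_keys:
                logging.debug("Skipping duplicate result: %s", key)
                return False
            self._sent_keys.add(key)
            if len(self._sent_keys) > self.max_keys:
                # Sets are unordered, so this evicts arbitrary keys rather than strictly the oldest.
                for old_key in list(self._sent_keys)[: self.evict_batch]:
                    self._sent_keys.discard(old_key)
            return True

    tracker = DedupTracker()
    assert tracker.should_emit("groupA", "cam1", 7) is True
    assert tracker.should_emit("groupA", "cam1", 7) is False  # duplicate suppressed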
matrice/deploy/aggregator/ingestor.py
CHANGED
@@ -34,7 +34,6 @@ class ResultsIngestor:
         )

         self.consumer_timeout = consumer_timeout
-        self.max_queue_size = 1000  # Maximum queue size for health monitoring

         # Result queues for each deployment (now using PriorityQueue)
         self.results_queues: Dict[str, PriorityQueue] = {}
@@ -76,36 +75,6 @@ class ResultsIngestor:
         self.results_queues[deployment_id] = PriorityQueue()
         self.stats["queue_sizes"][deployment_id] = 0

-    def _get_priority_counter(self, deployment_id: str, stream_key: str, input_order: int) -> Tuple[int, int, int]:
-        """
-        Get priority tuple for queue ordering with reset detection.
-
-        Returns:
-            Tuple[session_id, temporal_counter, input_order]: Priority tuple for queue ordering
-        """
-        key = (deployment_id, stream_key)
-
-        # Initialize counters if needed
-        if key not in self._counters:
-            self._counters[key] = itertools.count()
-            self._last_input_order[key] = -1
-            self._session_counters[key] = 0
-
-        # Get current temporal counter (always increments)
-        temporal_counter = next(self._counters[key])
-
-        # Detect input_order reset (significant decrease suggests reset)
-        last_input_order = self._last_input_order[key]
-        if last_input_order != -1 and input_order < last_input_order - 5:  # Allow some tolerance for out-of-order
-            # Input order reset detected - increment session counter
-            self._session_counters[key] += 1
-            logging.info(f"Input order reset detected for {deployment_id}:{stream_key} - {last_input_order} -> {input_order}, session: {self._session_counters[key]}")
-
-        # Update last seen input_order
-        self._last_input_order[key] = input_order
-
-        # Return (session_id, temporal_counter, input_order) for proper ordering
-        return (self._session_counters[key], temporal_counter, input_order)

     def start_streaming(self) -> bool:
         """
@@ -189,6 +158,21 @@ class ResultsIngestor:

         logging.info("Results streaming stopped")

+    def _get_priority_counter(self, deployment_id: str, stream_key: str, stream_group_key: str) -> int:
+        """
+        Get priority tuple for queue ordering with reset detection.
+
+        Returns:
+            Tuple[session_id, temporal_counter, input_order]: Priority tuple for queue ordering
+        """
+        key = (deployment_id, stream_key, stream_group_key)
+
+        # Initialize counters if needed
+        if key not in self._counters:
+            self._counters[key] = itertools.count()
+
+        return next(self._counters[key])
+
     def _stream_results_to_queue(
         self, deployment_id: str, results_queue: PriorityQueue
     ):
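The replacement _get_priority_counter simply hands out a monotonically increasing integer per (deployment_id, stream_key, stream_group_key), so items leave the PriorityQueue in arrival order rather than by the upstream input_order. A small sketch of that ordering behaviour (simplified; the function and variable names are illustrative):

    import itertools
    from queue import PriorityQueue

    counters = {}

    def next_order(deployment_id: str, stream_key: str, stream_group_key: str) -> int:
        # One independent counter per (deployment, stream, group) triple.
        key = (deployment_id, stream_key, stream_group_key)
        if key not in counters:
            counters[key] = itertools.count()
        return next(counters[key])

    q = PriorityQueue()
    for payload in ["first", "second", "third"]:
        q.put((next_order("dep1", "cam1", "groupA"), payload))

    while not q.empty():
        order, payload = q.get()
        print(order, payload)  # 0 first / 1 second / 2 third, i.e. arrival order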
@@ -213,39 +197,31 @@ class ResultsIngestor:
                     result_value = result.get("value", {})
                     input_streams = result_value.get("input_streams", [])
                     input_stream = input_streams[0]["input_stream"] if input_streams else {}
-                    input_order = input_stream.get("input_order")
+                    # input_order = input_stream.get("input_order")
                     camera_info = input_stream.get("camera_info")
                     stream_key = camera_info.get("camera_name")
-                    stream_group_key = camera_info.get("camera_group")
+                    stream_group_key = camera_info.get("camera_group") or "default_group"

-                    if not stream_key
+                    if not stream_key:
                         logging.warning(
-                            f"Missing stream_key
-                            f"Stream key: {stream_key},
+                            f"Missing stream_key for deployment {deployment_id}, skipping result. "
+                            f"Stream key: {stream_key}, Stream group: {stream_group_key}"
                         )
                         continue

-
-                    # Get priority tuple with reset detection
-                    priority_tuple = self._get_priority_counter(deployment_id, stream_key, input_order)
-                    session_id, temporal_counter, _ = priority_tuple
-
+                    order = self._get_priority_counter(deployment_id, stream_key, stream_group_key)
                     # Create enhanced result object with the structured response
                     enhanced_result = {
                         "deployment_id": deployment_id,
                         "stream_key": stream_key,
                         "stream_group_key": stream_group_key,
-                        "input_order":
-                        "session_id": session_id,  # Add session ID for proper synchronization
-                        "temporal_counter": temporal_counter,  # Add temporal counter for debugging
+                        "input_order": order,
                         "timestamp": time.time(),
                         "result": result_value,  # TODO: check if should send this or just agg_summary
                     }

-                    # Add to priority queue (non-blocking) with proper ordering
-                    # Priority: (session_id, temporal_counter, input_order) ensures correct ordering even with resets
                     try:
-                        results_queue.put((
+                        results_queue.put((order, enhanced_result), block=False)

                         with self._lock:
                             self.stats["results_consumed"] += 1
@@ -367,10 +343,10 @@ class ResultsIngestor:
                 total_queue_size += queue_size

                 # Mark as degraded if queue is getting full
-                if queue_size >
+                if queue_size > 1000:
                     health["status"] = "degraded"
-                    health["reason"] = f"Queue for {deployment_id} nearly full ({queue_size}
-                    logging.warning(f"Ingestor degraded: {deployment_id} queue has {queue_size} items
+                    health["reason"] = f"Queue for {deployment_id} nearly full ({queue_size})"
+                    logging.warning(f"Ingestor degraded: {deployment_id} queue has {queue_size} items")

             # Check for recent errors (within last 60 seconds)
             if (
@@ -419,4 +395,10 @@ class ResultsIngestor:
                 pass

         self.results_queues.clear()
+
+        # Clear tracking data
+        self._counters.clear()
+        self._last_input_order.clear()
+        self._session_counters.clear()
+
         logging.info("Results streamer cleanup completed")
matrice/deploy/aggregator/synchronizer.py
CHANGED
@@ -15,7 +15,7 @@ class ResultsSynchronizer:
     def __init__(
         self,
         results_queues: Dict[str, PriorityQueue],
-        sync_timeout: float =
+        sync_timeout: float = 60.0,
     ):
         """
         Initialize the results synchronizer.
@@ -79,18 +79,10 @@ class ResultsSynchronizer:
                 result = priority_result[1]  # Get actual result (2nd element, after priority tuple)

                 stream_key = result.get("stream_key")
-                input_order = result.get("input_order")
-                session_id = result.get("session_id")
                 stream_group_key = result.get("stream_group_key")
+                input_order = result.get("input_order")

-
-                    logging.warning(
-                        f"Result missing stream_key, input_order, or session_id from {deployment_id}, skipping. "
-                        f"Stream key: {stream_key}, Input order: {input_order}, Session ID: {session_id}, Stream group: {stream_group_key}"
-                    )
-                    continue
-
-                key = (stream_key, session_id, input_order)
+                key = (stream_group_key, stream_key, input_order)
                 current_time = time.time()

                 with self._lock:
@@ -105,7 +97,7 @@ class ResultsSynchronizer:
                     results_collected += 1

                     logging.debug(
-                        f"Collected result from {deployment_id} for stream {stream_key},
+                        f"Collected result from {deployment_id} for stream {stream_key}, stream group {stream_group_key}"
                     )

             except Empty:
@@ -127,7 +119,7 @@ class ResultsSynchronizer:
         is_timeout: bool,
     ) -> Dict:
         """Create a synchronized result dictionary with enhanced metadata."""
-
+        stream_group_key, stream_key, input_order = key
         current_time = time.time()
         sync_start_time = self._result_timestamps.get(key, current_time)
         sync_duration = current_time - sync_start_time
@@ -155,7 +147,6 @@ class ResultsSynchronizer:
         synchronized_result = {
             "stream_key": stream_key,
             "input_order": input_order,
-            "session_id": session_id,  # Include session ID for reset tracking
             "stream_group_key": stream_group_key,
             "deployment_results": deployment_results.copy(),
             "synchronization_metadata": {
@@ -177,7 +168,6 @@ class ResultsSynchronizer:
                 ),
                 "sync_completeness_ratio": len(deployment_results) / len(self.deployment_ids),
                 "synchronizer_version": "2.0",  # Updated version for session support
-                "session_id": session_id,  # Include session ID in metadata
             },
         }

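With session_id gone, the synchronizer now buckets incoming results purely by (stream_group_key, stream_key, input_order) and emits a bundle once every deployment has contributed or the sync timeout expires. A rough sketch of that grouping idea; the helper names and the exact completeness rule are assumptions for illustration, not the package's implementation:

    import time

    deployment_ids = ["dep1", "dep2"]
    pending = {}         # key -> {deployment_id: result}
    started_at = {}      # key -> first-seen timestamp
    SYNC_TIMEOUT = 60.0  # matches the new default sync_timeout

    def add_result(deployment_id, result):
        """Return a synchronized bundle once all deployments reported (or the bucket timed out)."""
        key = (result["stream_group_key"], result["stream_key"], result["input_order"])
        started_at.setdefault(key, time.time())
        bucket = pending.setdefault(key, {})
        bucket[deployment_id] = result

        complete = len(bucket) == len(deployment_ids)
        timed_out = time.time() - started_at[key] > SYNC_TIMEOUT
        if not (complete or timed_out):
            return None

        pending.pop(key)
        started_at.pop(key)
        return {
            "stream_group_key": key[0],
            "stream_key": key[1],
            "input_order": key[2],
            "deployment_results": bucket,
            "sync_completeness_ratio": len(bucket) / len(deployment_ids),
        }

    # One result from each deployment completes the (group, stream, order) bucket.
    add_result("dep1", {"stream_group_key": "g", "stream_key": "cam1", "input_order": 0, "data": 1})
    out = add_result("dep2", {"stream_group_key": "g", "stream_key": "cam1", "input_order": 0, "data": 2})
    print(out["sync_completeness_ratio"])  # 1.0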
matrice/deploy/server/proxy/proxy_utils.py
CHANGED
@@ -73,7 +73,7 @@ class AuthKeyValidator:
         else:
             self.auth_keys_info = []
         if not self.auth_keys_info:
-            logging.
+            logging.debug("No auth keys found for deployment")
             return
         current_time = time.time()
         self.auth_keys.clear()
matrice/deploy/utils/kafka_utils.py
CHANGED
@@ -64,17 +64,19 @@ class KafkaUtils:
         """
         producer_config = {
             "bootstrap.servers": self.bootstrap_servers,
-            "acks": "
+            "acks": "1",
             "retries": 1,
             "retry.backoff.ms": 500,
             "request.timeout.ms": 30000,
             "max.in.flight.requests.per.connection": 1,
-            "linger.ms":
-            "batch.size":
+            "linger.ms": 50,
+            "batch.size": 8388608,  # 8MB
             "queue.buffering.max.ms": 100,
             "message.max.bytes": 25000000,  # 25MB
-            'queue.buffering.max.messages':
-
+            'queue.buffering.max.messages': 10000,
+            "delivery.timeout.ms": 600000,
+            "request.timeout.ms": 600000,
+            "compression.type": "snappy"
         }

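The new producer settings are librdkafka-style properties: acks=1, 50 ms linger, 8 MB batches, snappy compression, and 10-minute delivery/request timeouts. Note that "request.timeout.ms" now appears twice in the dict literal, so the later 600000 value is the one that takes effect. Assuming a confluent-kafka client underneath (an assumption, not confirmed by this diff), wiring such a config into a producer looks roughly like this; the bootstrap address, topic, and payload are placeholders:

    # Sketch only: assumes the confluent-kafka library, not the package's actual wrapper code.
    from confluent_kafka import Producer

    producer_config = {
        "bootstrap.servers": "localhost:9092",   # placeholder
        "acks": "1",
        "retries": 1,
        "retry.backoff.ms": 500,
        "max.in.flight.requests.per.connection": 1,
        "linger.ms": 50,
        "batch.size": 8388608,                   # 8MB batches
        "queue.buffering.max.ms": 100,
        "message.max.bytes": 25000000,           # 25MB
        "queue.buffering.max.messages": 10000,
        "delivery.timeout.ms": 600000,           # allow up to 10 minutes end to end
        "request.timeout.ms": 600000,
        "compression.type": "snappy",
    }

    def on_delivery(err, msg):
        # Invoked from poll()/flush() with the per-message delivery outcome.
        if err is not None:
            print(f"Delivery failed: {err}")

    producer = Producer(producer_config)
    producer.produce("example-topic", value=b"hello", callback=on_delivery)
    producer.flush()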
matrice/deploy/utils/post_processing/usecases/fire_detection.py
CHANGED
@@ -272,7 +272,7 @@ class FireSmokeUseCase(BaseProcessor):

         for category, threshold in config.alert_config.count_thresholds.items():
             if category == "all" and total > threshold:
-
+                print("-----------ALERTS--INNN-----------")
                 alerts.append({
                     "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                     "alert_id": "alert_"+category+'_'+frame_key,
@@ -284,6 +284,7 @@ class FireSmokeUseCase(BaseProcessor):
                     }
                 })
             elif category in summary.get("per_category_count", {}):
+                print("-----------ALERTS--INNN 2-----------")
                 count = summary.get("per_category_count", {})[category]
                 if count > threshold:  # Fixed logic: alert when EXCEEDING threshold
                     alerts.append({
@@ -333,6 +334,7 @@ class FireSmokeUseCase(BaseProcessor):

         for category, threshold in config.alert_config.count_thresholds.items():
             if category in summary.get("per_category_count", {}):
+                print("-----------INCIDENTSS--INNN-----------")
                 #count = summary.get("per_category_count", {})[category]
                 start_timestamp = self._get_start_timestamp_str(stream_info)
                 if start_timestamp and self.current_incident_end_timestamp=='N/A':
@@ -581,11 +583,17 @@ class FireSmokeUseCase(BaseProcessor):
             det for det in data
             if det.get("category", "").lower() in valid_categories
         ]
+        counts = {}
+        for det in detections:
+            cat = det.get('category', 'unknown')
+            counts[cat] = counts.get(cat, 0) + 1
+

         summary = {
             "total_objects": len(detections),
             "by_category": {},
             "detections": detections,
+            "per_category_count": counts,
         }

         # Count by each category defined in config
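The fire-detection change adds a per_category_count entry built with a manual counting loop; the same tally can be expressed with collections.Counter. A standalone equivalent (the sample detection dicts are illustrative):

    from collections import Counter

    detections = [
        {"category": "fire", "confidence": 0.91},
        {"category": "smoke", "confidence": 0.72},
        {"category": "fire", "confidence": 0.66},
    ]

    # Equivalent to the added loop: count detections per category label.
    counts = dict(Counter(det.get("category", "unknown") for det in detections))
    print(counts)  # {'fire': 2, 'smoke': 1}

    summary = {
        "total_objects": len(detections),
        "detections": detections,
        "per_category_count": counts,
    }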
{matrice-1.0.99150.dist-info → matrice-1.0.99152.dist-info}/RECORD
CHANGED
@@ -89,13 +89,13 @@ matrice/data_processing/data_formats/video_mot_tracking.py,sha256=WlTikrOyVU6O_n
 matrice/data_processing/data_formats/video_mscoco_detection.py,sha256=Z4e0GXcNrj-awwM-rIOlNFJqrb0cZD3KeFrnkYJjT1A,17066
 matrice/data_processing/data_formats/video_youtube_bb_tracking.py,sha256=6RbuBOW1kfw0E626jGEZ0i5P3upgx8D5EQ0hsNVc9vs,10353
 matrice/data_processing/data_formats/yolo_detection.py,sha256=qUeZA7_8Of_QWGZlHh-mhRnBFtb5A_u89Oihx-Meg3c,10018
-matrice/deploy/__init__.py,sha256=
+matrice/deploy/__init__.py,sha256=7rpz_oCPpW5q_NYuLz7CvRh2VRQnPgYbWIdo_PHvOmU,1238
 matrice/deploy/aggregator/__init__.py,sha256=HFz-ufUMeSpSyRINcrx6NdmrcuVZtPfyIXxYu5UNLTc,508
-matrice/deploy/aggregator/aggregator.py,sha256=
-matrice/deploy/aggregator/ingestor.py,sha256=
+matrice/deploy/aggregator/aggregator.py,sha256=T5S4qLBkCKs_lFaT83AS1dp422mzhty1NmJkGU00KhU,11447
+matrice/deploy/aggregator/ingestor.py,sha256=4SwCDvo1ZBwNUJECbHYGcvxGz6U7CSugV5SFmLauBpY,15423
 matrice/deploy/aggregator/pipeline.py,sha256=xOKDqb4Qre5Ek3Fr-03C-jxpZNXdQ2_Dk5EaOn4ssYE,33948
 matrice/deploy/aggregator/publisher.py,sha256=SRPnyindD_R_QNK5MJ_WZAeAaGtj79CZ1VNoltsQtXM,15587
-matrice/deploy/aggregator/synchronizer.py,sha256=
+matrice/deploy/aggregator/synchronizer.py,sha256=tPFwTWPHnF9c4memBoVB0b6n7v-vvUbdNeWPP4CH6bE,19662
 matrice/deploy/client/__init__.py,sha256=d7yxlWNHYKOoAM9T_AlrSQw1_9cfLEop4zBE0QqrTVw,1330
 matrice/deploy/client/client.py,sha256=OnDxQf9m0mGR8a7BlYgsQI8hop6G42cbqHDsYRiW8cI,28533
 matrice/deploy/client/client_stream_utils.py,sha256=FlG6GkVDMVG9EgKV0-hDi-fP3wJ_AEM0XaTe_TV6pJc,37735
@@ -119,9 +119,9 @@ matrice/deploy/server/inference/model_manager.py,sha256=gvCteBmXAAEtjmfWbJqE2CbY
 matrice/deploy/server/inference/triton_utils.py,sha256=fwnrydcdkm9ZdfyRxW2ddTZhN9sVLejAY72ZCGF4XBs,23342
 matrice/deploy/server/proxy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 matrice/deploy/server/proxy/proxy_interface.py,sha256=f_kNp5Y5WDBXKokoZXBMIS7SGwoYtvSMLhNzKqm9oso,8495
-matrice/deploy/server/proxy/proxy_utils.py,sha256
+matrice/deploy/server/proxy/proxy_utils.py,sha256=-ne310WJilwhEK-4vn3-shsSi5V8PmJPtc6OspRYSXE,10423
 matrice/deploy/utils/__init__.py,sha256=-qzjYt4wl6dxSgmWQzcE5f3-Fp25zmXwtDMZAvreS4E,675
-matrice/deploy/utils/kafka_utils.py,sha256=
+matrice/deploy/utils/kafka_utils.py,sha256=e5ex7oZLchlIVSvuHmm_FquvQiKk5iZdMcfttROwEVE,56467
 matrice/deploy/utils/boundary_drawing_internal/__init__.py,sha256=4mUOm5_T-vf-XA-kS8EKuuDwUIEV2cTYcYsfApe2CbU,1588
 matrice/deploy/utils/boundary_drawing_internal/boundary_drawing_internal.py,sha256=5SPGXS9EIhJJtvC5qTBBmOTQqSKU2byxHIFgo6Bmt-U,43944
 matrice/deploy/utils/boundary_drawing_internal/boundary_drawing_tool.py,sha256=eY0VQGZ8BfTmR4_ThIAXaumBjh8_c7w69w-d3kta8p0,15421
@@ -175,7 +175,7 @@ matrice/deploy/utils/post_processing/usecases/emergency_vehicle_detection.py,sha
 matrice/deploy/utils/post_processing/usecases/face_emotion.py,sha256=eRfqBdryB0uNoOlz_y-JMuZL1BhPWrI-odqgx_9LT7s,39132
 matrice/deploy/utils/post_processing/usecases/fashion_detection.py,sha256=f9gpzMDhIW-gyn46k9jgf8nY7YeoqAnTxGOzksabFbE,40457
 matrice/deploy/utils/post_processing/usecases/field_mapping.py,sha256=JDwYX8pd2W-waDvBh98Y_o_uchJu7wEYbFxOliA4Iq4,39822
-matrice/deploy/utils/post_processing/usecases/fire_detection.py,sha256=
+matrice/deploy/utils/post_processing/usecases/fire_detection.py,sha256=w0vCthr1adHpMgJNSh_b9Ej2s_17q6GGN90iCsKnaLI,39467
 matrice/deploy/utils/post_processing/usecases/flare_analysis.py,sha256=-egmS3Hs_iGOLeCMfapbkfQ04EWtZx97QRuUcDa-jMU,45340
 matrice/deploy/utils/post_processing/usecases/flower_segmentation.py,sha256=4I7qMx9Ztxg_hy9KTVX-3qBhAN-QwDt_Yigf9fFjLus,52017
 matrice/deploy/utils/post_processing/usecases/gender_detection.py,sha256=DEnCTRew6B7DtPcBQVCTtpd_IQMvMusBcu6nadUg2oM,40107
@@ -227,8 +227,8 @@ matrice/deployment/camera_manager.py,sha256=ReBZqm1CNXRImKcbcZ4uWAT3TUWkof1D28oB
 matrice/deployment/deployment.py,sha256=PLIUD-PxTaC2Zxb3Y12wUddsryV-OJetjCjLoSUh7S4,48103
 matrice/deployment/inference_pipeline.py,sha256=bXLgd29ViA7o0c7YWLFJl1otBUQfTPb61jS6VawQB0Y,37918
 matrice/deployment/streaming_gateway_manager.py,sha256=w5swGsuFVfZIdOm2ZuBHRHlRdYYJMLopLsf2gb91lQ8,20946
-matrice-1.0.
-matrice-1.0.
-matrice-1.0.
-matrice-1.0.
-matrice-1.0.
+matrice-1.0.99152.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
+matrice-1.0.99152.dist-info/METADATA,sha256=4fSj25PDfdJgCXR6iIiqvshkaPJ21CRocpbIqqOkyEI,14624
+matrice-1.0.99152.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+matrice-1.0.99152.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
+matrice-1.0.99152.dist-info/RECORD,,
{matrice-1.0.99150.dist-info → matrice-1.0.99152.dist-info}/WHEEL
File without changes
{matrice-1.0.99150.dist-info → matrice-1.0.99152.dist-info}/licenses/LICENSE.txt
File without changes
{matrice-1.0.99150.dist-info → matrice-1.0.99152.dist-info}/top_level.txt
File without changes