matrice 1.0.99145__py3-none-any.whl → 1.0.99146__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -71,11 +71,13 @@ from .usecases.license_plate_detection import LicensePlateConfig
71
71
  from .usecases.pothole_segmentation import PotholeConfig
72
72
  from .usecases.wound_segmentation import WoundConfig, WoundSegmentationUseCase
73
73
  from .usecases.face_emotion import FaceEmotionConfig
74
+ from .usecases.pipeline_detection import PipelineDetectionUseCase
74
75
  from .usecases.parking_space_detection import ParkingSpaceConfig
75
76
  from .usecases.underwater_pollution_detection import UnderwaterPlasticConfig
76
77
  from .usecases.pedestrian_detection import PedestrianDetectionConfig
77
78
  from .usecases.age_detection import AgeDetectionConfig
78
79
  from .usecases.mask_detection import MaskDetectionConfig
80
+ from .usecases.pipeline_detection import PipelineDetectionConfig
79
81
  from .usecases.chicken_pose_detection import ChickenPoseDetectionConfig
80
82
  from .usecases.field_mapping import FieldMappingConfig, FieldMappingUseCase
81
83
  from .usecases.leaf_disease import LeafDiseaseDetectionConfig, LeafDiseaseDetectionUseCase
@@ -193,6 +195,7 @@ _underwater_pollution_detection = UnderwaterPlasticUseCase()
193
195
  _pedestrian_detection = PedestrianDetectionUseCase()
194
196
  _age_detection = AgeDetectionUseCase()
195
197
  _mask_detection = MaskDetectionUseCase()
198
+ _pipeline_detection = PipelineDetectionUseCase()
196
199
  _banana_defect_detection = BananaMonitoringUseCase()
197
200
  _chicken_pose_detection = ChickenPoseDetectionUseCase()
198
201
  _theft_detection = TheftDetectionUseCase()
@@ -256,6 +259,7 @@ registry.register_use_case(_age_detection.category, _age_detection.name, AgeDete
256
259
  registry.register_use_case(_pricetag_detection.category, _pricetag_detection.name, PriceTagUseCase)
257
260
  registry.register_use_case(_weld_defect_detection.category, _weld_defect_detection.name, WeldDefectUseCase )
258
261
  registry.register_use_case(_mask_detection.category, _mask_detection.name, MaskDetectionUseCase)
262
+ registry.register_use_case(_pipeline_detection.category, _pipeline_detection.name, PipelineDetectionUseCase)
259
263
  registry.register_use_case(_banana_defect_detection.category, _banana_defect_detection.name, BananaMonitoringUseCase)
260
264
  registry.register_use_case(_chicken_pose_detection.category, _chicken_pose_detection.name, ChickenPoseDetectionUseCase)
261
265
  registry.register_use_case(_theft_detection.category, _theft_detection.name, TheftDetectionUseCase)
@@ -1,6 +1,7 @@
1
1
  APP_NAME_TO_USECASE = {
2
2
  "people_counting": "people_counting",
3
3
  "mask_detection": "mask_detection",
4
+ "pipeline_detection": "pipeline_detection",
4
5
  "vehicle_monitoring": "vehicle_monitoring",
5
6
  "vehicle_type_monitoring": "vehicle_monitoring",
6
7
  "weapon_detection": "weapon_detection",
@@ -33,6 +34,7 @@ APP_NAME_TO_USECASE = {
33
34
  APP_NAME_TO_CATEGORY = {
34
35
  "people_counting": "general",
35
36
  "mask_detection": "mask_detection",
37
+ "pipeline_detection": "pipeline_detection",
36
38
  "vehicle_monitoring": "traffic",
37
39
  "vehicle_type_monitoring": "traffic",
38
40
  "weapon_detection": "security",
@@ -415,6 +415,7 @@ class ConfigManager:
415
415
  "fire_smoke_detection": None,
416
416
  "flare_analysis" : None,
417
417
  "mask_detection": None,
418
+ "pipeline_detection": None,
418
419
  "parking_space_detection": None,
419
420
  "car_damage_detection":None,
420
421
  "weld_defect_detection" : None,
@@ -592,6 +593,7 @@ class ConfigManager:
592
593
  return ParkingSpaceConfig
593
594
  except ImportError:
594
595
  return None
596
+
595
597
  def _get_mask_detection_config_class(self):
596
598
  """Register a configuration class for a use case."""
597
599
  try:
@@ -600,6 +602,14 @@ class ConfigManager:
600
602
  except ImportError:
601
603
  return None
602
604
 
605
    def _get_pipeline_detection_config_class(self):
        """Return the PipelineDetectionConfig class, or None if it cannot be imported."""
        try:
            from ..usecases.pipeline_detection import PipelineDetectionConfig
            return PipelineDetectionConfig
        except ImportError:
            return None
612
+
603
613
  def _get_pothole_segmentation_config_class(self):
604
614
  """Register a configuration class for a use case."""
605
615
  try:
@@ -972,6 +982,19 @@ class ConfigManager:
972
982
  alert_config=alert_config,
973
983
  **kwargs
974
984
  )
985
+ elif usecase == "pipeline_detection":
986
+ from ..usecases.pipeline_detection import PipelineDetectionConfig
987
+
988
+ alert_config = kwargs.pop("alert_config", None)
989
+ if alert_config and isinstance(alert_config, dict):
990
+ alert_config = AlertConfig(**alert_config)
991
+
992
+ config = PipelineDetectionConfig(
993
+ category=category or "pipeline_detection",
994
+ usecase=usecase,
995
+ alert_config=alert_config,
996
+ **kwargs
997
+ )
975
998
  elif usecase == "shoplifting_detection":
976
999
  # Import here to avoid circular import
977
1000
  from ..usecases.shoplifting_detection import ShopliftingDetectionConfig
@@ -1759,6 +1782,11 @@ class ConfigManager:
1759
1782
  from ..usecases.mask_detection import MaskDetectionConfig
1760
1783
  default_config = MaskDetectionConfig()
1761
1784
  return default_config.to_dict()
1785
+
1786
+ elif usecase == "pipeline_detection":
1787
+ from ..usecases.pipeline_detection import PipelineDetectionConfig
1788
+ default_config = PipelineDetectionConfig()
1789
+ return default_config.to_dict()
1762
1790
 
1763
1791
  elif usecase == "fire_smoke_detection":
1764
1792
  # Import here to avoid circular import
@@ -51,6 +51,7 @@ from .usecases import (
51
51
  ParkingUseCase,
52
52
  FaceEmotionUseCase,
53
53
  UnderwaterPlasticUseCase,
54
+ PipelineDetectionUseCase,
54
55
  PedestrianDetectionUseCase,
55
56
  ChickenPoseDetectionUseCase,
56
57
  TheftDetectionUseCase,
@@ -175,6 +176,7 @@ class PostProcessor:
175
176
  registry.register_use_case("weld", "weld_defect_detection", WeldDefectUseCase)
176
177
  registry.register_use_case("price_tag", "price_tag_detection", PriceTagUseCase)
177
178
  registry.register_use_case("mask_detection", "mask_detection", MaskDetectionUseCase)
179
+ registry.register_use_case("pipeline_detection", "pipeline_detection", PipelineDetectionUseCase)
178
180
  registry.register_use_case("automobile", "distracted_driver_detection", DistractedDriverUseCase)
179
181
  registry.register_use_case("traffic", "emergency_vehicle_detection", EmergencyVehicleUseCase)
180
182
  registry.register_use_case("energy", "solar_panel", SolarPanelUseCase)
@@ -28,6 +28,7 @@ from .banana_defect_detection import BananaMonitoringUseCase,BananaMonitoringCon
28
28
  from .car_damage_detection import CarDamageConfig, CarDamageDetectionUseCase
29
29
  from .price_tag_detection import PriceTagConfig, PriceTagUseCase
30
30
  from .mask_detection import MaskDetectionConfig, MaskDetectionUseCase
31
+ from .pipeline_detection import PipelineDetectionConfig, PipelineDetectionUseCase
31
32
  from .distracted_driver_detection import DistractedDriverUseCase, DistractedDriverConfig
32
33
  from .emergency_vehicle_detection import EmergencyVehicleUseCase, EmergencyVehicleConfig
33
34
  from .solar_panel import SolarPanelUseCase, SolarPanelConfig
@@ -0,0 +1,605 @@
1
+ """
2
+ Pipeline Monitoring Use Case for Post-Processing
3
+
4
+ This module provides Pipeline monitoring functionality with congestion detection,
5
+ zone analysis, and alert generation.
6
+
7
+ """
8
+
9
+ from typing import Any, Dict, List, Optional
10
+ from dataclasses import asdict
11
+ import time
12
+ from datetime import datetime, timezone
13
+
14
+ from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
15
+ from ..utils import (
16
+ filter_by_confidence,
17
+ filter_by_categories,
18
+ apply_category_mapping,
19
+ count_objects_by_category,
20
+ count_objects_in_zones,
21
+ calculate_counting_summary,
22
+ match_results_structure,
23
+ bbox_smoothing,
24
+ BBoxSmoothingConfig,
25
+ BBoxSmoothingTracker
26
+ )
27
+ from dataclasses import dataclass, field
28
+ from ..core.config import BaseConfig, AlertConfig, ZoneConfig
29
+
30
+
31
@dataclass
class PipelineDetectionConfig(BaseConfig):
    """Configuration for the pipeline (pipe) detection use case.

    Extends BaseConfig with smoothing, confidence, category and alert
    settings consumed by PipelineDetectionUseCase.process().
    """
    # --- Bounding-box smoothing configuration ---
    enable_smoothing: bool = True
    smoothing_algorithm: str = "observability"  # "window" or "observability"
    smoothing_window_size: int = 20
    smoothing_cooldown_frames: int = 5
    smoothing_confidence_range_factor: float = 0.5

    # Minimum detection confidence retained by the confidence filter.
    confidence_threshold: float = 0.4

    # Every category this use case understands.
    usecase_categories: List[str] = field(
        default_factory=lambda: ['pipe']
    )

    # Categories actually counted/tracked (subset of usecase_categories).
    target_categories: List[str] = field(
        default_factory=lambda: ['pipe']
    )

    # Optional alert thresholds/settings; no alerts are produced when None.
    alert_config: Optional[AlertConfig] = None

    # Maps raw model class indices to category names.
    index_to_category: Optional[Dict[int, str]] = field(
        default_factory=lambda: {
            0: "pipe"
        }
    )
59
+
60
+
61
class PipelineDetectionUseCase(BaseProcessor):
    """Post-processing use case that detects, tracks and counts pipes."""

    # Human-friendly display names for categories
    CATEGORY_DISPLAY = {
        "pipe": "pipe"
    }
    def __init__(self):
        """Initialize processor identity, tracker handles and alert state."""
        super().__init__("pipeline_detection")
        self.category = "pipeline_detection"

        # Identifiers stamped onto incidents and alerts.
        self.CASE_TYPE: Optional[str] = 'pipeline_detection'
        self.CASE_VERSION: Optional[str] = '1.0'

        # List of categories to track
        self.target_categories = ['pipe']

        # BBoxSmoothingTracker, created lazily on first process() call.
        self.smoothing_tracker = None

        # Initialize advanced tracker (will be created on first use)
        self.tracker = None

        # Initialize tracking state variables
        self._total_frame_counter = 0
        self._global_frame_offset = 0

        # Track start time for "TOTAL SINCE" calculation
        self._tracking_start_time = None

        # raw tracker id -> canonical id, and canonical id -> last seen
        # bbox/update-time info; used to merge fragmented tracks.
        self._track_aliases: Dict[Any, Any] = {}
        self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
        # Tunable parameters – adjust if necessary for specific scenarios
        self._track_merge_iou_threshold: float = 0.05  # IoU ≥ 0.05 → candidates for merging
        self._track_merge_time_window: float = 7.0  # seconds within which to merge

        # Rolling per-frame severity history (0-3) used for trend detection.
        self._ascending_alert_list: List[int] = []
        self.current_incident_end_timestamp: str = "N/A"
97
+
98
+
99
    def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
                stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
        """Run pipeline-detection post-processing for one frame of raw results.

        Steps: confidence filter -> index->category mapping -> category filter
        -> optional bbox smoothing -> (best-effort) tracking -> counting ->
        alerts/incidents/tracking-stats/summary aggregated under the frame key.

        Returns a ProcessingResult whose data holds {"agg_summary": {...}}.
        """
        start_time = time.time()
        # Fail fast on a mismatched config object.
        if not isinstance(config, PipelineDetectionConfig):
            return self.create_error_result("Invalid config type", usecase=self.name, category=self.category, context=context)
        if context is None:
            context = ProcessingContext()

        input_format = match_results_structure(data)
        context.input_format = input_format
        context.confidence_threshold = config.confidence_threshold

        if config.confidence_threshold is not None:
            processed_data = filter_by_confidence(data, config.confidence_threshold)
            self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
        else:
            processed_data = data
            self.logger.debug(f"Did not apply confidence filtering with threshold since nothing was provided")

        if config.index_to_category:
            processed_data = apply_category_mapping(processed_data, config.index_to_category)
            self.logger.debug("Applied category mapping")

        # NOTE(review): gate is config.target_categories but the filter uses
        # self.target_categories — confirm this asymmetry is intended.
        if config.target_categories:
            processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
            self.logger.debug(f"Applied category filtering")

        if config.enable_smoothing:
            # Build the smoothing tracker once, from the active config.
            if self.smoothing_tracker is None:
                smoothing_config = BBoxSmoothingConfig(
                    smoothing_algorithm=config.smoothing_algorithm,
                    window_size=config.smoothing_window_size,
                    cooldown_frames=config.smoothing_cooldown_frames,
                    confidence_threshold=config.confidence_threshold,
                    confidence_range_factor=config.smoothing_confidence_range_factor,
                    enable_smoothing=True
                )
                self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
            processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)

        # Tracking is best-effort: a tracker failure must not break detection.
        try:
            from ..advanced_tracker import AdvancedTracker
            from ..advanced_tracker.config import TrackerConfig
            if self.tracker is None:
                tracker_config = TrackerConfig()
                self.tracker = AdvancedTracker(tracker_config)
                self.logger.info("Initialized AdvancedTracker for Pipeline Monitoring and tracking")
            processed_data = self.tracker.update(processed_data)
        except Exception as e:
            self.logger.warning(f"AdvancedTracker failed: {e}")

        self._update_tracking_state(processed_data)
        self._total_frame_counter += 1

        # A concrete frame number is only known for single-frame inputs
        # (start_frame == end_frame); otherwise the key is "None".
        frame_number = None
        if stream_info:
            input_settings = stream_info.get("input_settings", {})
            start_frame = input_settings.get("start_frame")
            end_frame = input_settings.get("end_frame")
            if start_frame is not None and end_frame is not None and start_frame == end_frame:
                frame_number = start_frame

        general_counting_summary = calculate_counting_summary(data)
        counting_summary = self._count_categories(processed_data, config)
        total_counts = self.get_total_counts()
        counting_summary['total_counts'] = total_counts

        alerts = self._check_alerts(counting_summary, frame_number, config)
        predictions = self._extract_predictions(processed_data)

        incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
        tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
        business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, stream_info, is_empty=True)
        summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)

        # Unwrap one-element lists into single dicts for the aggregate payload.
        incidents = incidents_list[0] if incidents_list else {}
        tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
        business_analytics = business_analytics_list[0] if business_analytics_list else {}
        summary = summary_list[0] if summary_list else {}
        agg_summary = {str(frame_number): {
            "incidents": incidents,
            "tracking_stats": tracking_stats,
            "business_analytics": business_analytics,
            "alerts": alerts,
            "human_text": summary}
        }

        context.mark_completed()

        result = self.create_result(
            data={"agg_summary": agg_summary},
            usecase=self.name,
            category=self.category,
            context=context
        )
        return result
195
+
196
+
197
    def _check_alerts(self, summary: dict, frame_number:Any, config: PipelineDetectionConfig) -> List[Dict]:
        """Build alert dicts for any configured count thresholds exceeded this frame.

        Returns an empty list when no alert_config is present or no threshold
        is crossed.
        """
        def get_trend(data, lookback=900, threshold=0.6):
            # Majority-vote trend over the last `lookback` severity samples:
            # True when >= threshold of steps are non-decreasing, False when
            # >= threshold are decreasing.
            # NOTE(review): when the ratio falls strictly between the two
            # bounds this function falls through and returns None — confirm
            # consumers treat None acceptably (it is stored in "ascending").
            window = data[-lookback:] if len(data) >= lookback else data
            if len(window) < 2:
                return True
            increasing = 0
            total = 0
            for i in range(1, len(window)):
                if window[i] >= window[i - 1]:
                    increasing += 1
                total += 1
            ratio = increasing / total
            if ratio >= threshold:
                return True
            elif ratio <= (1 - threshold):
                return False

        frame_key = str(frame_number) if frame_number is not None else "current_frame"
        alerts = []
        total_detections = summary.get("total_count", 0)
        total_counts_dict = summary.get("total_counts", {})
        cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0
        per_category_count = summary.get("per_category_count", {})

        # No alert configuration means alerts are disabled.
        if not config.alert_config:
            return alerts

        total = summary.get("total_count", 0)
        if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
            for category, threshold in config.alert_config.count_thresholds.items():
                # "all" thresholds compare against the frame-wide total;
                # otherwise compare the per-category count.
                if category == "all" and total > threshold:
                    alerts.append({
                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                        "alert_id": "alert_"+category+'_'+frame_key,
                        "incident_category": self.CASE_TYPE,
                        "threshold_level": threshold,
                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'], getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])}
                    })
                elif category in summary.get("per_category_count", {}):
                    count = summary.get("per_category_count", {})[category]
                    if count > threshold:
                        alerts.append({
                            "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                            "alert_id": "alert_"+category+'_'+frame_key,
                            "incident_category": self.CASE_TYPE,
                            "threshold_level": threshold,
                            "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                            "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'], getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])}
                        })
        return alerts
248
+
249
    def _generate_incidents(self, counting_summary: Dict, alerts: List, config: PipelineDetectionConfig, frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
        """Produce a one-element incident list for this frame (or [{}] when idle).

        Severity is graded from the detection count — against the configured
        "all" threshold when present, otherwise against fixed cutoffs — and
        each frame's severity code (0-3) is appended to the rolling
        _ascending_alert_list used for trend/incident-end decisions.
        """
        incidents = []
        total_detections = counting_summary.get("total_count", 0)
        current_timestamp = self._get_current_timestamp_str(stream_info)
        camera_info = self.get_camera_info_from_stream(stream_info)
        # Cap the severity history at the last 900 samples.
        self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list
        if total_detections > 0:
            level = "low"
            intensity = 5.0
            start_timestamp = self._get_start_timestamp_str(stream_info)
            # Incident end-timestamp state machine: N/A -> "Incident still
            # active" -> concrete timestamp once recent severity subsides.
            if start_timestamp and self.current_incident_end_timestamp=='N/A':
                self.current_incident_end_timestamp = 'Incident still active'
            elif start_timestamp and self.current_incident_end_timestamp=='Incident still active':
                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
                    self.current_incident_end_timestamp = current_timestamp
            elif self.current_incident_end_timestamp!='Incident still active' and self.current_incident_end_timestamp!='N/A':
                self.current_incident_end_timestamp = 'N/A'
            if config.alert_config and config.alert_config.count_thresholds:
                # Scale intensity by the configured "all" threshold (default 15).
                threshold = config.alert_config.count_thresholds.get("all", 15)
                intensity = min(10.0, (total_detections / threshold) * 10)
                if intensity >= 9:
                    level = "critical"
                    self._ascending_alert_list.append(3)
                elif intensity >= 7:
                    level = "significant"
                    self._ascending_alert_list.append(2)
                elif intensity >= 5:
                    level = "medium"
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    self._ascending_alert_list.append(0)
            else:
                # No thresholds configured: fall back to fixed count cutoffs.
                if total_detections > 30:
                    level = "critical"
                    intensity = 10.0
                    self._ascending_alert_list.append(3)
                elif total_detections > 25:
                    level = "significant"
                    intensity = 9.0
                    self._ascending_alert_list.append(2)
                elif total_detections > 15:
                    level = "medium"
                    intensity = 7.0
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    intensity = min(10.0, total_detections / 3.0)
                    self._ascending_alert_list.append(0)
            human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
            human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE,level)}")
            human_text = "\n".join(human_text_lines)
            alert_settings=[]
            if config.alert_config and hasattr(config.alert_config, 'alert_type'):
                alert_settings.append({
                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                    "incident_category": self.CASE_TYPE,
                    "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                    "ascending": True,
                    "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'], getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])}
                })
            event= self.create_incident(incident_id=self.CASE_TYPE+'_'+str(frame_number), incident_type=self.CASE_TYPE,
                severity_level=level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
                start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
                level_settings= {"low": 1, "medium": 3, "significant":4, "critical": 7})
            incidents.append(event)
        else:
            # Quiet frame: record zero severity and emit an empty incident.
            self._ascending_alert_list.append(0)
            incidents.append({})
        return incidents
319
+
320
    def _generate_tracking_stats(self, counting_summary: Dict, alerts: List, config: PipelineDetectionConfig, frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
        """Produce a one-element tracking-stats list for this frame.

        Combines current-frame counts, cumulative per-category track counts,
        slim detection objects and a human-readable text block.
        """
        camera_info = self.get_camera_info_from_stream(stream_info)
        tracking_stats = []
        total_detections = counting_summary.get("total_count", 0)
        total_counts_dict = counting_summary.get("total_counts", {})
        cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0
        per_category_count = counting_summary.get("per_category_count", {})
        current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
        start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
        high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
        high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
        # Cumulative counts, keeping only categories that were ever seen.
        total_counts = []
        for cat, count in total_counts_dict.items():
            if count > 0:
                total_counts.append({"category": cat, "count": count})
        # Current-frame counts (kept whenever anything was detected).
        current_counts = []
        for cat, count in per_category_count.items():
            if count > 0 or total_detections > 0:
                current_counts.append({"category": cat, "count": count})
        # Slim detection objects for downstream consumers.
        detections = []
        for detection in counting_summary.get("detections", []):
            bbox = detection.get("bounding_box", {})
            category = detection.get("category", "pipe")
            detection_obj = self.create_detection_object(category, bbox)
            detections.append(detection_obj)
        alert_settings = []
        if config.alert_config and hasattr(config.alert_config, 'alert_type'):
            alert_settings.append({
                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                "incident_category": self.CASE_TYPE,
                "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                "ascending": True,
                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'], getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])}
            })
        # Human-readable block: current frame, cumulative totals, then alerts.
        human_text_lines = [f"Tracking Statistics:"]
        human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")
        for cat, count in per_category_count.items():
            human_text_lines.append(f"\t{cat}: {count}")
        human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
        for cat, count in total_counts_dict.items():
            if count > 0:
                human_text_lines.append(f"\t{cat}: {count}")
        if alerts:
            for alert in alerts:
                human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
        else:
            human_text_lines.append("Alerts: None")
        human_text = "\n".join(human_text_lines)
        reset_settings=[{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]
        tracking_stat=self.create_tracking_stats(total_counts=total_counts, current_counts=current_counts,
            detections=detections, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
            reset_settings=reset_settings, start_time=high_precision_start_timestamp ,
            reset_time=high_precision_reset_timestamp)
        tracking_stats.append(tracking_stat)
        return tracking_stats
375
+
376
+ def _generate_business_analytics(self, counting_summary: Dict, alerts: Any, config: PipelineDetectionConfig, stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
377
+ if is_empty:
378
+ return []
379
+
380
+ def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
381
+ lines = {}
382
+ lines["Application Name"] = self.CASE_TYPE
383
+ lines["Application Version"] = self.CASE_VERSION
384
+ if len(incidents) > 0:
385
+ lines["Incidents:"]=f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
386
+ if len(tracking_stats) > 0:
387
+ lines["Tracking Statistics:"]=f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
388
+ if len(business_analytics) > 0:
389
+ lines["Business Analytics:"]=f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
390
+ if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
391
+ lines["Summary"] = "No Summary Data"
392
+ return [lines]
393
+
394
+ def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
395
+ frame_track_ids = set()
396
+ for det in detections:
397
+ tid = det.get('track_id')
398
+ if tid is not None:
399
+ frame_track_ids.add(tid)
400
+ total_track_ids = set()
401
+ for s in getattr(self, '_per_category_total_track_ids', {}).values():
402
+ total_track_ids.update(s)
403
+ return {
404
+ "total_count": len(total_track_ids),
405
+ "current_frame_count": len(frame_track_ids),
406
+ "total_unique_track_ids": len(total_track_ids),
407
+ "current_frame_track_ids": list(frame_track_ids),
408
+ "last_update_time": time.time(),
409
+ "total_frames_processed": getattr(self, '_total_frame_counter', 0)
410
+ }
411
+
412
    def _update_tracking_state(self, detections: list):
        """Fold this frame's detections into per-category track-id state.

        Canonicalises each raw tracker id (merging fragmented tracks via IoU)
        and records it in both the cumulative and current-frame id sets.
        Mutates each detection's "track_id" in place to the canonical id.
        """
        # Lazily create the cumulative id sets on first call.
        if not hasattr(self, "_per_category_total_track_ids"):
            self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
        # Current-frame ids are rebuilt every call.
        self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
        for det in detections:
            cat = det.get("category")
            raw_track_id = det.get("track_id")
            if cat not in self.target_categories or raw_track_id is None:
                continue
            bbox = det.get("bounding_box", det.get("bbox"))
            canonical_id = self._merge_or_register_track(raw_track_id, bbox)
            det["track_id"] = canonical_id
            self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
            self._current_frame_track_ids[cat].add(canonical_id)
426
+
427
+ def get_total_counts(self):
428
+ return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
429
+
430
+ def _format_timestamp_for_stream(self, timestamp: float) -> str:
431
+ dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
432
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
433
+
434
+ def _format_timestamp_for_video(self, timestamp: float) -> str:
435
+ hours = int(timestamp // 3600)
436
+ minutes = int((timestamp % 3600) // 60)
437
+ seconds = round(float(timestamp % 60),2)
438
+ return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
439
+
440
+ def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
441
+ if not stream_info:
442
+ return "00:00:00.00"
443
+ if precision:
444
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
445
+ if frame_id:
446
+ start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
447
+ else:
448
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
449
+ stream_time_str = self._format_timestamp_for_video(start_time)
450
+ return stream_time_str
451
+ else:
452
+ return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
453
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
454
+ if frame_id:
455
+ start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
456
+ else:
457
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
458
+ stream_time_str = self._format_timestamp_for_video(start_time)
459
+ return stream_time_str
460
+ else:
461
+ stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
462
+ if stream_time_str:
463
+ try:
464
+ timestamp_str = stream_time_str.replace(" UTC", "")
465
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
466
+ timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
467
+ return self._format_timestamp_for_stream(timestamp)
468
+ except:
469
+ return self._format_timestamp_for_stream(time.time())
470
+ else:
471
+ return self._format_timestamp_for_stream(time.time())
472
+
473
+ def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
474
+ if not stream_info:
475
+ return "00:00:00"
476
+ if precision:
477
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
478
+ return "00:00:00"
479
+ else:
480
+ return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
481
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
482
+ return "00:00:00"
483
+ else:
484
+ if self._tracking_start_time is None:
485
+ stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
486
+ if stream_time_str:
487
+ try:
488
+ timestamp_str = stream_time_str.replace(" UTC", "")
489
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
490
+ self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
491
+ except:
492
+ self._tracking_start_time = time.time()
493
+ else:
494
+ self._tracking_start_time = time.time()
495
+ dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
496
+ dt = dt.replace(minute=0, second=0, microsecond=0)
497
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
498
+
499
+ def _count_categories(self, detections: list, config: PipelineDetectionConfig) -> dict:
500
+ counts = {}
501
+ for det in detections:
502
+ cat = det.get('category', 'unknown')
503
+ counts[cat] = counts.get(cat, 0) + 1
504
+ return {
505
+ "total_count": sum(counts.values()),
506
+ "per_category_count": counts,
507
+ "detections": [
508
+ {
509
+ "bounding_box": det.get("bounding_box"),
510
+ "category": det.get("category"),
511
+ "confidence": det.get("confidence"),
512
+ "track_id": det.get("track_id"),
513
+ "frame_id": det.get("frame_id")
514
+ }
515
+ for det in detections
516
+ ]
517
+ }
518
+
519
+ def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
520
+ return [
521
+ {
522
+ "category": det.get("category", "unknown"),
523
+ "confidence": det.get("confidence", 0.0),
524
+ "bounding_box": det.get("bounding_box", {})
525
+ }
526
+ for det in detections
527
+ ]
528
+
529
+ def _compute_iou(self, box1: Any, box2: Any) -> float:
530
+ def _bbox_to_list(bbox):
531
+ if bbox is None:
532
+ return []
533
+ if isinstance(bbox, list):
534
+ return bbox[:4] if len(bbox) >= 4 else []
535
+ if isinstance(bbox, dict):
536
+ if "xmin" in bbox:
537
+ return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
538
+ if "x1" in bbox:
539
+ return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
540
+ values = [v for v in bbox.values() if isinstance(v, (int, float))]
541
+ return values[:4] if len(values) >= 4 else []
542
+ return []
543
+ l1 = _bbox_to_list(box1)
544
+ l2 = _bbox_to_list(box2)
545
+ if len(l1) < 4 or len(l2) < 4:
546
+ return 0.0
547
+ x1_min, y1_min, x1_max, y1_max = l1
548
+ x2_min, y2_min, x2_max, y2_max = l2
549
+ x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
550
+ y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
551
+ x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
552
+ y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
553
+ inter_x_min = max(x1_min, x2_min)
554
+ inter_y_min = max(y1_min, y2_min)
555
+ inter_x_max = min(x1_max, x2_max)
556
+ inter_y_max = min(y1_max, y2_max)
557
+ inter_w = max(0.0, inter_x_max - inter_x_min)
558
+ inter_h = max(0.0, inter_y_max - inter_y_min)
559
+ inter_area = inter_w * inter_h
560
+ area1 = (x1_max - x1_min) * (y1_max - y1_min)
561
+ area2 = (x2_max - x2_min) * (y2_max - y2_min)
562
+ union_area = area1 + area2 - inter_area
563
+ return (inter_area / union_area) if union_area > 0 else 0.0
564
+
565
    def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
        """Map a raw tracker id to a canonical id, merging fragmented tracks.

        A raw id already aliased keeps its canonical id.  Otherwise, any
        recently-updated canonical track whose last bbox overlaps this one
        (IoU >= _track_merge_iou_threshold, within _track_merge_time_window
        seconds) absorbs the raw id.  Failing that, the raw id becomes its
        own new canonical track.
        """
        if raw_id is None or bbox is None:
            return raw_id
        now = time.time()
        # Fast path: already-known raw id — refresh its canonical record.
        if raw_id in self._track_aliases:
            canonical_id = self._track_aliases[raw_id]
            track_info = self._canonical_tracks.get(canonical_id)
            if track_info is not None:
                track_info["last_bbox"] = bbox
                track_info["last_update"] = now
                track_info["raw_ids"].add(raw_id)
            return canonical_id
        # Try to merge into an existing, recently-seen canonical track.
        for canonical_id, info in self._canonical_tracks.items():
            if now - info["last_update"] > self._track_merge_time_window:
                continue
            iou = self._compute_iou(bbox, info["last_bbox"])
            if iou >= self._track_merge_iou_threshold:
                self._track_aliases[raw_id] = canonical_id
                info["last_bbox"] = bbox
                info["last_update"] = now
                info["raw_ids"].add(raw_id)
                return canonical_id
        # No match: register the raw id as a new canonical track.
        canonical_id = raw_id
        self._track_aliases[raw_id] = canonical_id
        self._canonical_tracks[canonical_id] = {
            "last_bbox": bbox,
            "last_update": now,
            "raw_ids": {raw_id},
        }
        return canonical_id
595
+
596
+ def _format_timestamp(self, timestamp: float) -> str:
597
+ return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
598
+
599
+ def _get_tracking_start_time(self) -> str:
600
+ if self._tracking_start_time is None:
601
+ return "N/A"
602
+ return self._format_timestamp(self._tracking_start_time)
603
+
604
    def _set_tracking_start_time(self) -> None:
        """Record the current wall-clock time as the tracking start."""
        self._tracking_start_time = time.time()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: matrice
3
- Version: 1.0.99145
3
+ Version: 1.0.99146
4
4
  Summary: SDK for connecting to matrice.ai services
5
5
  Home-page: https://github.com/matrice-ai/python-sdk
6
6
  Author: Matrice.ai
@@ -126,9 +126,9 @@ matrice/deploy/utils/boundary_drawing_internal/__init__.py,sha256=4mUOm5_T-vf-XA
126
126
  matrice/deploy/utils/boundary_drawing_internal/boundary_drawing_internal.py,sha256=5SPGXS9EIhJJtvC5qTBBmOTQqSKU2byxHIFgo6Bmt-U,43944
127
127
  matrice/deploy/utils/boundary_drawing_internal/boundary_drawing_tool.py,sha256=eY0VQGZ8BfTmR4_ThIAXaumBjh8_c7w69w-d3kta8p0,15421
128
128
  matrice/deploy/utils/boundary_drawing_internal/example_usage.py,sha256=cUBhxxsVdTQWIPvIOjCUGrhqon7ZBr5N6qNewjrTIuk,6434
129
- matrice/deploy/utils/post_processing/__init__.py,sha256=QaVlnp8KYxWlFabtX4dcZzGHcNL_Wf1E1usnd67sCKY,22862
130
- matrice/deploy/utils/post_processing/config.py,sha256=AaWAGXW2NNbRa_8kyXfHSyT8Vuyx-MblWuXJ915iGq0,3128
131
- matrice/deploy/utils/post_processing/processor.py,sha256=_tRjlZH-HEAwT0bHDCcXkbP6Hmx0m8DD1NMmYh4xMs0,30414
129
+ matrice/deploy/utils/post_processing/__init__.py,sha256=4ncfhhVFoKIGzCXPcnOnzmAyouNplqvdnFcyCPXk3wY,23151
130
+ matrice/deploy/utils/post_processing/config.py,sha256=KjEPeKUGjq6QFPYJc8WWyHDR11gogHbhC4bya4TP-ns,3224
131
+ matrice/deploy/utils/post_processing/processor.py,sha256=x6XdUva0tOr0eUfO_YFpxaUhcSK96cppGBeaeobfXAo,30549
132
132
  matrice/deploy/utils/post_processing/advanced_tracker/__init__.py,sha256=tAPFzI_Yep5TLX60FDwKqBqppc-EbxSr0wNsQ9DGI1o,423
133
133
  matrice/deploy/utils/post_processing/advanced_tracker/base.py,sha256=VqWy4dd5th5LK-JfueTt2_GSEoOi5QQfQxjTNhmQoLc,3580
134
134
  matrice/deploy/utils/post_processing/advanced_tracker/config.py,sha256=hEVJVbh4uUrbIynmoq4OhuxF2IZA5AMCBLpixScp5FI,2865
@@ -138,7 +138,7 @@ matrice/deploy/utils/post_processing/advanced_tracker/strack.py,sha256=rVH2xOysZ
138
138
  matrice/deploy/utils/post_processing/advanced_tracker/tracker.py,sha256=D-PKZ2Pxutmlu--icyxuxjvnWBrzrmZcEChYS0nx00M,14328
139
139
  matrice/deploy/utils/post_processing/core/__init__.py,sha256=sCdnjfgypTh3TsnyAYJtN0Z8EQne96Nk4j7ICQVXjWE,1312
140
140
  matrice/deploy/utils/post_processing/core/base.py,sha256=V_DmaMLtrIunrN8Aq9iLeMIQPlkbCE-9d7n0Yz-nKQg,28228
141
- matrice/deploy/utils/post_processing/core/config.py,sha256=n-nPjVnfgErVJ_DJABEcwIxUPQppeLSFhLowM0COCYU,80621
141
+ matrice/deploy/utils/post_processing/core/config.py,sha256=4y4Z3lFAw2N4mNM22tz4TRdOZUeBJKZNBDmtf9zKcdI,81723
142
142
  matrice/deploy/utils/post_processing/core/config_utils.py,sha256=fVZbYRWJr7dq7mz3FMYBVbYUwWDB-5t7oBuhJix9ghE,23102
143
143
  matrice/deploy/utils/post_processing/test_cases/__init__.py,sha256=zUU2kKrIcCl8WeyjjQViwp7PWTZlKPuF8M2pZkxoNNQ,42
144
144
  matrice/deploy/utils/post_processing/test_cases/run_tests.py,sha256=RBFGvxFR-gozxnQFzkWLrs90vLlp8Bsn-Z7MLQrNw4o,4731
@@ -152,7 +152,7 @@ matrice/deploy/utils/post_processing/test_cases/test_people_counting.py,sha256=j
152
152
  matrice/deploy/utils/post_processing/test_cases/test_processor.py,sha256=nwF2EIAnQAuLAFpMH4sJjHHN-t6UN3DAydBPV6L4wAk,19802
153
153
  matrice/deploy/utils/post_processing/test_cases/test_utilities.py,sha256=lmT5bp5_T5yYy1HQ4X01myfScAqnMgf4pd7hHBCjr6A,13414
154
154
  matrice/deploy/utils/post_processing/test_cases/test_utils.py,sha256=bfmOT1rr9asv3jpr-p_UrjnnSZ1qEWM2LEqNKkyvJZ8,29370
155
- matrice/deploy/utils/post_processing/usecases/__init__.py,sha256=Nb13P5IZSs7tl9j-RZWE0ryuVqu6fvGTAfJhWe3644k,7323
155
+ matrice/deploy/utils/post_processing/usecases/__init__.py,sha256=kyFqU_7j42042wmnYRLWiCM9wqv5BI2CD3AVMQuWe3c,7405
156
156
  matrice/deploy/utils/post_processing/usecases/advanced_customer_service.py,sha256=ELt5euxr6P4X2s8-YGngmj27QscOHefjOsx3774sNFk,75914
157
157
  matrice/deploy/utils/post_processing/usecases/age_detection.py,sha256=cgOIx2zqPd_OWQMfTDV_4HSlvXlI8eKcZSEZ85SnZWA,35263
158
158
  matrice/deploy/utils/post_processing/usecases/anti_spoofing_detection.py,sha256=XdtDdXGzZMLQdWcoOoiE5t4LPYHhgOtJ7tZCNlq1A2E,31329
@@ -187,6 +187,7 @@ matrice/deploy/utils/post_processing/usecases/parking.py,sha256=lqTGqcjUZZPFw3tu
187
187
  matrice/deploy/utils/post_processing/usecases/parking_space_detection.py,sha256=1bZaspmzvJAggyr4Lk_hPX79xm6NFpGakAxbuqDnHb8,34524
188
188
  matrice/deploy/utils/post_processing/usecases/pedestrian_detection.py,sha256=hPFtvpWXXEsbDavmuiXIhrosMNlOhGya--jukT-ZOHA,39288
189
189
  matrice/deploy/utils/post_processing/usecases/people_counting.py,sha256=mDJOwcrs9OO4jIbJVr_ItWvjjGP2mgGFYlrP3R-mH2E,76528
190
+ matrice/deploy/utils/post_processing/usecases/pipeline_detection.py,sha256=VsLTXMAqx0tRw7Olrxqx7SBLolZR7p2aFOrdSXLS-kE,30796
190
191
  matrice/deploy/utils/post_processing/usecases/plaque_segmentation_img.py,sha256=d__a0PkkObYVoC-Q5-2bFVfeyKnQHtB5xVAKVOCeFyk,41925
191
192
  matrice/deploy/utils/post_processing/usecases/pothole_segmentation.py,sha256=6Mv8SoEE5CGItY7S0g-SY5Lb3DV-WWVMlpEp04a86a8,43197
192
193
  matrice/deploy/utils/post_processing/usecases/ppe_compliance.py,sha256=G9P9j9E9nfNJInHJxmK1Lb4daFBlG5hq0aqotTLvFFE,30146
@@ -225,8 +226,8 @@ matrice/deployment/camera_manager.py,sha256=ReBZqm1CNXRImKcbcZ4uWAT3TUWkof1D28oB
225
226
  matrice/deployment/deployment.py,sha256=PLIUD-PxTaC2Zxb3Y12wUddsryV-OJetjCjLoSUh7S4,48103
226
227
  matrice/deployment/inference_pipeline.py,sha256=bXLgd29ViA7o0c7YWLFJl1otBUQfTPb61jS6VawQB0Y,37918
227
228
  matrice/deployment/streaming_gateway_manager.py,sha256=w5swGsuFVfZIdOm2ZuBHRHlRdYYJMLopLsf2gb91lQ8,20946
228
- matrice-1.0.99145.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
229
- matrice-1.0.99145.dist-info/METADATA,sha256=SZvAGseewa2r30bvP0UDZ6hMk3qOQVPIGonmT3kTSRc,14624
230
- matrice-1.0.99145.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
231
- matrice-1.0.99145.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
232
- matrice-1.0.99145.dist-info/RECORD,,
229
+ matrice-1.0.99146.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
230
+ matrice-1.0.99146.dist-info/METADATA,sha256=8hOZtHnM6nfjXFFoPu7nYRM3eTJ_4kui-4SPqwcDsh8,14624
231
+ matrice-1.0.99146.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
232
+ matrice-1.0.99146.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
233
+ matrice-1.0.99146.dist-info/RECORD,,