matrice 1.0.99144__py3-none-any.whl → 1.0.99145__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
matrice/deploy/utils/post_processing/usecases/road_lane_detection.py

@@ -1,9 +1,9 @@
  from typing import Any, Dict, List, Optional
- from dataclasses import asdict, dataclass, field
+ from dataclasses import asdict
  import time
  from datetime import datetime, timezone

- from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol
+ from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
  from ..utils import (
  filter_by_confidence,
  filter_by_categories,
@@ -16,6 +16,7 @@ from ..utils import (
  BBoxSmoothingConfig,
  BBoxSmoothingTracker
  )
+ from dataclasses import dataclass, field
  from ..core.config import BaseConfig, AlertConfig, ZoneConfig


@@ -29,40 +30,39 @@ class LaneDetectionConfig(BaseConfig):
  smoothing_confidence_range_factor: float = 0.5
  confidence_threshold: float = 0.6
  usecase_categories: List[str] = field(
- default_factory=lambda: ['Divider-Line', 'Dotted-Line', 'Double-Line', 'Random-Line', 'Road-Sign-Line', 'Solid-Line']
+ default_factory=lambda: ['divider-line', 'dotted-line', 'double-line', 'random-line', 'road-sign-line', 'solid-line']
  )
  target_categories: List[str] = field(
- default_factory=lambda: ['Divider-Line', 'Dotted-Line', 'Double-Line', 'Random-Line', 'Road-Sign-Line', 'Solid-Line']
+ default_factory=lambda: ['divider-line', 'dotted-line', 'double-line', 'random-line', 'road-sign-line', 'solid-line']
  )
  alert_config: Optional[AlertConfig] = None
  index_to_category: Optional[Dict[int, str]] = field(
  default_factory=lambda: {
- 0: "Divider-Line",
- 1: "Dotted-Line",
- 2: "Double-Line",
- 3: "Random-Line",
- 4: "Road-Sign-Line",
- 5: "Solid-Line"
+ 0: "divider-line",
+ 1: "dotted-line",
+ 2: "double-line",
+ 3: "random-line",
+ 4: "road-sign-line",
+ 5: "solid-line"
  }
  )


  class LaneDetectionUseCase(BaseProcessor):
  CATEGORY_DISPLAY = {
- "Divider-Line": "Divider Line",
- "Dotted-Line": "Dotted Line",
- "Double-Line": "Double Line",
- "Random-Line": "Random Line",
- "Road-Sign-Line": "Road Sign Line",
- "Solid-Line": "Solid Line"
+ "divider-line": "Divider Line",
+ "dotted-line": "Dotted Line",
+ "double-line": "Double Line",
+ "random-line": "Random Line",
+ "road-sign-line": "Road Sign Line",
+ "solid-line": "Solid Line"
  }
-
  def __init__(self):
  super().__init__("lane_detection")
  self.category = "traffic"
  self.CASE_TYPE: Optional[str] = 'lane_detection'
  self.CASE_VERSION: Optional[str] = '1.0'
- self.target_categories = ['Divider-Line', 'Dotted-Line', 'Double-Line', 'Random-Line', 'Road-Sign-Line', 'Solid-Line']
+ self.target_categories = ['divider-line', 'dotted-line', 'double-line', 'random-line', 'road-sign-line', 'solid-line']
  self.smoothing_tracker = None
  self.tracker = None
  self._total_frame_counter = 0
@@ -82,7 +82,6 @@ class LaneDetectionUseCase(BaseProcessor):
  return self.create_error_result("Invalid config type", usecase=self.name, category=self.category, context=context)
  if context is None:
  context = ProcessingContext()
-
  input_format = match_results_structure(data)
  context.input_format = input_format
  context.confidence_threshold = config.confidence_threshold
@@ -92,7 +91,7 @@ class LaneDetectionUseCase(BaseProcessor):
  self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
  else:
  processed_data = data
- self.logger.debug("Did not apply confidence filtering since no threshold provided")
+ self.logger.debug("Did not apply confidence filtering")

  if config.index_to_category:
  processed_data = apply_category_mapping(processed_data, config.index_to_category)
@@ -193,39 +192,42 @@ class LaneDetectionUseCase(BaseProcessor):
  if not config.alert_config:
  return alerts

+ total = summary.get("total_count", 0)
  if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
  for category, threshold in config.alert_config.count_thresholds.items():
- if category == "all" and total_detections > threshold:
- alerts.append({
- "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
- "alert_id": f"alert_{category}_{frame_key}",
- "incident_category": self.CASE_TYPE,
- "threshold_level": threshold,
- "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
- "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
- getattr(config.alert_config, 'alert_value', ['JSON']))}
- })
- elif category in per_category_count and per_category_count[category] > threshold:
+ if category == "all" and total > threshold:
  alerts.append({
- "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
- "alert_id": f"alert_{category}_{frame_key}",
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+ "alert_id": "alert_" + category + '_' + frame_key,
  "incident_category": self.CASE_TYPE,
  "threshold_level": threshold,
  "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
- "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
- getattr(config.alert_config, 'alert_value', ['JSON']))}
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+ getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])}
  })
+ elif category in per_category_count:
+ count = per_category_count[category]
+ if count > threshold:
+ alerts.append({
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+ "alert_id": "alert_" + category + '_' + frame_key,
+ "incident_category": self.CASE_TYPE,
+ "threshold_level": threshold,
+ "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+ getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])}
+ })
  return alerts

  def _generate_incidents(self, counting_summary: Dict, alerts: List, config: LaneDetectionConfig,
- frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+ frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
  incidents = []
  total_detections = counting_summary.get("total_count", 0)
  current_timestamp = self._get_current_timestamp_str(stream_info)
  camera_info = self.get_camera_info_from_stream(stream_info)
-
+
  self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list
-
+
  if total_detections > 0:
  level = "low"
  intensity = 5.0
@@ -251,6 +253,7 @@ class LaneDetectionUseCase(BaseProcessor):
  level = "medium"
  self._ascending_alert_list.append(1)
  else:
+ level = "low"
  self._ascending_alert_list.append(0)
  else:
  if total_detections > 30:
@@ -258,7 +261,7 @@ class LaneDetectionUseCase(BaseProcessor):
  intensity = 10.0
  self._ascending_alert_list.append(3)
  elif total_detections > 25:
- level = "significant"
+ level = " significant"
  intensity = 9.0
  self._ascending_alert_list.append(2)
  elif total_detections > 15:
@@ -277,16 +280,16 @@ class LaneDetectionUseCase(BaseProcessor):
  alert_settings = []
  if config.alert_config and hasattr(config.alert_config, 'alert_type'):
  alert_settings.append({
- "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
  "incident_category": self.CASE_TYPE,
  "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
  "ascending": True,
- "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
- getattr(config.alert_config, 'alert_value', ['JSON']))}
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+ getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])}
  })

  event = self.create_incident(
- incident_id=f"{self.CASE_TYPE}_{frame_number}",
+ incident_id=self.CASE_TYPE + '_' + str(frame_number),
  incident_type=self.CASE_TYPE,
  severity_level=level,
  human_text=human_text,
@@ -304,8 +307,7 @@ class LaneDetectionUseCase(BaseProcessor):
  return incidents

  def _generate_tracking_stats(self, counting_summary: Dict, alerts: List, config: LaneDetectionConfig,
- frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
- """Generate structured tracking stats matching expected format."""
+ frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
  camera_info = self.get_camera_info_from_stream(stream_info)
  tracking_stats = []
  total_detections = counting_summary.get("total_count", 0)
@@ -316,13 +318,8 @@ class LaneDetectionUseCase(BaseProcessor):
  high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
  high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)

- # Build total_counts and current_counts arrays
- total_counts = [{"category": cat, "count": count} for cat, count in total_counts_dict.items()]
- current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items()]
-
- # Log counts for debugging
- self.logger.debug(f"Total counts: {total_counts}")
- self.logger.debug(f"Current counts: {current_counts}")
+ total_counts = [{"category": cat, "count": count} for cat, count in total_counts_dict.items() if count > 0]
+ current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items() if count > 0 or total_detections > 0]

  detections = []
  for detection in counting_summary.get("detections", []):
@@ -344,12 +341,12 @@ class LaneDetectionUseCase(BaseProcessor):
  alert_settings = []
  if config.alert_config and hasattr(config.alert_config, 'alert_type'):
  alert_settings.append({
- "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
  "incident_category": self.CASE_TYPE,
  "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
  "ascending": True,
- "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
- getattr(config.alert_config, 'alert_value', ['JSON']))}
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+ getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])}
  })

  human_text_lines = [f"Tracking Statistics:"]
@@ -358,7 +355,8 @@ class LaneDetectionUseCase(BaseProcessor):
  human_text_lines.append(f"\t{cat}: {count}")
  human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
  for cat, count in total_counts_dict.items():
- human_text_lines.append(f"\t{cat}: {count}")
+ if count > 0:
+ human_text_lines.append(f"\t{cat}: {count}")
  if alerts:
  for alert in alerts:
  human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
@@ -367,6 +365,7 @@ class LaneDetectionUseCase(BaseProcessor):
  human_text = "\n".join(human_text_lines)

  reset_settings = [{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]
+
  tracking_stat = self.create_tracking_stats(
  total_counts=total_counts,
  current_counts=current_counts,
@@ -383,7 +382,7 @@ class LaneDetectionUseCase(BaseProcessor):
  return tracking_stats

  def _generate_business_analytics(self, counting_summary: Dict, alerts: Any, config: LaneDetectionConfig,
- stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
+ stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
  if is_empty:
  return []

@@ -448,33 +447,42 @@ class LaneDetectionUseCase(BaseProcessor):
  seconds = round(float(timestamp % 60), 2)
  return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"

- def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str] = None) -> str:
+ def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
+ """Get formatted current timestamp based on stream type."""
  if not stream_info:
  return "00:00:00.00"
+ # is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
  if precision:
  if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
  if frame_id:
- start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
+ start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
  else:
- start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
- return self._format_timestamp_for_video(start_time)
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
+ stream_time_str = self._format_timestamp_for_video(start_time)
+ return stream_time_str
  else:
  return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+
  if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
- if frame_id:
- start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
- else:
- start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
- return self._format_timestamp_for_video(start_time)
+ if frame_id:
+ start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
+ else:
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
+ stream_time_str = self._format_timestamp_for_video(start_time)
+ return stream_time_str
  else:
+ # For streams, use stream_time from stream_info
  stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
  if stream_time_str:
+ # Parse the high precision timestamp string to get timestamp
  try:
+ # Remove " UTC" suffix and parse
  timestamp_str = stream_time_str.replace(" UTC", "")
  dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
  timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
  return self._format_timestamp_for_stream(timestamp)
  except:
+ # Fallback to current time if parsing fails
  return self._format_timestamp_for_stream(time.time())
  else:
  return self._format_timestamp_for_stream(time.time())
@@ -506,34 +514,23 @@ class LaneDetectionUseCase(BaseProcessor):
  return dt.strftime('%Y:%m:%d %H:%M:%S')

  def _count_categories(self, detections: list, config: LaneDetectionConfig) -> dict:
- """
- Count the number of detections per category and return a summary dict.
- """
- counts = {cat: 0 for cat in self.target_categories} # Initialize with all target categories
- valid_detections = []
-
+ counts = {}
  for det in detections:
  cat = det.get('category', 'unknown')
- # Normalize category to match target_categories
- normalized_cat = cat.replace('-', ' ').title().replace(' ', '-') # e.g., "solid-line" -> "Solid-Line"
- if normalized_cat not in self.target_categories:
- self.logger.debug(f"Skipping detection with category {normalized_cat}, not in target categories")
- continue
- counts[normalized_cat] += 1
- det['category'] = normalized_cat # Update detection with normalized category
- valid_detections.append({
- "bounding_box": det.get("bounding_box"),
- "category": normalized_cat,
- "confidence": det.get("confidence"),
- "track_id": det.get("track_id"),
- "frame_id": det.get("frame_id")
- })
- self.logger.debug(f"Counted detection for category {normalized_cat}, confidence {det.get('confidence')}")
-
+ counts[cat] = counts.get(cat, 0) + 1
  return {
  "total_count": sum(counts.values()),
  "per_category_count": counts,
- "detections": valid_detections
+ "detections": [
+ {
+ "bounding_box": det.get("bounding_box"),
+ "category": det.get("category"),
+ "confidence": det.get("confidence"),
+ "track_id": det.get("track_id"),
+ "frame_id": det.get("frame_id")
+ }
+ for det in detections
+ ]
  }

  def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
matrice-1.0.99144.dist-info/METADATA → matrice-1.0.99145.dist-info/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: matrice
- Version: 1.0.99144
+ Version: 1.0.99145
  Summary: SDK for connecting to matrice.ai services
  Home-page: https://github.com/matrice-ai/python-sdk
  Author: Matrice.ai
matrice-1.0.99144.dist-info/RECORD → matrice-1.0.99145.dist-info/RECORD

@@ -191,7 +191,7 @@ matrice/deploy/utils/post_processing/usecases/plaque_segmentation_img.py,sha256=
  matrice/deploy/utils/post_processing/usecases/pothole_segmentation.py,sha256=6Mv8SoEE5CGItY7S0g-SY5Lb3DV-WWVMlpEp04a86a8,43197
  matrice/deploy/utils/post_processing/usecases/ppe_compliance.py,sha256=G9P9j9E9nfNJInHJxmK1Lb4daFBlG5hq0aqotTLvFFE,30146
  matrice/deploy/utils/post_processing/usecases/price_tag_detection.py,sha256=Sn_Dvwf5f_dcfaiPIl-pqckgP8z96CeNIJ4hfeab3FM,39880
- matrice/deploy/utils/post_processing/usecases/road_lane_detection.py,sha256=ljWoDeFJb0IJJPClYaZrNnBZd87JqKI64jE_I79GAYo,31146
+ matrice/deploy/utils/post_processing/usecases/road_lane_detection.py,sha256=V_KxwBtAHSNkyoH8sXw-U-P3J8ToXtX3ncc69gn6Tds,31591
  matrice/deploy/utils/post_processing/usecases/shelf_inventory_detection.py,sha256=1juloltHnCj3U499Aps0ggE0nEI37x3iKe4DgfP4RCw,29140
  matrice/deploy/utils/post_processing/usecases/shoplifting_detection.py,sha256=zqeV_ARV5gJqMY2sJGBjlU6UOb0SthGGbC8UNj_mycs,34701
  matrice/deploy/utils/post_processing/usecases/shopping_cart_analysis.py,sha256=9Ej2xiZM7yq5sOBcSXIllou_z0rSZDJ_QHyYz6HxZSY,43957
@@ -225,8 +225,8 @@ matrice/deployment/camera_manager.py,sha256=ReBZqm1CNXRImKcbcZ4uWAT3TUWkof1D28oB
  matrice/deployment/deployment.py,sha256=PLIUD-PxTaC2Zxb3Y12wUddsryV-OJetjCjLoSUh7S4,48103
  matrice/deployment/inference_pipeline.py,sha256=bXLgd29ViA7o0c7YWLFJl1otBUQfTPb61jS6VawQB0Y,37918
  matrice/deployment/streaming_gateway_manager.py,sha256=w5swGsuFVfZIdOm2ZuBHRHlRdYYJMLopLsf2gb91lQ8,20946
- matrice-1.0.99144.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
- matrice-1.0.99144.dist-info/METADATA,sha256=xF8gj0tJeJ-JY50BNsnZfd9LLxRif6dtncWMdvo1pE4,14624
- matrice-1.0.99144.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- matrice-1.0.99144.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
- matrice-1.0.99144.dist-info/RECORD,,
+ matrice-1.0.99145.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
+ matrice-1.0.99145.dist-info/METADATA,sha256=SZvAGseewa2r30bvP0UDZ6hMk3qOQVPIGonmT3kTSRc,14624
+ matrice-1.0.99145.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ matrice-1.0.99145.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
+ matrice-1.0.99145.dist-info/RECORD,,