matrice-1.0.99179-py3-none-any.whl → matrice-1.0.99180-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -10,7 +10,6 @@ from typing import Any, Dict, List, Optional
  from dataclasses import asdict
  import time
  from datetime import datetime, timezone
- import copy # Added for deep copying detections to preserve original masks
 
  from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
  from ..utils import (
@@ -40,7 +39,7 @@ class PotholeConfig(BaseConfig):
  smoothing_confidence_range_factor: float = 0.5
 
  #confidence thresholds
- confidence_threshold: float = 0.6
+ confidence_threshold: float = 0.3
 
  usecase_categories: List[str] = field(
  default_factory=lambda: ['pothole']
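
The default threshold drops from 0.6 to 0.3, so the confidence filter in `process` becomes markedly more permissive. A minimal sketch of the effect, assuming `filter_by_confidence` (imported from `..utils`, whose implementation is not part of this diff) simply drops detections below the threshold:

# Hypothetical stand-in for ..utils.filter_by_confidence.
def filter_by_confidence(data, threshold):
    return [d for d in data if d.get("confidence", 0.0) >= threshold]

detections = [
    {"category": "pothole", "confidence": 0.35},
    {"category": "pothole", "confidence": 0.72},
]
print(len(filter_by_confidence(detections, 0.6)))  # 1: old default keeps only the strong hit
print(len(filter_by_confidence(detections, 0.3)))  # 2: new default also keeps the weak hit
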
@@ -54,28 +53,27 @@ class PotholeConfig(BaseConfig):
 
  index_to_category: Optional[Dict[int, str]] = field(
  default_factory=lambda: {
- 0:"pothole"
-
+ 0: 'pothole'
  }
  )
 
 
  class PotholeSegmentationUseCase(BaseProcessor):
-
- # Human-friendly display names for categories
+ # Human-friendly display names for categories
  CATEGORY_DISPLAY = {
  "pothole": "pothole"
  }
+
  def __init__(self):
  super().__init__("pothole_segmentation")
  self.category = "infrastructure"
 
+ self.CASE_TYPE: Optional[str] = 'pothole_segmentation'
+ self.CASE_VERSION: Optional[str] = '1.3'
+
  # List of categories to track
  self.target_categories = ["pothole"]
 
- self.CASE_TYPE: Optional[str] = 'Pothole_detection'
- self.CASE_VERSION: Optional[str] = '1.3'
-
  # Initialize smoothing tracker
  self.smoothing_tracker = None
 
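Note that `CASE_TYPE` changes value as well as position, so every identifier derived from it changes too. For example, `_generate_incidents` (later in this diff) builds `incident_id=self.CASE_TYPE+'_'+str(frame_number)`; with an illustrative frame number:

frame_number = 42  # illustrative value only
old_id = 'Pothole_detection' + '_' + str(frame_number)     # 'Pothole_detection_42'
new_id = 'pothole_segmentation' + '_' + str(frame_number)  # 'pothole_segmentation_42'
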
@@ -89,13 +87,6 @@ class PotholeSegmentationUseCase(BaseProcessor):
  # Track start time for "TOTAL SINCE" calculation
  self._tracking_start_time = None
 
- # ------------------------------------------------------------------ #
- # Canonical tracking aliasing to avoid duplicate counts #
- # ------------------------------------------------------------------ #
- # Maps raw tracker-generated IDs to stable canonical IDs that persist
- # even if the underlying tracker re-assigns a new ID after a short
- # interruption. This mirrors the logic used in people_counting to
- # provide accurate unique counting.
  self._track_aliases: Dict[Any, Any] = {}
  self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
  # Tunable parameters – adjust if necessary for specific scenarios
@@ -108,7 +99,7 @@ class PotholeSegmentationUseCase(BaseProcessor):
  def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
  stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
  """
- Main entry point for post-processing.
+ Main entry point for post-processing.
  Applies category mapping, smoothing, counting, alerting, and summary generation.
  Returns a ProcessingResult with all relevant outputs.
  """
@@ -124,42 +115,39 @@ class PotholeSegmentationUseCase(BaseProcessor):
  input_format = match_results_structure(data)
  context.input_format = input_format
  context.confidence_threshold = config.confidence_threshold
-
- # Step 1: Confidence filtering
+
  if config.confidence_threshold is not None:
  processed_data = filter_by_confidence(data, config.confidence_threshold)
+ self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
  else:
  processed_data = data
+
  self.logger.debug(f"Did not apply confidence filtering with threshold since nothing was provided")
 
  # Step 2: Apply category mapping if provided
  if config.index_to_category:
  processed_data = apply_category_mapping(processed_data, config.index_to_category)
+ self.logger.debug("Applied category mapping")
 
- # Step 3: Category filtering
  if config.target_categories:
  processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
+ self.logger.debug(f"Applied category filtering")
 
- # Step 4: Apply bbox smoothing if enabled
- # Deep-copy detections so that we preserve the original masks before any
- # smoothing/tracking logic potentially removes them.
- raw_processed_data = [copy.deepcopy(det) for det in processed_data]
+ # Apply bbox smoothing if enabled
  if config.enable_smoothing:
  if self.smoothing_tracker is None:
  smoothing_config = BBoxSmoothingConfig(
  smoothing_algorithm=config.smoothing_algorithm,
  window_size=config.smoothing_window_size,
  cooldown_frames=config.smoothing_cooldown_frames,
- confidence_threshold=config.confidence_threshold,
+ confidence_threshold=config.confidence_threshold, # Use mask threshold as default
  confidence_range_factor=config.smoothing_confidence_range_factor,
  enable_smoothing=True
  )
  self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
-
  processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
- # Restore masks after smoothing
 
- # Step 5: Advanced tracking (BYTETracker-like)
+ # Advanced tracking (BYTETracker-like)
  try:
  from ..advanced_tracker import AdvancedTracker
  from ..advanced_tracker.config import TrackerConfig
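
The net effect of this hunk: the pipeline order is unchanged (confidence filter → category mapping → category filter → smoothing → tracking), but the deep copy that preserved pre-smoothing masks is gone, along with the step-numbered comments. A condensed, hypothetical sketch of the resulting flow; the two helpers below stand in for the package's own utilities, whose internals are assumed:

# Hypothetical stubs for the package's ..utils functions.
def filter_by_confidence(data, threshold):
    return [d for d in data if d.get("confidence", 0.0) >= threshold]

def apply_category_mapping(data, index_to_category):
    return [{**d, "category": index_to_category.get(d.get("category"), d.get("category"))} for d in data]

def run_pipeline(data, config, usecase):
    if config.confidence_threshold is not None:
        data = filter_by_confidence(data, config.confidence_threshold)
    if config.index_to_category:
        data = apply_category_mapping(data, config.index_to_category)
    if config.target_categories:
        data = [d for d in data if d.get('category') in usecase.target_categories]
    # Smoothing and tracking would follow here. There is no longer a
    # copy.deepcopy of the pre-smoothing detections, so masks stripped
    # during smoothing/tracking are not restored afterwards.
    return data
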
@@ -168,24 +156,19 @@ class PotholeSegmentationUseCase(BaseProcessor):
  if self.tracker is None:
  tracker_config = TrackerConfig()
  self.tracker = AdvancedTracker(tracker_config)
- self.logger.info("Initialized AdvancedTracker for Monitoring and tracking")
-
+ self.logger.info("Initialized AdvancedTracker for Monitoring and tracking")
+
+ # The tracker expects the data in the same format as input
+ # It will add track_id and frame_id to each detection
  processed_data = self.tracker.update(processed_data)
+
  except Exception as e:
  # If advanced tracker fails, fallback to unsmoothed detections
  self.logger.warning(f"AdvancedTracker failed: {e}")
 
- # Update tracking state for total count per label
+ # Update tracking state for total count per label
  self._update_tracking_state(processed_data)
 
- # ------------------------------------------------------------------ #
- # Re-attach segmentation masks that were present in the original input
- # but may have been stripped during smoothing/tracking. We match each
- # processed detection back to the raw detection with the highest IoU
- # and copy over its "masks" field (if available).
- # ------------------------------------------------------------------ #
- processed_data = self._attach_masks_to_detections(processed_data, raw_processed_data)
-
  # Update frame counter
  self._total_frame_counter += 1
 
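Two behavioral notes fall out of this hunk: the `except` still silently degrades to untracked detections when the tracker fails, and the IoU-based mask re-attachment step is removed entirely. A minimal sketch of the fallback path, with a hypothetical stand-in for `AdvancedTracker`:

# Hypothetical stand-in; the real AdvancedTracker lives in ..advanced_tracker.
class FlakyTracker:
    def update(self, detections):
        raise RuntimeError("tracker crashed")

detections = [{"category": "pothole", "confidence": 0.8}]
try:
    detections = FlakyTracker().update(detections)
except Exception as e:
    # Mirrors the diff: keep the unsmoothed detections and log a warning.
    print(f"AdvancedTracker failed: {e}")

print(detections[0].get("track_id"))  # None: downstream code must tolerate missing track_ids
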
@@ -200,20 +183,19 @@ class PotholeSegmentationUseCase(BaseProcessor):
  frame_number = start_frame
 
  # Compute summaries and alerts
- general_counting_summary = calculate_counting_summary(data)
- counting_summary = self._count_categories(processed_data, config)
- # Add total unique counts after tracking using only local state
- total_counts = self.get_total_counts()
- counting_summary['total_counts'] = total_counts
-
+ general_counting_summary = calculate_counting_summary(data)
+ counting_summary = self._count_categories(processed_data, config)
+ # Add total unique counts after tracking using only local state
+ total_counts = self.get_total_counts()
+ counting_summary['total_counts'] = total_counts
+
  alerts = self._check_alerts(counting_summary, frame_number, config)
  predictions = self._extract_predictions(processed_data)
 
- # Step: Generate structured events and tracking stats with frame-based keys
+ # Step: Generate structured incidents, tracking stats and business analytics with frame-based keys
  incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
- tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number,stream_info)
- # business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, frame_number, stream_info, is_empty=False)
- business_analytics_list = []
+ tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
+ business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, stream_info, is_empty=True)
  summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)
 
  # Extract frame-based dictionaries from the lists
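
Although the `_generate_business_analytics` call is reinstated here, it is invoked with `is_empty=True`, and the method body (visible later in this diff) starts with `if is_empty: return []`. The result is therefore still an empty list, equivalent to the removed hard-coded `business_analytics_list = []`:

# Equivalence sketch, based on the guard shown in the method body below.
def _generate_business_analytics(counting_summary, alerts, config, stream_info=None, is_empty=False):
    if is_empty:
        return []
    raise NotImplementedError  # body elided; never reached when is_empty=True

assert _generate_business_analytics({}, [], None, is_empty=True) == []
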
@@ -227,8 +209,9 @@ class PotholeSegmentationUseCase(BaseProcessor):
  "business_analytics": business_analytics,
  "alerts": alerts,
  "human_text": summary}
- }
-
+ }
+
+
  context.mark_completed()
 
  # Build result object following the new pattern
@@ -290,8 +273,8 @@ class PotholeSegmentationUseCase(BaseProcessor):
  "threshold_level": threshold,
  "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
  "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
- getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
- }
+ getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+ }
  })
  elif category in summary.get("per_category_count", {}):
  count = summary.get("per_category_count", {})[category]
@@ -303,27 +286,25 @@ class PotholeSegmentationUseCase(BaseProcessor):
  "threshold_level": threshold,
  "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
  "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
- getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
- }
+ getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+ }
  })
  else:
  pass
  return alerts
 
  def _generate_incidents(self, counting_summary: Dict, alerts: List, config: PotholeConfig,
- frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[
+ frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[
  Dict]:
- """Generate structured events for the output format with frame-based keys."""
-
- # Use frame number as key, fallback to 'current_frame' if not available
- frame_key = str(frame_number) if frame_number is not None else "current_frame"
- incidents=[]
+ """Generate structured incidents for the output format with frame-based keys."""
+
+ incidents = []
  total_detections = counting_summary.get("total_count", 0)
  current_timestamp = self._get_current_timestamp_str(stream_info)
  camera_info = self.get_camera_info_from_stream(stream_info)
 
  self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list
-
+
  if total_detections > 0:
  # Determine event level based on thresholds
  level = "low"
@@ -371,7 +352,7 @@ class PotholeSegmentationUseCase(BaseProcessor):
  intensity = min(10.0, total_detections / 3.0)
  self._ascending_alert_list.append(0)
 
- # Generate human text in new format
+ # Generate human text in new format
  human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
  human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE,level)}")
  human_text = "\n".join(human_text_lines)
@@ -389,9 +370,9 @@ class PotholeSegmentationUseCase(BaseProcessor):
  })
 
  event= self.create_incident(incident_id=self.CASE_TYPE+'_'+str(frame_number), incident_type=self.CASE_TYPE,
- severity_level=level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
- start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
- level_settings= {"low": 1, "medium": 3, "significant":4, "critical": 7})
+ severity_level=level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
+ start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
+ level_settings= {"low": 1, "medium": 3, "significant":4, "critical": 7})
  incidents.append(event)
 
  else:
@@ -403,22 +384,23 @@ class PotholeSegmentationUseCase(BaseProcessor):
  def _generate_tracking_stats(
  self,
  counting_summary: Dict,
- alerts: Any,
+ alerts: List,
  config: PotholeConfig,
  frame_number: Optional[int] = None,
  stream_info: Optional[Dict[str, Any]] = None
  ) -> List[Dict]:
- """Generate structured tracking stats for the output format with frame-based keys, including track_ids_info and detections with masks."""
- #frame_key = str(frame_number) if frame_number is not None else "current_frame"
- tracking_stats = [] #[{frame_key: []}]
- #frame_tracking_stats = tracking_stats[0][frame_key]
-
- total_detections = counting_summary.get("total_count", 0)
- total_counts = counting_summary.get("total_counts", {})
- cumulative_total = sum(total_counts.values()) if total_counts else 0
- per_category_count = counting_summary.get("per_category_count", {})
-
- track_ids_info = self._get_track_ids_info(counting_summary.get("detections", []))
+ """Generate structured tracking stats matching eg.json format."""
+ camera_info = self.get_camera_info_from_stream(stream_info)
+
+ # frame_key = str(frame_number) if frame_number is not None else "current_frame"
+ # tracking_stats = [{frame_key: []}]
+ # frame_tracking_stats = tracking_stats[0][frame_key]
+ tracking_stats = []
+
+ total_detections = counting_summary.get("total_count", 0) #CURRENT total count of all classes
+ total_counts_dict = counting_summary.get("total_counts", {}) #TOTAL cumulative counts per class
+ cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0 #TOTAL combined cumulative count
+ per_category_count = counting_summary.get("per_category_count", {}) #CURRENT count per class
 
  current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
  start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
@@ -427,33 +409,16 @@ class PotholeSegmentationUseCase(BaseProcessor):
  high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
  high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
 
- camera_info = self.get_camera_info_from_stream(stream_info)
- human_text_lines = []
+
+ # Build total_counts array in expected format
+ total_counts = []
+ for cat, count in total_counts_dict.items():
+ if count > 0:
+ total_counts.append({
+ "category": cat,
+ "count": count
+ })
 
- # CURRENT FRAME section
- human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
- if total_detections > 0:
- category_counts = [f"{count} {cat}" for cat, count in per_category_count.items()]
- if len(category_counts) == 1:
- detection_text = category_counts[0] + " detected"
- elif len(category_counts) == 2:
- detection_text = f"{category_counts[0]} and {category_counts[1]} detected"
- else:
- detection_text = f"{', '.join(category_counts[:-1])}, and {category_counts[-1]} detected"
- human_text_lines.append(f"\t- {detection_text}")
- else:
- human_text_lines.append(f"\t- No detections")
-
- human_text_lines.append("") # spacing
-
- # TOTAL SINCE section
- human_text_lines.append(f"TOTAL SINCE {start_timestamp}:")
- human_text_lines.append(f"\t- Total Detected: {cumulative_total}")
- # Add category-wise counts
- if total_counts:
- for cat, count in total_counts.items():
- if count > 0: # Only include categories with non-zero counts
- human_text_lines.append(f"\t- {cat}: {count}")
  # Build current_counts array in expected format
  current_counts = []
  for cat, count in per_category_count.items():
@@ -463,9 +428,6 @@ class PotholeSegmentationUseCase(BaseProcessor):
  "count": count
  })
 
- human_text = "\n".join(human_text_lines)
-
- # Include detections with masks from counting_summary
  # Prepare detections without confidence scores (as per eg.json)
  detections = []
  for detection in counting_summary.get("detections", []):
@@ -494,10 +456,24 @@ class PotholeSegmentationUseCase(BaseProcessor):
  "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
  "ascending": True,
  "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
- getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
- }
+ getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+ }
  })
 
+ # Generate human_text in expected format
+ human_text_lines = [f"Tracking Statistics:"]
+ human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")
+ human_text_lines.append(f"Potholes Detected - ")
+
+ for cat, count in per_category_count.items():
+ human_text_lines.append(f"\t{cat}: {count}")
+
+ human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
+ human_text_lines.append(f"Total Potholes Detected - ")
+ for cat, count in total_counts_dict.items():
+ if count > 0:
+ human_text_lines.append(f"\t{cat}: {count}")
+
  if alerts:
  for alert in alerts:
  human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
@@ -505,7 +481,7 @@ class PotholeSegmentationUseCase(BaseProcessor):
  human_text_lines.append("Alerts: None")
 
  human_text = "\n".join(human_text_lines)
- reset_settings = [
+ reset_settings=[
  {
  "interval_type": "daily",
  "reset_time": {
@@ -516,14 +492,14 @@ class PotholeSegmentationUseCase(BaseProcessor):
  ]
 
  tracking_stat=self.create_tracking_stats(total_counts=total_counts, current_counts=current_counts,
- detections=detections, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
- reset_settings=reset_settings, start_time=high_precision_start_timestamp ,
- reset_time=high_precision_reset_timestamp)
+ detections=detections, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
+ reset_settings=reset_settings, start_time=high_precision_start_timestamp ,
+ reset_time=high_precision_reset_timestamp)
 
  tracking_stats.append(tracking_stat)
  return tracking_stats
 
- def _generate_business_analytics(self, counting_summary: Dict, zone_analysis: Dict, config: PotholeConfig, stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
+ def _generate_business_analytics(self, counting_summary: Dict, alerts: Any, config: PotholeConfig, stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
  """Generate standardized business analytics for the agg_summary structure."""
  if is_empty:
  return []
@@ -554,36 +530,6 @@ class PotholeSegmentationUseCase(BaseProcessor):
 
  return [lines]
 
-
- def _count_categories(self, detections: list, config: PotholeConfig) -> dict:
- """
- Count the number of detections per category and return a summary dict.
- The detections list is expected to have 'track_id' (from tracker), 'category', 'bounding_box', 'masks', etc.
- Output structure will include 'track_id' and 'masks' for each detection as per AdvancedTracker output.
- """
- counts = {}
- valid_detections = []
- for det in detections:
- cat = det.get('category', 'unknown')
- if not all(k in det for k in ['category', 'confidence', 'bounding_box']): # Validate required fields
- self.logger.warning(f"Skipping invalid detection: {det}")
- continue
- counts[cat] = counts.get(cat, 0) + 1
- valid_detections.append({
- "bounding_box": det.get("bounding_box"),
- "category": det.get("category"),
- "confidence": det.get("confidence"),
- "track_id": det.get("track_id"),
- "frame_id": det.get("frame_id"),
- "masks": det.get("masks", det.get("mask", [])) # Include masks, fallback to empty list
- })
- self.logger.debug(f"Valid detections after filtering: {len(valid_detections)}")
- return {
- "total_count": sum(counts.values()),
- "per_category_count": counts,
- "detections": valid_detections
- }
-
  def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
  """
  Get detailed information about track IDs (per frame).
@@ -637,37 +583,45 @@ class PotholeSegmentationUseCase(BaseProcessor):
  """
  return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
 
- def _format_timestamp_for_video(self, timestamp: float) -> str:
- """Format timestamp for video chunks (HH:MM:SS.ms format)."""
- hours = int(timestamp // 3600)
- minutes = int((timestamp % 3600) // 60)
- seconds = timestamp % 60
- return f"{hours:02d}:{minutes:02d}:{seconds:06.2f}"
 
  def _format_timestamp_for_stream(self, timestamp: float) -> str:
  """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
  dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
  return dt.strftime('%Y:%m:%d %H:%M:%S')
 
- def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
+ def _format_timestamp_for_video(self, timestamp: float) -> str:
+ """Format timestamp for video chunks (HH:MM:SS.ms format)."""
+ hours = int(timestamp // 3600)
+ minutes = int((timestamp % 3600) // 60)
+ seconds = round(float(timestamp % 60),2)
+ return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
+
+ def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
  """Get formatted current timestamp based on stream type."""
  if not stream_info:
  return "00:00:00.00"
-
+ # is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
  if precision:
- if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
- stream_time_str = stream_info.get("video_timestamp", "")
- return stream_time_str[:8]
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+ if frame_id:
+ start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
+ else:
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
+ stream_time_str = self._format_timestamp_for_video(start_time)
+ return stream_time_str
  else:
  return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
 
- if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
- # If video format, return video timestamp
- stream_time_str = stream_info.get("video_timestamp", "")
- return stream_time_str[:8]
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+ if frame_id:
+ start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
+ else:
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
+ stream_time_str = self._format_timestamp_for_video(start_time)
+ return stream_time_str
  else:
  # For streams, use stream_time from stream_info
- stream_time_str = stream_info.get("stream_time", "")
+ stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
  if stream_time_str:
  # Parse the high precision timestamp string to get timestamp
  try:
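
The selector also changes: instead of branching on `stream_type == "video_file"` and slicing a pre-formatted `video_timestamp` string, both branches now detect video input via `start_frame` and compute the time as frame index divided by `original_fps`. A standalone check of the new arithmetic, with `_format_timestamp_for_video` replicated from the diff (frame and fps values are illustrative):

def format_timestamp_for_video(timestamp: float) -> str:
    # Replicated from the diff. Note the new {seconds:.1f} formatting no
    # longer zero-pads seconds the way the removed {seconds:06.2f} did.
    hours = int(timestamp // 3600)
    minutes = int((timestamp % 3600) // 60)
    seconds = round(float(timestamp % 60), 2)
    return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"

# Illustrative values: frame 4521 of a 30 fps input.
frame_id, original_fps = 4521, 30
start_time = int(frame_id) / original_fps      # 150.7 seconds
print(format_timestamp_for_video(start_time))  # -> "00:02:30.7"
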
@@ -686,23 +640,20 @@ class PotholeSegmentationUseCase(BaseProcessor):
  """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
  if not stream_info:
  return "00:00:00"
-
- is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
  if precision:
- if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
  return "00:00:00"
  else:
  return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
 
-
- if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
  # If video format, start from 00:00:00
  return "00:00:00"
  else:
  # For streams, use tracking start time or current time with minutes/seconds reset
  if self._tracking_start_time is None:
  # Try to extract timestamp from stream_time string
- stream_time_str = stream_info.get("stream_time", "")
+ stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
  if stream_time_str:
  try:
  # Remove " UTC" suffix and parse
@@ -720,60 +671,31 @@ class PotholeSegmentationUseCase(BaseProcessor):
  dt = dt.replace(minute=0, second=0, microsecond=0)
  return dt.strftime('%Y:%m:%d %H:%M:%S')
 
- # ------------------------------------------------------------------ #
- # Helper to merge masks back into detections #
- # ------------------------------------------------------------------ #
- def _attach_masks_to_detections(
- self,
- processed_detections: List[Dict[str, Any]],
- raw_detections: List[Dict[str, Any]],
- iou_threshold: float = 0.5,
- ) -> List[Dict[str, Any]]:
+ def _count_categories(self, detections: list, config: PotholeConfig) -> dict:
  """
- Attach segmentation masks from the original `raw_detections` list to the
- `processed_detections` list returned after smoothing/tracking.
-
- Matching between detections is performed using Intersection-over-Union
- (IoU) of the bounding boxes. For each processed detection we select the
- raw detection with the highest IoU above `iou_threshold` and copy its
- `masks` (or `mask`) field. If no suitable match is found, the detection
- keeps an empty list for `masks` to maintain a consistent schema.
+ Count the number of detections per category and return a summary dict.
+ The detections list is expected to have 'track_id' (from tracker), 'category', 'bounding_box', etc.
+ Output structure will include 'track_id' for each detection as per AdvancedTracker output.
  """
-
- if not processed_detections or not raw_detections:
- # Nothing to do – ensure masks key exists for downstream logic.
- for det in processed_detections:
- det.setdefault("masks", [])
- return processed_detections
-
- # Track which raw detections have already been matched to avoid
- # assigning the same mask to multiple processed detections.
- used_raw_indices = set()
-
- for det in processed_detections:
- best_iou = 0.0
- best_idx = None
-
- for idx, raw_det in enumerate(raw_detections):
- if idx in used_raw_indices:
- continue
-
- iou = self._compute_iou(det.get("bounding_box"), raw_det.get("bounding_box"))
- if iou > best_iou:
- best_iou = iou
- best_idx = idx
-
- if best_idx is not None and best_iou >= iou_threshold:
- raw_det = raw_detections[best_idx]
- masks = raw_det.get("masks", raw_det.get("mask"))
- if masks is not None:
- det["masks"] = masks
- used_raw_indices.add(best_idx)
- else:
- # No adequate match – default to empty list to keep schema consistent.
- det.setdefault("masks", ["EMPTY"])
-
- return processed_detections
+ counts = {}
+ for det in detections:
+ cat = det.get('category', 'unknown')
+ counts[cat] = counts.get(cat, 0) + 1
+ # Each detection dict will now include 'track_id' (and possibly 'frame_id')
+ return {
+ "total_count": sum(counts.values()),
+ "per_category_count": counts,
+ "detections": [
+ {
+ "bounding_box": det.get("bounding_box"),
+ "category": det.get("category"),
+ "confidence": det.get("confidence"),
+ "track_id": det.get("track_id"),
+ "frame_id": det.get("frame_id")
+ }
+ for det in detections
+ ]
+ }
 
  def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
  """
@@ -783,13 +705,11 @@ class PotholeSegmentationUseCase(BaseProcessor):
  {
  "category": det.get("category", "unknown"),
  "confidence": det.get("confidence", 0.0),
- "bounding_box": det.get("bounding_box", {}),
- "mask": det.get("mask", det.get("masks", None)) # Accept either key
+ "bounding_box": det.get("bounding_box", {})
  }
  for det in detections
  ]
 
-
  # ------------------------------------------------------------------ #
  # Canonical ID helpers #
  # ------------------------------------------------------------------ #
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: matrice
- Version: 1.0.99179
+ Version: 1.0.99180
  Summary: SDK for connecting to matrice.ai services
  Home-page: https://github.com/matrice-ai/python-sdk
  Author: Matrice.ai
@@ -189,7 +189,7 @@ matrice/deploy/utils/post_processing/usecases/pedestrian_detection.py,sha256=hPF
  matrice/deploy/utils/post_processing/usecases/people_counting.py,sha256=mDJOwcrs9OO4jIbJVr_ItWvjjGP2mgGFYlrP3R-mH2E,76528
  matrice/deploy/utils/post_processing/usecases/pipeline_detection.py,sha256=VsLTXMAqx0tRw7Olrxqx7SBLolZR7p2aFOrdSXLS-kE,30796
  matrice/deploy/utils/post_processing/usecases/plaque_segmentation_img.py,sha256=d__a0PkkObYVoC-Q5-2bFVfeyKnQHtB5xVAKVOCeFyk,41925
- matrice/deploy/utils/post_processing/usecases/pothole_segmentation.py,sha256=wfitk18xNT82IKYYA21dZxjVIERTpwbARnNLLQxHkeM,43236
+ matrice/deploy/utils/post_processing/usecases/pothole_segmentation.py,sha256=shxBDw9U59g2HPkNJTNPLBtXM10Zly_xC3QeB1oQrUE,39443
  matrice/deploy/utils/post_processing/usecases/ppe_compliance.py,sha256=G9P9j9E9nfNJInHJxmK1Lb4daFBlG5hq0aqotTLvFFE,30146
  matrice/deploy/utils/post_processing/usecases/price_tag_detection.py,sha256=09Tp6MGAHh95s-NSAp-4WC9iCc20sajWApuUBAvgXiQ,39880
  matrice/deploy/utils/post_processing/usecases/road_lane_detection.py,sha256=V_KxwBtAHSNkyoH8sXw-U-P3J8ToXtX3ncc69gn6Tds,31591
@@ -227,8 +227,8 @@ matrice/deployment/camera_manager.py,sha256=ReBZqm1CNXRImKcbcZ4uWAT3TUWkof1D28oB
  matrice/deployment/deployment.py,sha256=PLIUD-PxTaC2Zxb3Y12wUddsryV-OJetjCjLoSUh7S4,48103
  matrice/deployment/inference_pipeline.py,sha256=bXLgd29ViA7o0c7YWLFJl1otBUQfTPb61jS6VawQB0Y,37918
  matrice/deployment/streaming_gateway_manager.py,sha256=w5swGsuFVfZIdOm2ZuBHRHlRdYYJMLopLsf2gb91lQ8,20946
- matrice-1.0.99179.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
- matrice-1.0.99179.dist-info/METADATA,sha256=iHxOGzrUo-KBGZ8gYr_Eva1WYOSAl3IQ86itW_B26Gk,14624
- matrice-1.0.99179.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- matrice-1.0.99179.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
- matrice-1.0.99179.dist-info/RECORD,,
+ matrice-1.0.99180.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
+ matrice-1.0.99180.dist-info/METADATA,sha256=h-xuX7F-7q5n5QY1M_WYSAJrUGwjS96CSGO2dK-3iUA,14624
+ matrice-1.0.99180.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ matrice-1.0.99180.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
+ matrice-1.0.99180.dist-info/RECORD,,