matrice 1.0.99180__py3-none-any.whl → 1.0.99182__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice/deploy/utils/post_processing/usecases/license_plate_detection.py +19 -6
- matrice/deploy/utils/post_processing/usecases/pothole_segmentation.py +169 -76
- {matrice-1.0.99180.dist-info → matrice-1.0.99182.dist-info}/METADATA +1 -1
- {matrice-1.0.99180.dist-info → matrice-1.0.99182.dist-info}/RECORD +7 -7
- {matrice-1.0.99180.dist-info → matrice-1.0.99182.dist-info}/WHEEL +0 -0
- {matrice-1.0.99180.dist-info → matrice-1.0.99182.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice-1.0.99180.dist-info → matrice-1.0.99182.dist-info}/top_level.txt +0 -0
matrice/deploy/utils/post_processing/usecases/license_plate_detection.py
@@ -149,14 +149,26 @@ class LicensePlateUseCase(BaseProcessor):
 
         # Advanced tracking (BYTETracker-like)
         try:
-            from ..advanced_tracker import AdvancedTracker
-            from ..advanced_tracker.config import TrackerConfig
-
-            # Create tracker instance if it doesn't exist (preserves state across frames)
             if self.tracker is None:
-
+                # Configure tracker thresholds based on the use-case confidence threshold so that
+                # low-confidence detections (e.g. < 0.7) can still be initialised as tracks when
+                # the user passes a lower `confidence_threshold` in the post-processing config.
+                if config.confidence_threshold is not None:
+                    tracker_config = TrackerConfig(
+                        track_high_thresh=float(config.confidence_threshold),
+                        # Allow even lower detections to participate in secondary association
+                        track_low_thresh=max(0.05, float(config.confidence_threshold) / 2),
+                        new_track_thresh=float(config.confidence_threshold)
+                    )
+                else:
+                    tracker_config = TrackerConfig()
                 self.tracker = AdvancedTracker(tracker_config)
-                self.logger.info(
+                self.logger.info(
+                    "Initialized AdvancedTracker for Monitoring and tracking with thresholds: "
+                    f"high={tracker_config.track_high_thresh}, "
+                    f"low={tracker_config.track_low_thresh}, "
+                    f"new={tracker_config.new_track_thresh}"
+                )
 
             # The tracker expects the data in the same format as input
             # It will add track_id and frame_id to each detection
@@ -470,6 +482,7 @@ class LicensePlateUseCase(BaseProcessor):
 
         human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
         human_text_lines.append(f"Total License Plate Detected - ")
+        print(total_counts_dict)
         for cat, count in total_counts_dict.items():
             if count > 0:
                 human_text_lines.append(f"\t{cat}: {count}")
matrice/deploy/utils/post_processing/usecases/pothole_segmentation.py
@@ -6,10 +6,12 @@ zone analysis, and alert generation.
 
 """
 
+
 from typing import Any, Dict, List, Optional
 from dataclasses import asdict
 import time
 from datetime import datetime, timezone
+import copy  # Added for deep copying detections to preserve original masks
 
 from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
 from ..utils import (
@@ -59,7 +61,8 @@ class PotholeConfig(BaseConfig):
 
 
 class PotholeSegmentationUseCase(BaseProcessor):
-
+
+    # Human-friendly display names for categories
     CATEGORY_DISPLAY = {
         "pothole": "pothole"
     }
@@ -68,12 +71,12 @@ class PotholeSegmentationUseCase(BaseProcessor):
         super().__init__("pothole_segmentation")
         self.category = "infrastructure"
 
+        # List of categories to track
+        self.target_categories = ['pothole']
+
         self.CASE_TYPE: Optional[str] = 'pothole_segmentation'
         self.CASE_VERSION: Optional[str] = '1.3'
 
-        # List of categories to track
-        self.target_categories = ["pothole"]
-
         # Initialize smoothing tracker
         self.smoothing_tracker = None
 
@@ -87,6 +90,13 @@ class PotholeSegmentationUseCase(BaseProcessor):
         # Track start time for "TOTAL SINCE" calculation
         self._tracking_start_time = None
 
+        # ------------------------------------------------------------------ #
+        # Canonical tracking aliasing to avoid duplicate counts               #
+        # ------------------------------------------------------------------ #
+        # Maps raw tracker-generated IDs to stable canonical IDs that persist
+        # even if the underlying tracker re-assigns a new ID after a short
+        # interruption. This mirrors the logic used in people_counting to
+        # provide accurate unique counting.
        self._track_aliases: Dict[Any, Any] = {}
        self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
        # Tunable parameters – adjust if necessary for specific scenarios
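The aliasing helpers themselves are not included in this diff; the following is a purely hypothetical sketch of the pattern the new comment describes (all names, thresholds, and the injected iou_fn are assumptions, not the package's actual implementation).

# Hypothetical sketch: map raw tracker IDs to stable canonical IDs so that a
# re-assigned ID after a brief dropout does not inflate unique counts.
def resolve_canonical_id(raw_id, bbox, frame_id, track_aliases, canonical_tracks,
                         iou_fn, iou_thresh=0.5, max_gap_frames=30):
    if raw_id in track_aliases:  # raw ID already aliased to a canonical track
        canon_id = track_aliases[raw_id]
        canonical_tracks[canon_id].update(last_bbox=bbox, last_frame=frame_id)
        return canon_id
    for canon_id, info in canonical_tracks.items():  # merge with a recent, overlapping track
        if frame_id - info["last_frame"] <= max_gap_frames and iou_fn(bbox, info["last_bbox"]) >= iou_thresh:
            track_aliases[raw_id] = canon_id
            info.update(last_bbox=bbox, last_frame=frame_id)
            return canon_id
    track_aliases[raw_id] = raw_id  # otherwise register a new canonical track
    canonical_tracks[raw_id] = {"last_bbox": bbox, "last_frame": frame_id}
    return raw_id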
@@ -99,7 +109,7 @@ class PotholeSegmentationUseCase(BaseProcessor):
     def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
                 stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
         """
-        Main entry point for
+        Main entry point for post-processing.
         Applies category mapping, smoothing, counting, alerting, and summary generation.
         Returns a ProcessingResult with all relevant outputs.
         """
@@ -115,60 +125,80 @@ class PotholeSegmentationUseCase(BaseProcessor):
         input_format = match_results_structure(data)
         context.input_format = input_format
         context.confidence_threshold = config.confidence_threshold
-
+
+        # Step 1: Confidence filtering
         if config.confidence_threshold is not None:
             processed_data = filter_by_confidence(data, config.confidence_threshold)
-            self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
         else:
             processed_data = data
-
             self.logger.debug(f"Did not apply confidence filtering with threshold since nothing was provided")
 
         # Step 2: Apply category mapping if provided
         if config.index_to_category:
             processed_data = apply_category_mapping(processed_data, config.index_to_category)
-            self.logger.debug("Applied category mapping")
 
+        # Step 3: Category filtering
         if config.target_categories:
             processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
-            self.logger.debug(f"Applied category filtering")
 
-        # Apply bbox smoothing if enabled
+        # Step 4: Apply bbox smoothing if enabled
+        # Deep-copy detections so that we preserve the original masks before any
+        # smoothing/tracking logic potentially removes them.
+        raw_processed_data = [copy.deepcopy(det) for det in processed_data]
         if config.enable_smoothing:
             if self.smoothing_tracker is None:
                 smoothing_config = BBoxSmoothingConfig(
                     smoothing_algorithm=config.smoothing_algorithm,
                     window_size=config.smoothing_window_size,
                     cooldown_frames=config.smoothing_cooldown_frames,
-                    confidence_threshold=config.confidence_threshold,
+                    confidence_threshold=config.confidence_threshold,
                     confidence_range_factor=config.smoothing_confidence_range_factor,
                     enable_smoothing=True
                 )
                 self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
+
             processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
+            # Restore masks after smoothing
 
-        # Advanced tracking (BYTETracker-like)
+        # Step 5: Advanced tracking (BYTETracker-like)
         try:
-            from ..advanced_tracker import AdvancedTracker
-            from ..advanced_tracker.config import TrackerConfig
-
-            # Create tracker instance if it doesn't exist (preserves state across frames)
             if self.tracker is None:
-
+                # Configure tracker thresholds based on the use-case confidence threshold so that
+                # low-confidence detections (e.g. < 0.7) can still be initialised as tracks when
+                # the user passes a lower `confidence_threshold` in the post-processing config.
+                if config.confidence_threshold is not None:
+                    tracker_config = TrackerConfig(
+                        track_high_thresh=float(config.confidence_threshold),
+                        # Allow even lower detections to participate in secondary association
+                        track_low_thresh=max(0.05, float(config.confidence_threshold) / 2),
+                        new_track_thresh=float(config.confidence_threshold)
+                    )
+                else:
+                    tracker_config = TrackerConfig()
                 self.tracker = AdvancedTracker(tracker_config)
-                self.logger.info(
-
-
-
+                self.logger.info(
+                    "Initialized AdvancedTracker for Monitoring and tracking with thresholds: "
+                    f"high={tracker_config.track_high_thresh}, "
+                    f"low={tracker_config.track_low_thresh}, "
+                    f"new={tracker_config.new_track_thresh}"
+                )
+
             processed_data = self.tracker.update(processed_data)
-
         except Exception as e:
             # If advanced tracker fails, fallback to unsmoothed detections
             self.logger.warning(f"AdvancedTracker failed: {e}")
 
-        # Update
+        # Update tracking state for total count per label
         self._update_tracking_state(processed_data)
 
+        # ------------------------------------------------------------------ #
+        # Re-attach segmentation masks that were present in the original input
+        # but may have been stripped during smoothing/tracking. We match each
+        # processed detection back to the raw detection with the highest IoU
+        # and copy over its "masks" field (if available).
+        # ------------------------------------------------------------------ #
+        processed_data = self._attach_masks_to_detections(processed_data, raw_processed_data)
+
         # Update frame counter
         self._total_frame_counter += 1
 
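_update_tracking_state and get_total_counts are not shown in this diff; the sketch below is a rough illustration of the bookkeeping they imply, consistent with the _per_category_total_track_ids attribute that appears later in the diff (the function name and signature are assumptions).

# Hypothetical sketch: accumulate track IDs per category in sets so unique
# totals can be reported as len(ids) per category.
def update_tracking_state(detections, per_category_total_track_ids):
    for det in detections:
        track_id = det.get("track_id")
        if track_id is None:
            continue
        cat = det.get("category", "unknown")
        per_category_total_track_ids.setdefault(cat, set()).add(track_id)

state = {}
update_tracking_state([{"category": "pothole", "track_id": 3},
                       {"category": "pothole", "track_id": 3}], state)
assert {cat: len(ids) for cat, ids in state.items()} == {"pothole": 1}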
@@ -183,19 +213,20 @@ class PotholeSegmentationUseCase(BaseProcessor):
             frame_number = start_frame
 
         # Compute summaries and alerts
-        general_counting_summary = calculate_counting_summary(data)
-        counting_summary = self._count_categories(processed_data, config)
-        # Add total unique
-        total_counts = self.get_total_counts()
-        counting_summary['total_counts'] = total_counts
-
+        general_counting_summary = calculate_counting_summary(data)
+        counting_summary = self._count_categories(processed_data, config)
+        # Add total unique counts after tracking using only local state
+        total_counts = self.get_total_counts()
+        counting_summary['total_counts'] = total_counts
+
         alerts = self._check_alerts(counting_summary, frame_number, config)
         predictions = self._extract_predictions(processed_data)
 
-        # Step: Generate structured
+        # Step: Generate structured events and tracking stats with frame-based keys
         incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
-        tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number,
-        business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, stream_info, is_empty=
+        tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number,stream_info)
+        # business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, frame_number, stream_info, is_empty=False)
+        business_analytics_list = []
         summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)
 
         # Extract frame-based dictionaries from the lists
@@ -210,8 +241,7 @@ class PotholeSegmentationUseCase(BaseProcessor):
                 "alerts": alerts,
                 "human_text": summary}
             }
-
-
+
         context.mark_completed()
 
         # Build result object following the new pattern
@@ -273,8 +303,8 @@ class PotholeSegmentationUseCase(BaseProcessor):
                     "threshold_level": threshold,
                     "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                     "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
-
-
+                                 getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+                        }
                 })
             elif category in summary.get("per_category_count", {}):
                 count = summary.get("per_category_count", {})[category]
@@ -286,25 +316,27 @@ class PotholeSegmentationUseCase(BaseProcessor):
                     "threshold_level": threshold,
                     "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                     "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
-
-
+                                 getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+                        }
                 })
             else:
                 pass
         return alerts
 
     def _generate_incidents(self, counting_summary: Dict, alerts: List, config: PotholeConfig,
-
+                            frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[
         Dict]:
-        """Generate structured
-
-
+        """Generate structured events for the output format with frame-based keys."""
+
+        # Use frame number as key, fallback to 'current_frame' if not available
+        frame_key = str(frame_number) if frame_number is not None else "current_frame"
+        incidents=[]
         total_detections = counting_summary.get("total_count", 0)
         current_timestamp = self._get_current_timestamp_str(stream_info)
         camera_info = self.get_camera_info_from_stream(stream_info)
 
         self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list
-
+
         if total_detections > 0:
             # Determine event level based on thresholds
             level = "low"
|
             intensity = min(10.0, total_detections / 3.0)
             self._ascending_alert_list.append(0)
 
-
+        # Generate human text in new format
         human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
         human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE,level)}")
         human_text = "\n".join(human_text_lines)
@@ -470,6 +502,7 @@ class PotholeSegmentationUseCase(BaseProcessor):
 
         human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
         human_text_lines.append(f"Total Potholes Detected - ")
+        print(total_counts_dict)
         for cat, count in total_counts_dict.items():
             if count > 0:
                 human_text_lines.append(f"\t{cat}: {count}")
@@ -499,7 +532,7 @@ class PotholeSegmentationUseCase(BaseProcessor):
             tracking_stats.append(tracking_stat)
         return tracking_stats
 
-    def _generate_business_analytics(self, counting_summary: Dict,
+    def _generate_business_analytics(self, counting_summary: Dict, zone_analysis: Dict, config: PotholeConfig, stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
         """Generate standardized business analytics for the agg_summary structure."""
         if is_empty:
             return []
@@ -530,6 +563,36 @@ class PotholeSegmentationUseCase(BaseProcessor):
 
         return [lines]
 
+
+    def _count_categories(self, detections: list, config: PotholeConfig) -> dict:
+        """
+        Count the number of detections per category and return a summary dict.
+        The detections list is expected to have 'track_id' (from tracker), 'category', 'bounding_box', 'masks', etc.
+        Output structure will include 'track_id' and 'masks' for each detection as per AdvancedTracker output.
+        """
+        counts = {}
+        valid_detections = []
+        for det in detections:
+            cat = det.get('category', 'unknown')
+            if not all(k in det for k in ['category', 'confidence', 'bounding_box']):  # Validate required fields
+                self.logger.warning(f"Skipping invalid detection: {det}")
+                continue
+            counts[cat] = counts.get(cat, 0) + 1
+            valid_detections.append({
+                "bounding_box": det.get("bounding_box"),
+                "category": det.get("category"),
+                "confidence": det.get("confidence"),
+                "track_id": det.get("track_id"),
+                "frame_id": det.get("frame_id"),
+                "masks": det.get("masks", det.get("mask", []))  # Include masks, fallback to empty list
+            })
+        self.logger.debug(f"Valid detections after filtering: {len(valid_detections)}")
+        return {
+            "total_count": sum(counts.values()),
+            "per_category_count": counts,
+            "detections": valid_detections
+        }
+
     def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
         """
         Get detailed information about track IDs (per frame).
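For reference, a hypothetical input/output example for the _count_categories method added above; only the keys the method actually checks are taken from the diff, and the bounding-box format shown here is an assumption.

detections = [
    {"category": "pothole", "confidence": 0.82, "track_id": 1, "frame_id": 7,
     "bounding_box": {"xmin": 10, "ymin": 20, "xmax": 50, "ymax": 60},  # schema assumed
     "masks": [[12, 22, 48, 58]]},
    {"category": "pothole", "confidence": 0.64, "track_id": 2, "frame_id": 7,
     "bounding_box": {"xmin": 100, "ymin": 40, "xmax": 140, "ymax": 90}},
    {"category": "pothole"},  # skipped: missing 'confidence' and 'bounding_box'
]
# _count_categories(detections, config) would return roughly:
# {"total_count": 2,
#  "per_category_count": {"pothole": 2},
#  "detections": [... two entries, each carrying track_id, frame_id and masks (or []) ...]}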
@@ -583,12 +646,6 @@ class PotholeSegmentationUseCase(BaseProcessor):
         """
         return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
 
-
-    def _format_timestamp_for_stream(self, timestamp: float) -> str:
-        """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
-        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
-        return dt.strftime('%Y:%m:%d %H:%M:%S')
-
     def _format_timestamp_for_video(self, timestamp: float) -> str:
         """Format timestamp for video chunks (HH:MM:SS.ms format)."""
         hours = int(timestamp // 3600)
@@ -596,6 +653,11 @@ class PotholeSegmentationUseCase(BaseProcessor):
         seconds = round(float(timestamp % 60),2)
         return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
 
+    def _format_timestamp_for_stream(self, timestamp: float) -> str:
+        """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
+        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
+        return dt.strftime('%Y:%m:%d %H:%M:%S')
+
     def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
         """Get formatted current timestamp based on stream type."""
         if not stream_info:
@@ -671,31 +733,60 @@ class PotholeSegmentationUseCase(BaseProcessor):
             dt = dt.replace(minute=0, second=0, microsecond=0)
         return dt.strftime('%Y:%m:%d %H:%M:%S')
 
-
+    # ------------------------------------------------------------------ #
+    # Helper to merge masks back into detections                          #
+    # ------------------------------------------------------------------ #
+    def _attach_masks_to_detections(
+        self,
+        processed_detections: List[Dict[str, Any]],
+        raw_detections: List[Dict[str, Any]],
+        iou_threshold: float = 0.5,
+    ) -> List[Dict[str, Any]]:
         """
-
-
-
+        Attach segmentation masks from the original `raw_detections` list to the
+        `processed_detections` list returned after smoothing/tracking.
+
+        Matching between detections is performed using Intersection-over-Union
+        (IoU) of the bounding boxes. For each processed detection we select the
+        raw detection with the highest IoU above `iou_threshold` and copy its
+        `masks` (or `mask`) field. If no suitable match is found, the detection
+        keeps an empty list for `masks` to maintain a consistent schema.
         """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+        if not processed_detections or not raw_detections:
+            # Nothing to do – ensure masks key exists for downstream logic.
+            for det in processed_detections:
+                det.setdefault("masks", [])
+            return processed_detections
+
+        # Track which raw detections have already been matched to avoid
+        # assigning the same mask to multiple processed detections.
+        used_raw_indices = set()
+
+        for det in processed_detections:
+            best_iou = 0.0
+            best_idx = None
+
+            for idx, raw_det in enumerate(raw_detections):
+                if idx in used_raw_indices:
+                    continue
+
+                iou = self._compute_iou(det.get("bounding_box"), raw_det.get("bounding_box"))
+                if iou > best_iou:
+                    best_iou = iou
+                    best_idx = idx
+
+            if best_idx is not None and best_iou >= iou_threshold:
+                raw_det = raw_detections[best_idx]
+                masks = raw_det.get("masks", raw_det.get("mask"))
+                if masks is not None:
+                    det["masks"] = masks
+                used_raw_indices.add(best_idx)
+            else:
+                # No adequate match – default to empty list to keep schema consistent.
+                det.setdefault("masks", ["EMPTY"])
+
+        return processed_detections
 
     def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
         """
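The _compute_iou helper referenced above is not part of this diff; below is a minimal sketch of a standard bounding-box IoU, assuming corner-format boxes stored as dicts with xmin/ymin/xmax/ymax keys (the package's actual box schema is not visible here).

# Sketch only: standard IoU over corner-format boxes; the dict keys are assumed.
def compute_iou(box_a, box_b):
    if not box_a or not box_b:
        return 0.0
    ax1, ay1, ax2, ay2 = box_a["xmin"], box_a["ymin"], box_a["xmax"], box_a["ymax"]
    bx1, by1, bx2, by2 = box_b["xmin"], box_b["ymin"], box_b["xmax"], box_b["ymax"]
    inter_w = max(0.0, min(ax2, bx2) - max(ax1, bx1))
    inter_h = max(0.0, min(ay2, by2) - max(ay1, by1))
    inter = inter_w * inter_h
    union = (ax2 - ax1) * (ay2 - ay1) + (bx2 - bx1) * (by2 - by1) - inter
    return inter / union if union > 0 else 0.0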
@@ -705,11 +796,13 @@ class PotholeSegmentationUseCase(BaseProcessor):
             {
                 "category": det.get("category", "unknown"),
                 "confidence": det.get("confidence", 0.0),
-                "bounding_box": det.get("bounding_box", {})
+                "bounding_box": det.get("bounding_box", {}),
+                "mask": det.get("mask", det.get("masks", None))  # Accept either key
             }
             for det in detections
         ]
 
+
     # ------------------------------------------------------------------ #
     # Canonical ID helpers                                                #
     # ------------------------------------------------------------------ #
{matrice-1.0.99180.dist-info → matrice-1.0.99182.dist-info}/RECORD
@@ -181,7 +181,7 @@ matrice/deploy/utils/post_processing/usecases/flower_segmentation.py,sha256=4I7q
 matrice/deploy/utils/post_processing/usecases/gender_detection.py,sha256=DEnCTRew6B7DtPcBQVCTtpd_IQMvMusBcu6nadUg2oM,40107
 matrice/deploy/utils/post_processing/usecases/leaf.py,sha256=cwgB1ZNxkQFtkk-thSJrkXOGou1ghJr1kqtopb3sLD4,37036
 matrice/deploy/utils/post_processing/usecases/leaf_disease.py,sha256=bkiLccTdf4KUq3he4eCpBlKXb5exr-WBhQ_oWQ7os68,36225
-matrice/deploy/utils/post_processing/usecases/license_plate_detection.py,sha256=
+matrice/deploy/utils/post_processing/usecases/license_plate_detection.py,sha256=a3ccHyw7CTpRFSD9AxeM3_JFW8JDSY-sZ3rIajFlyM4,40420
 matrice/deploy/utils/post_processing/usecases/mask_detection.py,sha256=MNpCcuefOdW7C8g_x_mNuWYA4mbyg8UNwomwBPoKtr0,39684
 matrice/deploy/utils/post_processing/usecases/parking.py,sha256=lqTGqcjUZZPFw3tu11Ha8BSsZ311K5--wEZnlVsXakU,34534
 matrice/deploy/utils/post_processing/usecases/parking_space_detection.py,sha256=xwhkJjGGKcT827URbasi3olYqhd95Sh0zsEIphwzcgY,39561
@@ -189,7 +189,7 @@ matrice/deploy/utils/post_processing/usecases/pedestrian_detection.py,sha256=hPF
 matrice/deploy/utils/post_processing/usecases/people_counting.py,sha256=mDJOwcrs9OO4jIbJVr_ItWvjjGP2mgGFYlrP3R-mH2E,76528
 matrice/deploy/utils/post_processing/usecases/pipeline_detection.py,sha256=VsLTXMAqx0tRw7Olrxqx7SBLolZR7p2aFOrdSXLS-kE,30796
 matrice/deploy/utils/post_processing/usecases/plaque_segmentation_img.py,sha256=d__a0PkkObYVoC-Q5-2bFVfeyKnQHtB5xVAKVOCeFyk,41925
-matrice/deploy/utils/post_processing/usecases/pothole_segmentation.py,sha256=
+matrice/deploy/utils/post_processing/usecases/pothole_segmentation.py,sha256=qAiUUO2sdwzDNNWWBstzAOYgZjoshS3ltpKViy4z_D4,44390
 matrice/deploy/utils/post_processing/usecases/ppe_compliance.py,sha256=G9P9j9E9nfNJInHJxmK1Lb4daFBlG5hq0aqotTLvFFE,30146
 matrice/deploy/utils/post_processing/usecases/price_tag_detection.py,sha256=09Tp6MGAHh95s-NSAp-4WC9iCc20sajWApuUBAvgXiQ,39880
 matrice/deploy/utils/post_processing/usecases/road_lane_detection.py,sha256=V_KxwBtAHSNkyoH8sXw-U-P3J8ToXtX3ncc69gn6Tds,31591
@@ -227,8 +227,8 @@ matrice/deployment/camera_manager.py,sha256=ReBZqm1CNXRImKcbcZ4uWAT3TUWkof1D28oB
 matrice/deployment/deployment.py,sha256=PLIUD-PxTaC2Zxb3Y12wUddsryV-OJetjCjLoSUh7S4,48103
 matrice/deployment/inference_pipeline.py,sha256=bXLgd29ViA7o0c7YWLFJl1otBUQfTPb61jS6VawQB0Y,37918
 matrice/deployment/streaming_gateway_manager.py,sha256=w5swGsuFVfZIdOm2ZuBHRHlRdYYJMLopLsf2gb91lQ8,20946
-matrice-1.0.
-matrice-1.0.
-matrice-1.0.
-matrice-1.0.
-matrice-1.0.
+matrice-1.0.99182.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
+matrice-1.0.99182.dist-info/METADATA,sha256=TB_QMqw3qQEIK0ILQtep9PNINCuBd7nXohCeM1gm4Hs,14624
+matrice-1.0.99182.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+matrice-1.0.99182.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
+matrice-1.0.99182.dist-info/RECORD,,
File without changes
File without changes
File without changes