matrice-1.0.99128-py3-none-any.whl → matrice-1.0.99130-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice/deploy/utils/post_processing/usecases/concrete_crack_detection.py +439 -408
- matrice/deploy/utils/post_processing/usecases/weld_defect_detection.py +1 -1
- {matrice-1.0.99128.dist-info → matrice-1.0.99130.dist-info}/METADATA +1 -1
- {matrice-1.0.99128.dist-info → matrice-1.0.99130.dist-info}/RECORD +7 -7
- {matrice-1.0.99128.dist-info → matrice-1.0.99130.dist-info}/WHEEL +0 -0
- {matrice-1.0.99128.dist-info → matrice-1.0.99130.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice-1.0.99128.dist-info → matrice-1.0.99130.dist-info}/top_level.txt +0 -0
matrice/deploy/utils/post_processing/usecases/concrete_crack_detection.py

```diff
@@ -31,7 +31,7 @@ class ConcreteCrackConfig(BaseConfig):
     smoothing_confidence_range_factor: float = 0.5
 
     #confidence thresholds
-    confidence_threshold: float = 0.
+    confidence_threshold: float = 0.6
 
     usecase_categories: List[str] = field(
         default_factory=lambda: ['Cracks']
```
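The only functional change in this hunk is the confidence threshold: the new default is an explicit 0.6 (the previous default is truncated in this view and is left truncated above). As a minimal sketch of what that default does, assuming detections are dicts with a `confidence` key like the ones handled later in this diff:

```python
# Hypothetical illustration only; the real filtering happens inside
# ConcreteCrackUseCase.process() ("Applied confidence filtering with threshold ...").
detections = [
    {"category": "Cracks", "confidence": 0.72},
    {"category": "Cracks", "confidence": 0.41},
]

confidence_threshold = 0.6  # new ConcreteCrackConfig default
kept = [d for d in detections if d["confidence"] >= confidence_threshold]
print(kept)  # only the 0.72 detection survives the new default
```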
```diff
@@ -45,159 +45,26 @@ class ConcreteCrackConfig(BaseConfig):
 
     index_to_category: Optional[Dict[int, str]] = field(
         default_factory=lambda: {
-            0: "Cracks"
+            0: "Cracks",
         }
     )
 
 
 class ConcreteCrackUseCase(BaseProcessor):
-
-
-
-
-        # Collect all track_ids in this frame
-        frame_track_ids = set()
-        for det in detections:
-            tid = det.get('track_id')
-            if tid is not None:
-                frame_track_ids.add(tid)
-        # Use persistent total set for unique counting
-        total_track_ids = set()
-        for s in getattr(self, '_per_category_total_track_ids', {}).values():
-            total_track_ids.update(s)
-        return {
-            "total_count": len(total_track_ids),
-            "current_frame_count": len(frame_track_ids),
-            "total_unique_track_ids": len(total_track_ids),
-            "current_frame_track_ids": list(frame_track_ids),
-            "last_update_time": time.time(),
-            "total_frames_processed": getattr(self, '_total_frame_counter', 0)
-        }
-
-
-
-
-
-    def _update_tracking_state(self, detections: list):
-        """
-        Track unique categories track_ids per category for total count after tracking.
-        Applies canonical ID merging to avoid duplicate counting when the underlying
-        tracker loses an object temporarily and assigns a new ID.
-        """
-        # Lazily initialise storage dicts
-        if not hasattr(self, "_per_category_total_track_ids"):
-            self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
-            self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
-
-        for det in detections:
-            cat = det.get("category")
-            raw_track_id = det.get("track_id")
-            if cat not in self.target_categories or raw_track_id is None:
-                continue
-            bbox = det.get("bounding_box", det.get("bbox"))
-            canonical_id = self._merge_or_register_track(raw_track_id, bbox)
-            # Propagate canonical ID back to detection so downstream logic uses it
-            det["track_id"] = canonical_id
-
-            self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
-            self._current_frame_track_ids[cat].add(canonical_id)
-
-    def get_total_counts(self):
-        """
-        Return total unique track_id count for each category.
-        """
-        return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
-
-    def _format_timestamp_for_video(self, timestamp: float) -> str:
-        """Format timestamp for video chunks (HH:MM:SS.ms format)."""
-        hours = int(timestamp // 3600)
-        minutes = int((timestamp % 3600) // 60)
-        seconds = timestamp % 60
-        return f"{hours:02d}:{minutes:02d}:{seconds:06.2f}"
-
-    def _format_timestamp_for_stream(self, timestamp: float) -> str:
-        """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
-        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
-        return dt.strftime('%Y:%m:%d %H:%M:%S')
-
-    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]]) -> str:
-        """Get formatted current timestamp based on stream type."""
-        if not stream_info:
-            return "00:00:00.00"
-
-        is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
-
-        # if is_video_chunk:
-        #     # For video chunks, use video_timestamp from stream_info
-        #     video_timestamp = stream_info.get("video_timestamp", 0.0)
-        #     return self._format_timestamp_for_video(video_timestamp)
-        if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
-            # If video format, return video timestamp
-            stream_time_str = stream_info.get("video_timestamp", "")
-            return stream_time_str[:8]
-        else:
-            # For streams, use stream_time from stream_info
-            stream_time_str = stream_info.get("stream_time", "")
-            if stream_time_str:
-                # Parse the high precision timestamp string to get timestamp
-                try:
-                    # Remove " UTC" suffix and parse
-                    timestamp_str = stream_time_str.replace(" UTC", "")
-                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
-                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
-                    return self._format_timestamp_for_stream(timestamp)
-                except:
-                    # Fallback to current time if parsing fails
-                    return self._format_timestamp_for_stream(time.time())
-            else:
-                return self._format_timestamp_for_stream(time.time())
-
-    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]]) -> str:
-        """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
-        if not stream_info:
-            return "00:00:00"
-
-        is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
-
-        if is_video_chunk:
-            # For video chunks, start from 00:00:00
-            return "00:00:00"
-        elif stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
-            # If video format, start from 00:00:00
-            return "00:00:00"
-        else:
-            # For streams, use tracking start time or current time with minutes/seconds reset
-            if self._tracking_start_time is None:
-                # Try to extract timestamp from stream_time string
-                stream_time_str = stream_info.get("stream_time", "")
-                if stream_time_str:
-                    try:
-                        # Remove " UTC" suffix and parse
-                        timestamp_str = stream_time_str.replace(" UTC", "")
-                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
-                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
-                    except:
-                        # Fallback to current time if parsing fails
-                        self._tracking_start_time = time.time()
-                else:
-                    self._tracking_start_time = time.time()
-
-            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
-            # Reset minutes and seconds to 00:00 for "TOTAL SINCE" format
-            dt = dt.replace(minute=0, second=0, microsecond=0)
-            return dt.strftime('%Y:%m:%d %H:%M:%S')
-
-    """ Monitoring use case with smoothing and alerting."""
-
+    # Human-friendly display names for categories
+    CATEGORY_DISPLAY = {
+        "Cracks": "Cracks",
+    }
     def __init__(self):
         super().__init__("concrete_crack_detection")
         self.category = "general"
 
+        self.CASE_TYPE: Optional[str] = 'concrete_crack_detection'
+        self.CASE_VERSION: Optional[str] = '1.3'
+
         # List of categories to track
         self.target_categories = ["Cracks"]
 
-
-
         # Initialize smoothing tracker
         self.smoothing_tracker = None
 
```
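Besides the trailing-comma fix, this hunk moves a block of helper methods out of the top of the class (they are re-added lower in the file; see the large hunk below) and introduces class-level `CATEGORY_DISPLAY` plus instance `CASE_TYPE`/`CASE_VERSION` attributes. A rough sketch of how the `index_to_category` default and the display map fit together, with illustrative detections (the actual remapping is done by `process()` in its "Step 2: Apply category mapping" step):

```python
# Illustrative only: shows the intent of the two mappings added/kept here.
index_to_category = {0: "Cracks"}          # ConcreteCrackConfig default
CATEGORY_DISPLAY = {"Cracks": "Cracks"}    # class-level display names

raw = [{"category": 0, "confidence": 0.8}]
for det in raw:
    det["category"] = index_to_category.get(det["category"], str(det["category"]))
    det["display"] = CATEGORY_DISPLAY.get(det["category"], det["category"])
print(raw)  # [{'category': 'Cracks', 'confidence': 0.8, 'display': 'Cracks'}]
```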
```diff
@@ -211,19 +78,15 @@ class ConcreteCrackUseCase(BaseProcessor):
         # Track start time for "TOTAL SINCE" calculation
         self._tracking_start_time = None
 
-        # ------------------------------------------------------------------ #
-        # Canonical tracking aliasing to avoid duplicate counts               #
-        # ------------------------------------------------------------------ #
-        # Maps raw tracker-generated IDs to stable canonical IDs that persist
-        # even if the underlying tracker re-assigns a new ID after a short
-        # interruption. This mirrors the logic used in people_counting to
-        # provide accurate unique counting.
         self._track_aliases: Dict[Any, Any] = {}
         self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
         # Tunable parameters – adjust if necessary for specific scenarios
         self._track_merge_iou_threshold: float = 0.05  # IoU ≥ 0.05 →
         self._track_merge_time_window: float = 7.0  # seconds within which to merge
 
+        self._ascending_alert_list: List[int] = []
+        self.current_incident_end_timestamp: str = "N/A"
+
     def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
                 stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
         """
```
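The new `_ascending_alert_list` and `current_incident_end_timestamp` state feeds the incident logic added later in this diff. The two merge parameters kept here drive canonical-ID merging; a minimal IoU sketch follows (the real merge lives in the "Canonical ID helpers" section referenced at the end of this diff, and the `[x1, y1, x2, y2]` box layout is an assumption):

```python
def iou(a, b):
    """Intersection-over-union of two [x1, y1, x2, y2] boxes."""
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
    return inter / union if union > 0 else 0.0

# A re-assigned track ID is folded back onto its canonical ID when the boxes
# still overlap slightly (IoU >= 0.05) within a 7-second window.
print(iou([10, 10, 50, 50], [12, 11, 52, 49]) >= 0.05)  # True -> same track
```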
```diff
@@ -249,6 +112,7 @@ class ConcreteCrackUseCase(BaseProcessor):
             self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
         else:
             processed_data = data
+
             self.logger.debug(f"Did not apply confidence filtering with threshold since nothing was provided")
 
         # Step 2: Apply category mapping if provided
```
```diff
@@ -274,7 +138,6 @@ class ConcreteCrackUseCase(BaseProcessor):
                 self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
             processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
 
-
         # Advanced tracking (BYTETracker-like)
         try:
             from ..advanced_tracker import AdvancedTracker
```
```diff
@@ -294,9 +157,6 @@ class ConcreteCrackUseCase(BaseProcessor):
             # If advanced tracker fails, fallback to unsmoothed detections
             self.logger.warning(f"AdvancedTracker failed: {e}")
 
-
-
-
         # Update tracking state for total count per label
         self._update_tracking_state(processed_data)
 
```
```diff
@@ -314,218 +174,490 @@ class ConcreteCrackUseCase(BaseProcessor):
         frame_number = start_frame
 
         # Compute summaries and alerts
-        general_counting_summary = calculate_counting_summary(data)
-        counting_summary = self._count_categories(processed_data, config)
+        general_counting_summary = calculate_counting_summary(data)
+        counting_summary = self._count_categories(processed_data, config)
         # Add total unique counts after tracking using only local state
-        total_counts = self.get_total_counts()
-        counting_summary['total_counts'] = total_counts
-
-        alerts = self._check_alerts(counting_summary, config)
-        predictions = self._extract_predictions(processed_data)
-
-
-
-
-
-
+        total_counts = self.get_total_counts()
+        counting_summary['total_counts'] = total_counts
+
+        alerts = self._check_alerts(counting_summary, frame_number, config)
+        predictions = self._extract_predictions(processed_data)
+
+        # Step: Generate structured incidents, tracking stats and business analytics with frame-based keys
+        incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
+        tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
+        # business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, frame_number, stream_info, is_empty=True)
+        business_analytics_list = []
+        summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)
 
         # Extract frame-based dictionaries from the lists
-
+        incidents = incidents_list[0] if incidents_list else {}
         tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
-
+        business_analytics = business_analytics_list[0] if business_analytics_list else {}
+        summary = summary_list[0] if summary_list else {}
+        agg_summary = {str(frame_number): {
+            "incidents": incidents,
+            "tracking_stats": tracking_stats,
+            "business_analytics": business_analytics,
+            "alerts": alerts,
+            "human_text": summary}
+        }
+
         context.mark_completed()
 
-        # Build result object
+        # Build result object following the new pattern
+
         result = self.create_result(
-            data={
-                "counting_summary": counting_summary,
-                "general_counting_summary": general_counting_summary,
-                "alerts": alerts,
-                "total_detections": counting_summary.get("total_count", 0),
-                "events": events,
-                "tracking_stats": tracking_stats,
-            },
+            data={"agg_summary": agg_summary},
             usecase=self.name,
             category=self.category,
             context=context
         )
-
-        result.insights = insights
-        result.predictions = predictions
+
         return result
 
+    def _check_alerts(self, summary: dict, frame_number:Any, config: ConcreteCrackConfig) -> List[Dict]:
+        """
+        Check if any alert thresholds are exceeded and return alert dicts.
+        """
+        def get_trend(data, lookback=900, threshold=0.6):
+            '''
+            Determine if the trend is ascending or descending based on actual value progression.
+            Now works with values 0,1,2,3 (not just binary).
+            '''
+            window = data[-lookback:] if len(data) >= lookback else data
+            if len(window) < 2:
+                return True  # not enough data to determine trend
+            increasing = 0
+            total = 0
+            for i in range(1, len(window)):
+                if window[i] >= window[i - 1]:
+                    increasing += 1
+                total += 1
+            ratio = increasing / total
+            if ratio >= threshold:
+                return True
+            elif ratio <= (1 - threshold):
+                return False
+
+        frame_key = str(frame_number) if frame_number is not None else "current_frame"
+        alerts = []
+        total_detections = summary.get("total_count", 0)  #CURRENT combined total count of all classes
+        total_counts_dict = summary.get("total_counts", {})  #TOTAL cumulative counts per class
+        cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0  #TOTAL combined cumulative count
+        per_category_count = summary.get("per_category_count", {})  #CURRENT count per class
 
+        if not config.alert_config:
+            return alerts
 
-
-
-
-        """Generate structured events for the output format with frame-based keys."""
-        from datetime import datetime, timezone
+        total = summary.get("total_count", 0)
+        #self._ascending_alert_list
+        if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
 
-
-
-        events = [{frame_key: []}]
-        frame_events = events[0][frame_key]
-        total_detections = counting_summary.get("total_count", 0)
+            for category, threshold in config.alert_config.count_thresholds.items():
+                if category == "all" and total > threshold:
 
+                    alerts.append({
+                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                        "alert_id": "alert_"+category+'_'+frame_key,
+                        "incident_category": self.CASE_TYPE,
+                        "threshold_level": threshold,
+                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                                                          getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+                                     }
+                    })
+                elif category in summary.get("per_category_count", {}):
+                    count = summary.get("per_category_count", {})[category]
+                    if count > threshold:  # Fixed logic: alert when EXCEEDING threshold
+                        alerts.append({
+                            "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                            "alert_id": "alert_"+category+'_'+frame_key,
+                            "incident_category": self.CASE_TYPE,
+                            "threshold_level": threshold,
+                            "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                            "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                                                              getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+                                         }
+                        })
+                    else:
+                        pass
+        return alerts
+
+    def _generate_incidents(self, counting_summary: Dict, alerts: List, config: ConcreteCrackConfig,
+                            frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[
+        Dict]:
+        """Generate structured incidents for the output format with frame-based keys."""
+
+        incidents = []
+        total_detections = counting_summary.get("total_count", 0)
+        current_timestamp = self._get_current_timestamp_str(stream_info)
+        camera_info = self.get_camera_info_from_stream(stream_info)
+
+        self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list
+
         if total_detections > 0:
             # Determine event level based on thresholds
-            level = "
+            level = "low"
             intensity = 5.0
+            start_timestamp = self._get_start_timestamp_str(stream_info)
+            if start_timestamp and self.current_incident_end_timestamp=='N/A':
+                self.current_incident_end_timestamp = 'Incident still active'
+            elif start_timestamp and self.current_incident_end_timestamp=='Incident still active':
+                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
+                    self.current_incident_end_timestamp = current_timestamp
+            elif self.current_incident_end_timestamp!='Incident still active' and self.current_incident_end_timestamp!='N/A':
+                self.current_incident_end_timestamp = 'N/A'
+
             if config.alert_config and config.alert_config.count_thresholds:
                 threshold = config.alert_config.count_thresholds.get("all", 15)
                 intensity = min(10.0, (total_detections / threshold) * 10)
 
-                if intensity >=
+                if intensity >= 9:
                     level = "critical"
+                    self._ascending_alert_list.append(3)
+                elif intensity >= 7:
+                    level = "significant"
+                    self._ascending_alert_list.append(2)
                 elif intensity >= 5:
-                    level = "
+                    level = "medium"
+                    self._ascending_alert_list.append(1)
                 else:
-                    level = "
+                    level = "low"
+                    self._ascending_alert_list.append(0)
             else:
-                if total_detections >
+                if total_detections > 30:
                     level = "critical"
+                    intensity = 10.0
+                    self._ascending_alert_list.append(3)
+                elif total_detections > 25:
+                    level = "significant"
                     intensity = 9.0
+                    self._ascending_alert_list.append(2)
                 elif total_detections > 15:
-                    level = "
+                    level = "medium"
                     intensity = 7.0
+                    self._ascending_alert_list.append(1)
                 else:
-                    level = "
+                    level = "low"
                     intensity = min(10.0, total_detections / 3.0)
+                    self._ascending_alert_list.append(0)
 
             # Generate human text in new format
-            human_text_lines = ["
-            human_text_lines.append(f"
+            human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
+            human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE,level)}")
             human_text = "\n".join(human_text_lines)
 
-
-
-
-
-
-
-            "
-            "
-
-
-
-
-
-
-
-
-
-        # Add alert events
-        for alert in alerts:
-            total_detections = counting_summary.get("total_count", 0)
-            intensity_message = "ALERT: Low congestion in the scene"
-            if config.alert_config and config.alert_config.count_thresholds:
-                threshold = config.alert_config.count_thresholds.get("all", 15)
-                percentage = (total_detections / threshold) * 100 if threshold > 0 else 0
-                if percentage < 20:
-                    intensity_message = "ALERT: Low congestion in the scene"
-                elif percentage <= 50:
-                    intensity_message = "ALERT: Moderate congestion in the scene"
-                elif percentage <= 70:
-                    intensity_message = "ALERT: Heavy congestion in the scene"
-                else:
-                    intensity_message = "ALERT: Severe congestion in the scene"
-            else:
-                if total_detections > 15:
-                    intensity_message = "ALERT: Heavy congestion in the scene"
-                elif total_detections == 1:
-                    intensity_message = "ALERT: Low congestion in the scene"
-                else:
-                    intensity_message = "ALERT: Moderate congestion in the scene"
-
-            alert_event = {
-                "type": alert.get("type", "congestion_alert"),
-                "stream_time": datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S UTC"),
-                "level": alert.get("severity", "warning"),
-                "intensity": 8.0,
-                "config": {
-                    "min_value": 0,
-                    "max_value": 10,
-                    "level_settings": {"info": 2, "warning": 5, "critical": 7}
-                },
-                "application_name": "Congestion Alert System",
-                "application_version": "1.2",
-                "location_info": alert.get("zone"),
-                "human_text": f"{datetime.now(timezone.utc).strftime('%Y-%m-%d-%H:%M:%S UTC')} : {intensity_message}"
-            }
-            frame_events.append(alert_event)
+            alert_settings=[]
+            if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+                alert_settings.append({
+                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                    "incident_category": self.CASE_TYPE,
+                    "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                    "ascending": True,
+                    "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                                                      getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+                                 }
+                })
+
+            event= self.create_incident(incident_id=self.CASE_TYPE+'_'+str(frame_number), incident_type=self.CASE_TYPE,
+                                        severity_level=level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
+                                        start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
+                                        level_settings= {"low": 1, "medium": 3, "significant":4, "critical": 7})
+            incidents.append(event)
 
-
+        else:
+            self._ascending_alert_list.append(0)
+            incidents.append({})
+
+        return incidents
 
     def _generate_tracking_stats(
             self,
             counting_summary: Dict,
-
-            summary: str,
+            alerts: List,
             config: ConcreteCrackConfig,
             frame_number: Optional[int] = None,
             stream_info: Optional[Dict[str, Any]] = None
     ) -> List[Dict]:
-        """Generate structured tracking stats
-
-
-
+        """Generate structured tracking stats matching eg.json format."""
+        camera_info = self.get_camera_info_from_stream(stream_info)
+
+        # frame_key = str(frame_number) if frame_number is not None else "current_frame"
+        # tracking_stats = [{frame_key: []}]
+        # frame_tracking_stats = tracking_stats[0][frame_key]
+        tracking_stats = []
+
+        total_detections = counting_summary.get("total_count", 0)  #CURRENT total count of all classes
+        total_counts_dict = counting_summary.get("total_counts", {})  #TOTAL cumulative counts per class
+        cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0  #TOTAL combined cumulative count
+        per_category_count = counting_summary.get("per_category_count", {})  #CURRENT count per class
+
+        current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
+        start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
+
+        # Create high precision timestamps for input_timestamp and reset_timestamp
+        high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
+        high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
+
+
+        # Build total_counts array in expected format
+        total_counts = []
+        for cat, count in total_counts_dict.items():
+            if count > 0:
+                total_counts.append({
+                    "category": cat,
+                    "count": count
+                })
+
+        # Build current_counts array in expected format
+        current_counts = []
+        for cat, count in per_category_count.items():
+            if count > 0 or total_detections > 0:  # Include even if 0 when there are detections
+                current_counts.append({
+                    "category": cat,
+                    "count": count
+                })
+
+        # Prepare detections without confidence scores (as per eg.json)
+        detections = []
+        for detection in counting_summary.get("detections", []):
+            bbox = detection.get("bounding_box", {})
+            category = detection.get("category", "person")
+            # Include segmentation if available (like in eg.json)
+            if detection.get("masks"):
+                segmentation= detection.get("masks", [])
+                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+            elif detection.get("segmentation"):
+                segmentation= detection.get("segmentation")
+                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+            elif detection.get("mask"):
+                segmentation= detection.get("mask")
+                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+            else:
+                detection_obj = self.create_detection_object(category, bbox)
+            detections.append(detection_obj)
+
+        # Build alert_settings array in expected format
+        alert_settings = []
+        if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+            alert_settings.append({
+                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                "incident_category": self.CASE_TYPE,
+                "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                "ascending": True,
+                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                                                  getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+                             }
+            })
+
+        # Generate human_text in expected format
+        human_text_lines = [f"Tracking Statistics:"]
+        human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")
+
+        for cat, count in per_category_count.items():
+            human_text_lines.append(f"\t{cat}: {count}")
+
+        human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
+        for cat, count in total_counts_dict.items():
+            if count > 0:
+                human_text_lines.append(f"\t{cat}: {count}")
+
+        if alerts:
+            for alert in alerts:
+                human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
+        else:
+            human_text_lines.append("Alerts: None")
 
-
-
-
-
+        human_text = "\n".join(human_text_lines)
+        reset_settings=[
+            {
+                "interval_type": "daily",
+                "reset_time": {
+                    "value": 9,
+                    "time_unit": "hour"
+                }
+            }
+        ]
 
-
+        tracking_stat=self.create_tracking_stats(total_counts=total_counts, current_counts=current_counts,
+                                                 detections=detections, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
+                                                 reset_settings=reset_settings, start_time=high_precision_start_timestamp ,
+                                                 reset_time=high_precision_reset_timestamp)
 
-
-
+        tracking_stats.append(tracking_stat)
+        return tracking_stats
 
-
+    def _generate_business_analytics(self, counting_summary: Dict, zone_analysis: Dict, config: ConcreteCrackConfig, stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
+        """Generate standardized business analytics for the agg_summary structure."""
+        if is_empty:
+            return []
 
-
-
-
-
-
-
-            elif len(category_counts) == 2:
-                detection_text = f"{category_counts[0]} and {category_counts[1]} detected"
-            else:
-                detection_text = f"{', '.join(category_counts[:-1])}, and {category_counts[-1]} detected"
-            human_text_lines.append(f"\t- {detection_text}")
-        else:
-            human_text_lines.append(f"\t- No detections")
+        #-----IF YOUR USECASE NEEDS BUSINESS ANALYTICS, YOU CAN USE THIS FUNCTION------#
+        #camera_info = self.get_camera_info_from_stream(stream_info)
+        # business_analytics = self.create_business_analytics(nalysis_name, statistics,
+        #                                                     human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
+        #                                                     reset_settings)
+        # return business_analytics
 
-
+    def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
+        """
+        Generate a human_text string for the tracking_stat, incident, business analytics and alerts.
+        """
+        lines = {}
+        lines["Application Name"] = self.CASE_TYPE
+        lines["Application Version"] = self.CASE_VERSION
+        if len(incidents) > 0:
+            lines["Incidents:"]=f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
+        if len(tracking_stats) > 0:
+            lines["Tracking Statistics:"]=f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
+        if len(business_analytics) > 0:
+            lines["Business Analytics:"]=f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
 
-
-
-        human_text_lines.append(f"\t- Total Detected: {cumulative_total}")
-        # Add category-wise counts
-        if total_counts:
-            for cat, count in total_counts.items():
-                if count > 0:  # Only include categories with non-zero counts
-                    human_text_lines.append(f"\t- {cat}: {count}")
+        if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
+            lines["Summary"] = "No Summary Data"
 
-
+        return [lines]
 
-
-
-
-
-
-
-
-
-
-
-
-
+    def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
+        """
+        Get detailed information about track IDs (per frame).
+        """
+        # Collect all track_ids in this frame
+        frame_track_ids = set()
+        for det in detections:
+            tid = det.get('track_id')
+            if tid is not None:
+                frame_track_ids.add(tid)
+        # Use persistent total set for unique counting
+        total_track_ids = set()
+        for s in getattr(self, '_per_category_total_track_ids', {}).values():
+            total_track_ids.update(s)
+        return {
+            "total_count": len(total_track_ids),
+            "current_frame_count": len(frame_track_ids),
+            "total_unique_track_ids": len(total_track_ids),
+            "current_frame_track_ids": list(frame_track_ids),
+            "last_update_time": time.time(),
+            "total_frames_processed": getattr(self, '_total_frame_counter', 0)
        }
 
-
-
+    def _update_tracking_state(self, detections: list):
+        """
+        Track unique categories track_ids per category for total count after tracking.
+        Applies canonical ID merging to avoid duplicate counting when the underlying
+        tracker loses an object temporarily and assigns a new ID.
+        """
+        # Lazily initialise storage dicts
+        if not hasattr(self, "_per_category_total_track_ids"):
+            self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
+            self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
+
+        for det in detections:
+            cat = det.get("category")
+            raw_track_id = det.get("track_id")
+            if cat not in self.target_categories or raw_track_id is None:
+                continue
+            bbox = det.get("bounding_box", det.get("bbox"))
+            canonical_id = self._merge_or_register_track(raw_track_id, bbox)
+            # Propagate canonical ID back to detection so downstream logic uses it
+            det["track_id"] = canonical_id
+
+            self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
+            self._current_frame_track_ids[cat].add(canonical_id)
+
+    def get_total_counts(self):
+        """
+        Return total unique track_id count for each category.
+        """
+        return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
+
+    def _format_timestamp_for_video(self, timestamp: float) -> str:
+        """Format timestamp for video chunks (HH:MM:SS.ms format)."""
+        hours = int(timestamp // 3600)
+        minutes = int((timestamp % 3600) // 60)
+        seconds = round(float(timestamp % 60),2)
+        return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
+
+    def _format_timestamp_for_stream(self, timestamp: float) -> str:
+        """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
+        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
+        return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
+        """Get formatted current timestamp based on stream type."""
+        if not stream_info:
+            return "00:00:00.00"
+        # is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
+        if precision:
+            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+                if frame_id:
+                    start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
+                else:
+                    start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
+                stream_time_str = self._format_timestamp_for_video(start_time)
+                return stream_time_str
+            else:
+                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+
+        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+            if frame_id:
+                start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
+            else:
+                start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
+            stream_time_str = self._format_timestamp_for_video(start_time)
+            return stream_time_str
+        else:
+            # For streams, use stream_time from stream_info
+            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+            if stream_time_str:
+                # Parse the high precision timestamp string to get timestamp
+                try:
+                    # Remove " UTC" suffix and parse
+                    timestamp_str = stream_time_str.replace(" UTC", "")
+                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
+                    return self._format_timestamp_for_stream(timestamp)
+                except:
+                    # Fallback to current time if parsing fails
+                    return self._format_timestamp_for_stream(time.time())
+            else:
+                return self._format_timestamp_for_stream(time.time())
+
+    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
+        """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
+        if not stream_info:
+            return "00:00:00"
+        if precision:
+            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+                return "00:00:00"
+            else:
+                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+
+        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+            # If video format, start from 00:00:00
+            return "00:00:00"
+        else:
+            # For streams, use tracking start time or current time with minutes/seconds reset
+            if self._tracking_start_time is None:
+                # Try to extract timestamp from stream_time string
+                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+                if stream_time_str:
+                    try:
+                        # Remove " UTC" suffix and parse
+                        timestamp_str = stream_time_str.replace(" UTC", "")
+                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                    except:
+                        # Fallback to current time if parsing fails
+                        self._tracking_start_time = time.time()
+                else:
+                    self._tracking_start_time = time.time()
+
+            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+            # Reset minutes and seconds to 00:00 for "TOTAL SINCE" format
+            dt = dt.replace(minute=0, second=0, microsecond=0)
+            return dt.strftime('%Y:%m:%d %H:%M:%S')
 
     def _count_categories(self, detections: list, config: ConcreteCrackConfig) -> dict:
         """
```
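Most of the new alerting machinery above hinges on `get_trend`, the nested helper inside `_check_alerts`. Its logic is reproduced verbatim below with a small demo using the 0..3 severity codes that `_generate_incidents` appends to `self._ascending_alert_list` (0 = low, 1 = medium, 2 = significant, 3 = critical). Note the implicit fall-through: a ratio strictly between `1 - threshold` and `threshold` returns `None`.

```python
def get_trend(data, lookback=900, threshold=0.6):
    # Copied from the hunk above: counts non-decreasing steps in the window.
    window = data[-lookback:] if len(data) >= lookback else data
    if len(window) < 2:
        return True  # not enough data to determine trend
    increasing = 0
    total = 0
    for i in range(1, len(window)):
        if window[i] >= window[i - 1]:
            increasing += 1
        total += 1
    ratio = increasing / total
    if ratio >= threshold:
        return True
    elif ratio <= (1 - threshold):
        return False
    # ratios strictly between the two bounds fall through and return None

print(get_trend([0, 0, 1, 1, 2, 3]))  # True: severities mostly rising
print(get_trend([3, 2, 2, 1, 0, 0]))  # False: ratio 0.4 <= 1 - 0.6
print(get_trend([3]))                 # True: fewer than 2 samples
```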
```diff
@@ -553,83 +685,6 @@ class ConcreteCrackUseCase(BaseProcessor):
             ]
         }
 
-    # Human-friendly display names for categories
-    CATEGORY_DISPLAY = {
-        "Cracks": "Cracks"
-    }
-
-    def _generate_insights(self, summary: dict, config: ConcreteCrackConfig) -> List[str]:
-        """
-        Generate human-readable insights for each category.
-        """
-        insights = []
-        per_cat = summary.get("per_category_count", {})
-        total_detections = summary.get("total_count", 0)
-
-        if total_detections == 0:
-            insights.append("No detections in the scene")
-            return insights
-        insights.append(f"EVENT: Detected {total_detections} in the scene")
-        # Intensity calculation based on threshold percentage
-        intensity_threshold = None
-        if (config.alert_config and
-                config.alert_config.count_thresholds and
-                "all" in config.alert_config.count_thresholds):
-            intensity_threshold = config.alert_config.count_thresholds["all"]
-
-        if intensity_threshold is not None:
-            # Calculate percentage relative to threshold
-            percentage = (total_detections / intensity_threshold) * 100
-
-            if percentage < 20:
-                insights.append(f"INTENSITY: Low congestion in the scene ({percentage:.1f}% of capacity)")
-            elif percentage <= 50:
-                insights.append(f"INTENSITY: Moderate congestion in the scene ({percentage:.1f}% of capacity)")
-            elif percentage <= 70:
-                insights.append(f"INTENSITY: Heavy congestion in the scene ({percentage:.1f}% of capacity)")
-            else:
-                insights.append(f"INTENSITY: Severe congestion in the scene ({percentage:.1f}% of capacity)")
-
-
-        for cat, count in per_cat.items():
-            display = self.CATEGORY_DISPLAY.get(cat, cat)
-            insights.append(f"{display}:{count}")
-        return insights
-
-    def _check_alerts(self, summary: dict, config: ConcreteCrackConfig) -> List[Dict]:
-        """
-        Check if any alert thresholds are exceeded and return alert dicts.
-        """
-        alerts = []
-        if not config.alert_config:
-            return alerts
-        total = summary.get("total_count", 0)
-        if config.alert_config.count_thresholds:
-            for category, threshold in config.alert_config.count_thresholds.items():
-                if category == "all" and total >= threshold:
-                    timestamp = datetime.now(timezone.utc).strftime('%Y-%m-%d-%H:%M:%S UTC')
-                    alert_description = f"detections count ({total}) exceeds threshold ({threshold})"
-                    alerts.append({
-                        "type": "count_threshold",
-                        "severity": "warning",
-                        "message": f"Total detections count ({total}) exceeds threshold ({threshold})",
-                        "category": category,
-                        "current_count": total,
-                        "threshold": threshold
-                    })
-                elif category in summary.get("per_category_count", {}):
-                    count = summary.get("per_category_count", {})[category]
-                    if count >= threshold:
-                        alerts.append({
-                            "type": "count_threshold",
-                            "severity": "warning",
-                            "message": f"{category} count ({count}) exceeds threshold ({threshold})",
-                            "category": category,
-                            "current_count": count,
-                            "threshold": threshold
-                        })
-        return alerts
-
     def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
         """
         Extract prediction details for output (category, confidence, bounding box).
```
```diff
@@ -643,30 +698,6 @@ class ConcreteCrackUseCase(BaseProcessor):
             for det in detections
         ]
 
-    def _generate_summary(self, summary: dict, alerts: List) -> str:
-        """
-        Generate a human_text string for the result, including per-category insights if available.
-        Adds a tab before each label for better formatting.
-        Also always includes the cumulative count so far.
-        """
-        total = summary.get("total_count", 0)
-        per_cat = summary.get("per_category_count", {})
-        cumulative = summary.get("total_counts", {})
-        cumulative_total = sum(cumulative.values()) if cumulative else 0
-        lines = []
-        if total > 0:
-            lines.append(f"{total} detections")
-            if per_cat:
-                lines.append("detections:")
-                for cat, count in per_cat.items():
-                    lines.append(f"\t{cat}:{count}")
-        else:
-            lines.append("No detections")
-        lines.append(f"Total detections: {cumulative_total}")
-        if alerts:
-            lines.append(f"{len(alerts)} alert(s)")
-        return "\n".join(lines)
-
     # ------------------------------------------------------------------ #
     # Canonical ID helpers                                                #
     # ------------------------------------------------------------------ #
```
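The string-returning `_generate_summary` deleted here is superseded by the list-of-dict version added in the large hunk above. Roughly, the payload changes shape like this (values illustrative):

```python
# Old: one flat human_text string
old_summary = "2 detections\ndetections:\n\tCracks:2\nTotal detections: 5"

# New: a single-element list holding a keyed dict (per the added code above)
new_summary = [{
    "Application Name": "concrete_crack_detection",  # self.CASE_TYPE
    "Application Version": "1.3",                    # self.CASE_VERSION
    "Incidents:": "\n\t...incident human_text...\n",
    "Tracking Statistics:": "\t...tracking human_text...\n",
}]
```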
matrice/deploy/utils/post_processing/usecases/weld_defect_detection.py

```diff
@@ -28,7 +28,7 @@ class WeldDefectConfig(BaseConfig):
     smoothing_window_size: int = 20
     smoothing_cooldown_frames: int = 5
     smoothing_confidence_range_factor: float = 0.5
-    confidence_threshold: float = 0.
+    confidence_threshold: float = 0.45
     defect_categories: List[str] = field(
         default_factory=lambda: ['Bad Welding', 'Crack', 'Porosity', 'Spatters', 'Good Welding', 'Reinforcement']
     )
```
{matrice-1.0.99128.dist-info → matrice-1.0.99130.dist-info}/RECORD

```diff
@@ -166,7 +166,7 @@ matrice/deploy/utils/post_processing/usecases/chicken_pose_detection.py,sha256=-
 matrice/deploy/utils/post_processing/usecases/child_monitoring.py,sha256=cWYDTXca0hci8k7rHNYfwzoByeGIeJ-d4tESwTnRXa0,38807
 matrice/deploy/utils/post_processing/usecases/color_detection.py,sha256=Z8-akjy8a7f8YyiOzXu_Zi1Km30v-TRrymDqQOPpJ_8,43277
 matrice/deploy/utils/post_processing/usecases/color_map_utils.py,sha256=SP-AEVcjLmL8rxblu-ixqUJC2fqlcr7ab4hWo4Fcr_k,2677
-matrice/deploy/utils/post_processing/usecases/concrete_crack_detection.py,sha256=
+matrice/deploy/utils/post_processing/usecases/concrete_crack_detection.py,sha256=whjhRfiFa1dZq-rytaA6QhtZLbaeUmz9fomTW0okDCQ,39264
 matrice/deploy/utils/post_processing/usecases/crop_weed_detection.py,sha256=i7BWhC-D7liOg9fzFHFg_upd1fdvXlVHYzDRyHL-AdM,40322
 matrice/deploy/utils/post_processing/usecases/customer_service.py,sha256=UWS83qxguyAyhh8a0JF5QH9DtKxO8I-gI2BPOjLPxBw,44642
 matrice/deploy/utils/post_processing/usecases/defect_detection_products.py,sha256=flvTWv6vxa3q4zXD8_e8TW0pqNE5z3LIuvU9ceVKuXg,34481
```
```diff
@@ -204,7 +204,7 @@ matrice/deploy/utils/post_processing/usecases/underwater_pollution_detection.py,
 matrice/deploy/utils/post_processing/usecases/vehicle_monitoring.py,sha256=-Q4x0jRYrmbDPAooWbTeYr1TowtvpU5oJ-sVeqRPaM4,31193
 matrice/deploy/utils/post_processing/usecases/warehouse_object_segmentation.py,sha256=5uZXTJL_A3tUEN08T-_ZQpUoJ9gqbuuMc4z2mT4sMnQ,43753
 matrice/deploy/utils/post_processing/usecases/weapon_detection.py,sha256=ait-RCgmu3BH6tNAl9nZgVXO0pSu4ETjtJzN7x6ZFRY,29365
-matrice/deploy/utils/post_processing/usecases/weld_defect_detection.py,sha256=
+matrice/deploy/utils/post_processing/usecases/weld_defect_detection.py,sha256=b0dAJGKUofbGrwHDJfIYb4pqmvp4Y23JK09Qb-34mxg,30209
 matrice/deploy/utils/post_processing/usecases/windmill_maintenance.py,sha256=G1eqo3Z-HYmGJ6oeZYrpZwhpvqQ9Lc_T-6S7BLBXHeA,40498
 matrice/deploy/utils/post_processing/usecases/wound_segmentation.py,sha256=7Nbc7zUQUKdXTSv8XpPuAZLIU3Mr1RU1KyO_D3thoGk,38289
 matrice/deploy/utils/post_processing/utils/__init__.py,sha256=A49ksdXL7gRwBbIUwnU2ueFDGA67qVnEW_9lItOibtk,3626
```
```diff
@@ -225,8 +225,8 @@ matrice/deployment/camera_manager.py,sha256=ReBZqm1CNXRImKcbcZ4uWAT3TUWkof1D28oB
 matrice/deployment/deployment.py,sha256=PLIUD-PxTaC2Zxb3Y12wUddsryV-OJetjCjLoSUh7S4,48103
 matrice/deployment/inference_pipeline.py,sha256=bXLgd29ViA7o0c7YWLFJl1otBUQfTPb61jS6VawQB0Y,37918
 matrice/deployment/streaming_gateway_manager.py,sha256=w5swGsuFVfZIdOm2ZuBHRHlRdYYJMLopLsf2gb91lQ8,20946
-matrice-1.0.
-matrice-1.0.
-matrice-1.0.
-matrice-1.0.
-matrice-1.0.
+matrice-1.0.99130.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
+matrice-1.0.99130.dist-info/METADATA,sha256=xrPDt8ng3IcxPsSmYp4Wa2Co-URJP3qGjc1MDQDhm10,14624
+matrice-1.0.99130.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+matrice-1.0.99130.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
+matrice-1.0.99130.dist-info/RECORD,,
```
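For reference, the `sha256=` values in RECORD follow the wheel spec (PEP 376/427): a urlsafe base64-encoded SHA-256 digest with the trailing `=` padding stripped. A generic sketch for verifying an entry locally (the path is illustrative):

```python
import base64
import hashlib

def record_hash(path: str) -> str:
    """Compute a RECORD-style hash entry for a file."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# For the 1.0.99130 wheel, record_hash(".../concrete_crack_detection.py")
# should return "sha256=whjhRfiFa1dZq-rytaA6QhtZLbaeUmz9fomTW0okDCQ".
```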
The remaining dist-info files ({matrice-1.0.99128.dist-info → matrice-1.0.99130.dist-info}/WHEEL, /licenses/LICENSE.txt, /top_level.txt) are carried over without changes; only the version in their directory name differs.