matrice-1.0.99140-py3-none-any.whl → matrice-1.0.99142-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice/deploy/utils/post_processing/config.py +4 -0
- matrice/deploy/utils/post_processing/usecases/concrete_crack_detection.py +49 -59
- matrice/deploy/utils/post_processing/usecases/road_lane_detection.py +321 -329
- matrice/deploy/utils/post_processing/usecases/shelf_inventory_detection.py +310 -345
- {matrice-1.0.99140.dist-info → matrice-1.0.99142.dist-info}/METADATA +1 -1
- {matrice-1.0.99140.dist-info → matrice-1.0.99142.dist-info}/RECORD +9 -9
- {matrice-1.0.99140.dist-info → matrice-1.0.99142.dist-info}/WHEEL +0 -0
- {matrice-1.0.99140.dist-info → matrice-1.0.99142.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice-1.0.99140.dist-info → matrice-1.0.99142.dist-info}/top_level.txt +0 -0
@@ -1,9 +1,16 @@ matrice/deploy/utils/post_processing/usecases/road_lane_detection.py
+"""
+Road Lane Detection Use Case for Post-Processing
+
+This module provides road lane detection functionality with lane type classification,
+zone analysis, and alert generation.
+"""
+
 from typing import Any, Dict, List, Optional
-from dataclasses import asdict
+from dataclasses import asdict, dataclass, field
 import time
 from datetime import datetime, timezone

-from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol
+from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol
 from ..utils import (
     filter_by_confidence,
     filter_by_categories,
@@ -16,31 +23,24 @@ from ..utils import (
     BBoxSmoothingConfig,
     BBoxSmoothingTracker
 )
-from dataclasses import dataclass, field
 from ..core.config import BaseConfig, AlertConfig, ZoneConfig

-
 @dataclass
 class LaneDetectionConfig(BaseConfig):
-    """Configuration for lane detection use case
+    """Configuration for road lane detection use case."""
     enable_smoothing: bool = True
     smoothing_algorithm: str = "observability"
     smoothing_window_size: int = 20
     smoothing_cooldown_frames: int = 5
     smoothing_confidence_range_factor: float = 0.5
-
     confidence_threshold: float = 0.6
-
     usecase_categories: List[str] = field(
         default_factory=lambda: ['Divider-Line', 'Dotted-Line', 'Double-Line', 'Random-Line', 'Road-Sign-Line', 'Solid-Line']
     )
-
     target_categories: List[str] = field(
         default_factory=lambda: ['Divider-Line', 'Dotted-Line', 'Double-Line', 'Random-Line', 'Road-Sign-Line', 'Solid-Line']
     )
-
     alert_config: Optional[AlertConfig] = None
-
     index_to_category: Optional[Dict[int, str]] = field(
         default_factory=lambda: {
             0: "Divider-Line",
@@ -52,100 +52,22 @@ class LaneDetectionConfig(BaseConfig):
         }
     )

-
 class LaneDetectionUseCase(BaseProcessor):
-
-
-
-
-
-
-
-
-            total_track_ids.update(s)
-        return {
-            "total_count": len(total_track_ids),
-            "current_frame_count": len(frame_track_ids),
-            "total_unique_track_ids": len(total_track_ids),
-            "current_frame_track_ids": list(frame_track_ids),
-            "last_update_time": time.time(),
-            "total_frames_processed": getattr(self, '_total_frame_counter', 0)
-        }
-
-    def _update_tracking_state(self, detections: list):
-        if not hasattr(self, "_per_category_total_track_ids"):
-            self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
-        self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
-
-        for det in detections:
-            cat = det.get("category")
-            raw_track_id = det.get("track_id")
-            if cat not in self.target_categories or raw_track_id is None:
-                continue
-            bbox = det.get("bounding_box", det.get("bbox"))
-            canonical_id = self._merge_or_register_track(raw_track_id, bbox)
-            det["track_id"] = canonical_id
-            self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
-            self._current_frame_track_ids[cat].add(canonical_id)
-
-    def get_total_counts(self):
-        return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
-
-    def _format_timestamp_for_video(self, timestamp: float) -> str:
-        hours = int(timestamp // 3600)
-        minutes = int((timestamp % 3600) // 60)
-        seconds = timestamp % 60
-        return f"{hours:02d}:{minutes:02d}:{seconds:06.2f}"
-
-    def _format_timestamp_for_stream(self, timestamp: float) -> str:
-        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
-        return dt.strftime('%Y:%m:%d %H:%M:%S')
-
-    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]]) -> str:
-        if not stream_info:
-            return "00:00:00.00"
-        if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
-            stream_time_str = stream_info.get("video_timestamp", "")
-            return stream_time_str[:8]
-        else:
-            stream_time_str = stream_info.get("stream_time", "")
-            if stream_time_str:
-                try:
-                    timestamp_str = stream_time_str.replace(" UTC", "")
-                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
-                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
-                    return self._format_timestamp_for_stream(timestamp)
-                except:
-                    return self._format_timestamp_for_stream(time.time())
-            else:
-                return self._format_timestamp_for_stream(time.time())
-
-    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]]) -> str:
-        if not stream_info:
-            return "00:00:00"
-        is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
-        if is_video_chunk or stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
-            return "00:00:00"
-        else:
-            if self._tracking_start_time is None:
-                stream_time_str = stream_info.get("stream_time", "")
-                if stream_time_str:
-                    try:
-                        timestamp_str = stream_time_str.replace(" UTC", "")
-                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
-                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
-                    except:
-                        self._tracking_start_time = time.time()
-                else:
-                    self._tracking_start_time = time.time()
-            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
-            dt = dt.replace(minute=0, second=0, microsecond=0)
-            return dt.strftime('%Y:%m:%d %H:%M:%S')
+    CATEGORY_DISPLAY = {
+        "Divider-Line": "Divider Line",
+        "Dotted-Line": "Dotted Line",
+        "Double-Line": "Double Line",
+        "Random-Line": "Random Line",
+        "Road-Sign-Line": "Road Sign Line",
+        "Solid-Line": "Solid Line"
+    }

     def __init__(self):
         super().__init__("lane_detection")
         self.category = "traffic"
-        self.
+        self.CASE_TYPE: Optional[str] = 'lane_detection'
+        self.CASE_VERSION: Optional[str] = '1.0'
+        self.target_categories = ['Divider-Line', 'Dotted-Line', 'Double-Line', 'Random-Line', 'Road-Sign-Line', 'Solid-Line']
         self.smoothing_tracker = None
         self.tracker = None
         self._total_frame_counter = 0
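Note: the alert-related settings used throughout this file (`alert_type`, `alert_value`, `count_thresholds`) are only ever read off `config.alert_config` through `getattr` with defaults, so any object exposing those attributes works. A minimal standalone sketch of that access pattern, using `types.SimpleNamespace` as an illustrative stand-in for the package's `AlertConfig` (the field values here are made up):

# Standalone sketch: how the use case reads alert settings via getattr defaults.
# SimpleNamespace stands in for matrice's AlertConfig; attribute names mirror the diff.
from types import SimpleNamespace

alert_config = SimpleNamespace(
    count_thresholds={"all": 15, "Solid-Line": 8},  # thresholds keyed by category or "all"
    alert_type=["Email"],
    alert_value=["JSON"],
)

# Same defaults the added code falls back to when an attribute is missing.
alert_types = getattr(alert_config, "alert_type", ["Default"])
alert_values = getattr(alert_config, "alert_value", ["JSON"])
settings = {t: v for t, v in zip(alert_types, alert_values)}

print(settings)                       # {'Email': 'JSON'}
print(alert_config.count_thresholds)  # {'all': 15, 'Solid-Line': 8}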
@@ -155,6 +77,8 @@ class LaneDetectionUseCase(BaseProcessor):
         self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
         self._track_merge_iou_threshold: float = 0.05
         self._track_merge_time_window: float = 7.0
+        self._ascending_alert_list: List[int] = []
+        self.current_incident_end_timestamp: str = "N/A"

     def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
                 stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
@@ -173,7 +97,7 @@ class LaneDetectionUseCase(BaseProcessor):
             self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
         else:
             processed_data = data
-            self.logger.debug("
+            self.logger.debug("No confidence filtering applied")

         if config.index_to_category:
             processed_data = apply_category_mapping(processed_data, config.index_to_category)
@@ -202,7 +126,7 @@ class LaneDetectionUseCase(BaseProcessor):
             if self.tracker is None:
                 tracker_config = TrackerConfig()
                 self.tracker = AdvancedTracker(tracker_config)
-                self.logger.info("Initialized AdvancedTracker for Lane
+                self.logger.info("Initialized AdvancedTracker for Lane Detection")
             processed_data = self.tracker.update(processed_data)
         except Exception as e:
             self.logger.warning(f"AdvancedTracker failed: {e}")
@@ -222,184 +146,347 @@ class LaneDetectionUseCase(BaseProcessor):
         counting_summary = self._count_categories(processed_data, config)
         total_counts = self.get_total_counts()
         counting_summary['total_counts'] = total_counts
-
-        alerts = self._check_alerts(counting_summary, config)
+        alerts = self._check_alerts(counting_summary, frame_number, config)
         predictions = self._extract_predictions(processed_data)
-        summary = self._generate_summary(counting_summary, alerts)

-
-        tracking_stats_list = self._generate_tracking_stats(counting_summary,
+        incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
+        tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
+        business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, stream_info, is_empty=True)
+        summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)

-
+        incidents = incidents_list[0] if incidents_list else {}
         tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
+        business_analytics = business_analytics_list[0] if business_analytics_list else {}
+        summary = summary_list[0] if summary_list else {}
+        agg_summary = {str(frame_number): {
+            "incidents": incidents,
+            "tracking_stats": tracking_stats,
+            "business_analytics": business_analytics,
+            "alerts": alerts,
+            "human_text": summary}
+        }

         context.mark_completed()
         result = self.create_result(
-            data={
-                "counting_summary": counting_summary,
-                "general_counting_summary": general_counting_summary,
-                "alerts": alerts,
-                "total_detections": counting_summary.get("total_count", 0),
-                "events": events,
-                "tracking_stats": tracking_stats,
-            },
+            data={"agg_summary": agg_summary},
             usecase=self.name,
             category=self.category,
             context=context
         )
-        result.summary = summary
-        result.insights = insights
-        result.predictions = predictions
         return result

-    def
-
+    def _check_alerts(self, summary: dict, frame_number: Any, config: LaneDetectionConfig) -> List[Dict]:
+        def get_trend(data, lookback=900, threshold=0.6):
+            window = data[-lookback:] if len(data) >= lookback else data
+            if len(window) < 2:
+                return True
+            increasing = 0
+            total = 0
+            for i in range(1, len(window)):
+                if window[i] >= window[i - 1]:
+                    increasing += 1
+                total += 1
+            ratio = increasing / total
+            return ratio >= threshold
+
         frame_key = str(frame_number) if frame_number is not None else "current_frame"
-
-
+        alerts = []
+        total_detections = summary.get("total_count", 0)
+        total_counts_dict = summary.get("total_counts", {})
+        per_category_count = summary.get("per_category_count", {})
+
+        if not config.alert_config:
+            return alerts
+
+        if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
+            for category, threshold in config.alert_config.count_thresholds.items():
+                if category == "all" and total_detections > threshold:
+                    alerts.append({
+                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                        "alert_id": f"alert_{category}_{frame_key}",
+                        "incident_category": self.CASE_TYPE,
+                        "threshold_level": threshold,
+                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                          getattr(config.alert_config, 'alert_value', ['JSON']))}
+                    })
+                elif category in per_category_count and per_category_count[category] > threshold:
+                    alerts.append({
+                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                        "alert_id": f"alert_{category}_{frame_key}",
+                        "incident_category": self.CASE_TYPE,
+                        "threshold_level": threshold,
+                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                          getattr(config.alert_config, 'alert_value', ['JSON']))}
+                    })
+        return alerts
+
+    def _generate_incidents(self, counting_summary: Dict, alerts: List, config: LaneDetectionConfig,
+                            frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+        incidents = []
         total_detections = counting_summary.get("total_count", 0)
+        current_timestamp = self._get_current_timestamp_str(stream_info)
+        camera_info = self.get_camera_info_from_stream(stream_info)
+
+        self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list

         if total_detections > 0:
-            level = "
+            level = "low"
             intensity = 5.0
+            start_timestamp = self._get_start_timestamp_str(stream_info)
+            if start_timestamp and self.current_incident_end_timestamp == 'N/A':
+                self.current_incident_end_timestamp = 'Incident still active'
+            elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
+                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
+                    self.current_incident_end_timestamp = current_timestamp
+            elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
+                self.current_incident_end_timestamp = 'N/A'
+
             if config.alert_config and config.alert_config.count_thresholds:
                 threshold = config.alert_config.count_thresholds.get("all", 15)
                 intensity = min(10.0, (total_detections / threshold) * 10)
-                if intensity >=
+                if intensity >= 9:
                     level = "critical"
+                    self._ascending_alert_list.append(3)
+                elif intensity >= 7:
+                    level = "significant"
+                    self._ascending_alert_list.append(2)
                 elif intensity >= 5:
-                    level = "
+                    level = "medium"
+                    self._ascending_alert_list.append(1)
                 else:
-                    level = "
+                    level = "low"
+                    self._ascending_alert_list.append(0)
             else:
-                if total_detections >
+                if total_detections > 30:
                     level = "critical"
+                    intensity = 10.0
+                    self._ascending_alert_list.append(3)
+                elif total_detections > 25:
+                    level = "significant"
                     intensity = 9.0
+                    self._ascending_alert_list.append(2)
                 elif total_detections > 15:
-                    level = "
+                    level = "medium"
                     intensity = 7.0
+                    self._ascending_alert_list.append(1)
                 else:
-                    level = "
+                    level = "low"
                     intensity = min(10.0, total_detections / 3.0)
+                    self._ascending_alert_list.append(0)

-            human_text_lines = ["
-            human_text_lines.append(f"
+            human_text_lines = [f"LANE INCIDENTS DETECTED @ {current_timestamp}:"]
+            human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
             human_text = "\n".join(human_text_lines)

-
-
-
-
-
-
-            "
-            "
-
-            }
-
-
-            "
-
-
-
-
-
-
-
-
-
-
-
-
-
-            elif percentage <= 70:
-                intensity_message = "ALERT: High lane density in the scene"
-            else:
-                intensity_message = "ALERT: Very high lane density in the scene"
-        else:
-            if total_detections > 15:
-                intensity_message = "ALERT: High lane density in the scene"
-            elif total_detections == 1:
-                intensity_message = "ALERT: Low lane density in the scene"
-            else:
-                intensity_message = "ALERT: Moderate lane density in the scene"
-
-        alert_event = {
-            "type": alert.get("type", "lane_density_alert"),
-            "stream_time": datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S UTC"),
-            "level": alert.get("severity", "warning"),
-            "intensity": 8.0,
-            "config": {
-                "min_value": 0,
-                "max_value": 10,
-                "level_settings": {"info": 2, "warning": 5, "critical": 7}
-            },
-            "application_name": "Lane Density Alert System",
-            "application_version": "1.2",
-            "location_info": alert.get("zone"),
-            "human_text": f"{datetime.now(timezone.utc).strftime('%Y-%m-%d-%H:%M:%S UTC')} : {intensity_message}"
-        }
-        frame_events.append(alert_event)
+            alert_settings = []
+            if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+                alert_settings.append({
+                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                    "incident_category": self.CASE_TYPE,
+                    "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                    "ascending": True,
+                    "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                      getattr(config.alert_config, 'alert_value', ['JSON']))}
+                })
+
+            event = self.create_incident(
+                incident_id=f"{self.CASE_TYPE}_{frame_number}",
+                incident_type=self.CASE_TYPE,
+                severity_level=level,
+                human_text=human_text,
+                camera_info=camera_info,
+                alerts=alerts,
+                alert_settings=alert_settings,
+                start_time=start_timestamp,
+                end_time=self.current_incident_end_timestamp,
+                level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7}
+            )
+            incidents.append(event)
+        else:
+            self._ascending_alert_list.append(0)
+            incidents.append({})

-        return
+        return incidents

-    def _generate_tracking_stats(self, counting_summary: Dict,
+    def _generate_tracking_stats(self, counting_summary: Dict, alerts: List, config: LaneDetectionConfig,
                                  frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
-
-        tracking_stats = [
-        frame_tracking_stats = tracking_stats[0][frame_key]
-
+        camera_info = self.get_camera_info_from_stream(stream_info)
+        tracking_stats = []
         total_detections = counting_summary.get("total_count", 0)
-
-        cumulative_total = sum(total_counts.values()) if total_counts else 0
+        total_counts_dict = counting_summary.get("total_counts", {})
         per_category_count = counting_summary.get("per_category_count", {})
+        current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
+        start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
+        high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
+        high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
+
+        total_counts = [{"category": cat, "count": count} for cat, count in total_counts_dict.items() if count > 0]
+        current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items() if count > 0 or total_detections > 0]
+
+        detections = []
+        for detection in counting_summary.get("detections", []):
+            bbox = detection.get("bounding_box", {})
+            category = detection.get("category", "lane")
+            if detection.get("masks"):
+                segmentation = detection.get("masks", [])
+                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+            elif detection.get("segmentation"):
+                segmentation = detection.get("segmentation")
+                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+            elif detection.get("mask"):
+                segmentation = detection.get("mask")
+                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+            else:
+                detection_obj = self.create_detection_object(category, bbox)
+            detections.append(detection_obj)
+
+        alert_settings = []
+        if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+            alert_settings.append({
+                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                "incident_category": self.CASE_TYPE,
+                "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                "ascending": True,
+                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                  getattr(config.alert_config, 'alert_value', ['JSON']))}
+            })
+
+        human_text_lines = [f"Tracking Statistics:"]
+        human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")
+        for cat, count in per_category_count.items():
+            human_text_lines.append(f"\t{cat}: {count}")
+        human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
+        for cat, count in total_counts_dict.items():
+            if count > 0:
+                human_text_lines.append(f"\t{cat}: {count}")
+        human_text_lines.append(f"Alerts: {alerts[0].get('settings', {})} sent @ {current_timestamp}" if alerts else "Alerts: None")
+        human_text = "\n".join(human_text_lines)

-
+        reset_settings = [{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]
+        tracking_stat = self.create_tracking_stats(
+            total_counts=total_counts,
+            current_counts=current_counts,
+            detections=detections,
+            human_text=human_text,
+            camera_info=camera_info,
+            alerts=alerts,
+            alert_settings=alert_settings,
+            reset_settings=reset_settings,
+            start_time=high_precision_start_timestamp,
+            reset_time=high_precision_reset_timestamp
+        )
+        tracking_stats.append(tracking_stat)
+        return tracking_stats

-
-
+    def _generate_business_analytics(self, counting_summary: Dict, alerts: Any, config: LaneDetectionConfig,
+                                     stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
+        if is_empty:
+            return []

-
-
-
-
-
-
-
-
-
-
-
-
-
+    def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
+        lines = {}
+        lines["Application Name"] = self.CASE_TYPE
+        lines["Application Version"] = self.CASE_VERSION
+        if incidents:
+            lines["Incidents:"] = f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
+        if tracking_stats:
+            lines["Tracking Statistics:"] = f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
+        if business_analytics:
+            lines["Business Analytics:"] = f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
+        if not incidents and not tracking_stats and not business_analytics:
+            lines["Summary"] = "No Summary Data"
+        return [lines]

-
-
-
-
-
-
-
+    def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
+        frame_track_ids = {det.get('track_id') for det in detections if det.get('track_id') is not None}
+        total_track_ids = set()
+        for s in getattr(self, '_per_category_total_track_ids', {}).values():
+            total_track_ids.update(s)
+        return {
+            "total_count": len(total_track_ids),
+            "current_frame_count": len(frame_track_ids),
+            "total_unique_track_ids": len(total_track_ids),
+            "current_frame_track_ids": list(frame_track_ids),
+            "last_update_time": time.time(),
+            "total_frames_processed": getattr(self, '_total_frame_counter', 0)
+        }

-
+    def _update_tracking_state(self, detections: list):
+        if not hasattr(self, "_per_category_total_track_ids"):
+            self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
+        self._current_frame_track_ids = {cat: set() for cat in self.target_categories}

-
-
-
-
-
-            "
-
-            "
-
-
-            "local_frame_id": frame_key,
-            "detections": counting_summary.get("detections", [])
-        }
+        for det in detections:
+            cat = det.get("category")
+            raw_track_id = det.get("track_id")
+            if cat not in self.target_categories or raw_track_id is None:
+                continue
+            bbox = det.get("bounding_box", det.get("bbox"))
+            canonical_id = self._merge_or_register_track(raw_track_id, bbox)
+            det["track_id"] = canonical_id
+            self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
+            self._current_frame_track_ids[cat].add(canonical_id)

-
-        return
+    def get_total_counts(self):
+        return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
+
+    def _format_timestamp_for_stream(self, timestamp: float) -> str:
+        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
+        return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+    def _format_timestamp_for_video(self, timestamp: float) -> str:
+        hours = int(timestamp // 3600)
+        minutes = int((timestamp % 3600) // 60)
+        seconds = round(float(timestamp % 60), 2)
+        return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
+
+    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str] = None) -> str:
+        if not stream_info:
+            return "00:00:00.00" if precision else "00:00:00"
+        if precision:
+            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+                start_time = (int(frame_id) if frame_id else stream_info.get("input_settings", {}).get("start_frame", 30)) / stream_info.get("input_settings", {}).get("original_fps", 30)
+                return self._format_timestamp_for_video(start_time)
+            return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+            start_time = (int(frame_id) if frame_id else stream_info.get("input_settings", {}).get("start_frame", 30)) / stream_info.get("input_settings", {}).get("original_fps", 30)
+            return self._format_timestamp_for_video(start_time)
+        stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+        if stream_time_str:
+            try:
+                timestamp_str = stream_time_str.replace(" UTC", "")
+                dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
+                return self._format_timestamp_for_stream(timestamp)
+            except:
+                return self._format_timestamp_for_stream(time.time())
+        return self._format_timestamp_for_stream(time.time())
+
+    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
+        if not stream_info:
+            return "00:00:00" if not precision else "00:00:00.00"
+        if precision:
+            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+                return "00:00:00.00"
+            return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+            return "00:00:00"
+        if self._tracking_start_time is None:
+            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+            if stream_time_str:
+                try:
+                    timestamp_str = stream_time_str.replace(" UTC", "")
+                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                    self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                except:
+                    self._tracking_start_time = time.time()
+            else:
+                self._tracking_start_time = time.time()
+        dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+        dt = dt.replace(minute=0, second=0, microsecond=0)
+        return dt.strftime('%Y:%m:%d %H:%M:%S')

     def _count_categories(self, detections: list, config: LaneDetectionConfig) -> dict:
         counts = {}
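Note: with this release `process()` packages everything per frame under `data["agg_summary"]` instead of the older flat keys (`counting_summary`, `events`, `tracking_stats`, ...). A short sketch of walking that payload, assuming the returned `ProcessingResult` exposes the `data` dict passed to `create_result` as a `data` attribute (attribute name assumed, not confirmed by this diff):

# Sketch: iterating the per-frame payload returned under "agg_summary".
# `result` is assumed to be the ProcessingResult from LaneDetectionUseCase.process();
# the key layout mirrors the added agg_summary code above.
def print_frame_summaries(result) -> None:
    agg_summary = result.data.get("agg_summary", {})
    for frame_id, frame_data in agg_summary.items():
        incidents = frame_data.get("incidents", {})      # single incident dict or {}
        stats = frame_data.get("tracking_stats", {})     # single tracking-stats dict or {}
        alerts = frame_data.get("alerts", [])            # list of alert dicts
        print(f"frame {frame_id}: {len(alerts)} alert(s)")
        if incidents:
            print(incidents.get("human_text", ""))
        if stats:
            print(stats.get("human_text", ""))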
@@ -421,74 +508,6 @@ class LaneDetectionUseCase(BaseProcessor):
             ]
         }

-    CATEGORY_DISPLAY = {
-        "Divider-Line": "divider-line",
-        "Dotted-Line": "dotted-line",
-        "Double-Line": "double-line",
-        "Random-Line": "random-line",
-        "Road-Sign-Line": "road-sign-line",
-        "Solid-Line": "solid-line"
-    }
-
-    def _generate_insights(self, summary: dict, config: LaneDetectionConfig) -> List[str]:
-        insights = []
-        per_cat = summary.get("per_category_count", {})
-        total_detections = summary.get("total_count", 0)
-
-        if total_detections == 0:
-            insights.append("No lane detections in the scene")
-            return insights
-        insights.append(f"EVENT: Detected {total_detections} lanes in the scene")
-
-        intensity_threshold = None
-        if config.alert_config and config.alert_config.count_thresholds and "all" in config.alert_config.count_thresholds:
-            intensity_threshold = config.alert_config.count_thresholds["all"]
-
-        if intensity_threshold is not None:
-            percentage = (total_detections / intensity_threshold) * 100
-            if percentage < 20:
-                insights.append(f"INTENSITY: Low lane density ({percentage:.1f}% of capacity)")
-            elif percentage <= 50:
-                insights.append(f"INTENSITY: Moderate lane density ({percentage:.1f}% of capacity)")
-            elif percentage <= 70:
-                insights.append(f"INTENSITY: High lane density ({percentage:.1f}% of capacity)")
-            else:
-                insights.append(f"INTENSITY: Very high lane density ({percentage:.1f}% of capacity)")
-
-        for cat, count in per_cat.items():
-            display = self.CATEGORY_DISPLAY.get(cat, cat)
-            insights.append(f"{display}: {count}")
-        return insights
-
-    def _check_alerts(self, summary: dict, config: LaneDetectionConfig) -> List[Dict]:
-        alerts = []
-        if not config.alert_config:
-            return alerts
-        total = summary.get("total_count", 0)
-        if config.alert_config.count_thresholds:
-            for category, threshold in config.alert_config.count_thresholds.items():
-                if category == "all" and total >= threshold:
-                    alerts.append({
-                        "type": "count_threshold",
-                        "severity": "warning",
-                        "message": f"Total lane detections ({total}) exceeds threshold ({threshold})",
-                        "category": category,
-                        "current_count": total,
-                        "threshold": threshold
-                    })
-                elif category in summary.get("per_category_count", {}):
-                    count = summary.get("per_category_count", {})[category]
-                    if count >= threshold:
-                        alerts.append({
-                            "type": "count_threshold",
-                            "severity": "warning",
-                            "message": f"{category} count ({count}) exceeds threshold ({threshold})",
-                            "category": category,
-                            "current_count": count,
-                            "threshold": threshold
-                        })
-        return alerts
-
     def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
         return [
             {
@@ -499,25 +518,6 @@ class LaneDetectionUseCase(BaseProcessor):
             for det in detections
         ]

-    def _generate_summary(self, summary: dict, alerts: List) -> str:
-        total = summary.get("total_count", 0)
-        per_cat = summary.get("per_category_count", {})
-        cumulative = summary.get("total_counts", {})
-        cumulative_total = sum(cumulative.values()) if cumulative else 0
-        lines = []
-        if total > 0:
-            lines.append(f"{total} lane detections")
-            if per_cat:
-                lines.append("detections:")
-                for cat, count in per_cat.items():
-                    lines.append(f"\t{cat}: {count}")
-        else:
-            lines.append("No lane detections")
-        lines.append(f"Total lane detections: {cumulative_total}")
-        if alerts:
-            lines.append(f"{len(alerts)} alert(s)")
-        return "\n".join(lines)
-
     def _compute_iou(self, box1: Any, box2: Any) -> float:
         def _bbox_to_list(bbox):
             if bbox is None:
@@ -539,31 +539,25 @@ class LaneDetectionUseCase(BaseProcessor):
             return 0.0
         x1_min, y1_min, x1_max, y1_max = l1
         x2_min, y2_min, x2_max, y2_max = l2
-
         x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
         y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
         x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
         y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
-
         inter_x_min = max(x1_min, x2_min)
         inter_y_min = max(y1_min, y2_min)
         inter_x_max = min(x1_max, x2_max)
         inter_y_max = min(y1_max, y2_max)
-
         inter_w = max(0.0, inter_x_max - inter_x_min)
         inter_h = max(0.0, inter_y_max - inter_y_min)
         inter_area = inter_w * inter_h
-
         area1 = (x1_max - x1_min) * (y1_max - y1_min)
         area2 = (x2_max - x2_min) * (y2_max - y2_min)
         union_area = area1 + area2 - inter_area
-
         return (inter_area / union_area) if union_area > 0 else 0.0

     def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
         if raw_id is None or bbox is None:
             return raw_id
-
         now = time.time()
         if raw_id in self._track_aliases:
             canonical_id = self._track_aliases[raw_id]
@@ -573,7 +567,6 @@ class LaneDetectionUseCase(BaseProcessor):
             track_info["last_update"] = now
             track_info["raw_ids"].add(raw_id)
             return canonical_id
-
         for canonical_id, info in self._canonical_tracks.items():
             if now - info["last_update"] > self._track_merge_time_window:
                 continue
@@ -584,13 +577,12 @@ class LaneDetectionUseCase(BaseProcessor):
                 info["last_update"] = now
                 info["raw_ids"].add(raw_id)
                 return canonical_id
-
         canonical_id = raw_id
         self._track_aliases[raw_id] = canonical_id
         self._canonical_tracks[canonical_id] = {
             "last_bbox": bbox,
             "last_update": now,
-            "raw_ids": {raw_id}
+            "raw_ids": {raw_id}
         }
         return canonical_id

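Note: the track-merge path above reuses the `_compute_iou` helper shown as unchanged context, and a raw track id is folded into a recent canonical track when the overlap exceeds `_track_merge_iou_threshold` (0.05). A self-contained sketch of the same IoU arithmetic, with one worked pair of boxes:

# Standalone check of the IoU arithmetic used for track merging.
def iou(box1, box2):
    x1_min, y1_min, x1_max, y1_max = box1
    x2_min, y2_min, x2_max, y2_max = box2
    inter_w = max(0.0, min(x1_max, x2_max) - max(x1_min, x2_min))
    inter_h = max(0.0, min(y1_max, y2_max) - max(y1_min, y2_min))
    inter_area = inter_w * inter_h
    area1 = (x1_max - x1_min) * (y1_max - y1_min)
    area2 = (x2_max - x2_min) * (y2_max - y2_min)
    union = area1 + area2 - inter_area
    return inter_area / union if union > 0 else 0.0

# Two 10x10 boxes offset by 5 px in each direction: intersection 25, union 175.
print(iou((0, 0, 10, 10), (5, 5, 15, 15)))  # ~0.143, above the 0.05 merge threshold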