matrice 1.0.99141__py3-none-any.whl → 1.0.99143__py3-none-any.whl

This diff compares the contents of two package versions that were publicly released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registry.
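The headline change in this release is the shape of the result that LaneDetectionUseCase.process() returns: the separate counting_summary / events / tracking_stats fields are replaced by a single per-frame agg_summary payload. A minimal sketch of that shape follows; the keys come from the diff itself, while the placeholder values are only illustrative.

    # Sketch of the new per-frame payload passed to create_result(data={"agg_summary": ...}).
    # Keys are taken from the diff below; the empty values here are placeholders.
    frame_number = 0
    agg_summary = {
        str(frame_number): {
            "incidents": {},           # first entry of _generate_incidents(...)
            "tracking_stats": {},      # first entry of _generate_tracking_stats(...)
            "business_analytics": {},  # empty: _generate_business_analytics(..., is_empty=True)
            "alerts": [],              # output of _check_alerts(...)
            "human_text": {},          # first entry of _generate_summary(...)
        }
    }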
@@ -1,9 +1,9 @@
  from typing import Any, Dict, List, Optional
- from dataclasses import asdict
+ from dataclasses import asdict, dataclass, field
  import time
  from datetime import datetime, timezone

- from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
+ from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol
  from ..utils import (
  filter_by_confidence,
  filter_by_categories,
@@ -16,31 +16,25 @@ from ..utils import (
  BBoxSmoothingConfig,
  BBoxSmoothingTracker
  )
- from dataclasses import dataclass, field
  from ..core.config import BaseConfig, AlertConfig, ZoneConfig


  @dataclass
  class LaneDetectionConfig(BaseConfig):
- """Configuration for lane detection use case in road lane monitoring."""
+ """Configuration for lane detection use case in road monitoring."""
  enable_smoothing: bool = True
  smoothing_algorithm: str = "observability"
  smoothing_window_size: int = 20
  smoothing_cooldown_frames: int = 5
  smoothing_confidence_range_factor: float = 0.5
-
  confidence_threshold: float = 0.6
-
  usecase_categories: List[str] = field(
  default_factory=lambda: ['Divider-Line', 'Dotted-Line', 'Double-Line', 'Random-Line', 'Road-Sign-Line', 'Solid-Line']
  )
-
  target_categories: List[str] = field(
  default_factory=lambda: ['Divider-Line', 'Dotted-Line', 'Double-Line', 'Random-Line', 'Road-Sign-Line', 'Solid-Line']
  )
-
  alert_config: Optional[AlertConfig] = None
-
  index_to_category: Optional[Dict[int, str]] = field(
  default_factory=lambda: {
  0: "Divider-Line",
@@ -54,98 +48,21 @@ class LaneDetectionConfig(BaseConfig):


  class LaneDetectionUseCase(BaseProcessor):
- def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
- frame_track_ids = set()
- for det in detections:
- tid = det.get('track_id')
- if tid is not None:
- frame_track_ids.add(tid)
- total_track_ids = set()
- for s in getattr(self, '_per_category_total_track_ids', {}).values():
- total_track_ids.update(s)
- return {
- "total_count": len(total_track_ids),
- "current_frame_count": len(frame_track_ids),
- "total_unique_track_ids": len(total_track_ids),
- "current_frame_track_ids": list(frame_track_ids),
- "last_update_time": time.time(),
- "total_frames_processed": getattr(self, '_total_frame_counter', 0)
- }
-
- def _update_tracking_state(self, detections: list):
- if not hasattr(self, "_per_category_total_track_ids"):
- self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
- self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
-
- for det in detections:
- cat = det.get("category")
- raw_track_id = det.get("track_id")
- if cat not in self.target_categories or raw_track_id is None:
- continue
- bbox = det.get("bounding_box", det.get("bbox"))
- canonical_id = self._merge_or_register_track(raw_track_id, bbox)
- det["track_id"] = canonical_id
- self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
- self._current_frame_track_ids[cat].add(canonical_id)
-
- def get_total_counts(self):
- return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
-
- def _format_timestamp_for_video(self, timestamp: float) -> str:
- hours = int(timestamp // 3600)
- minutes = int((timestamp % 3600) // 60)
- seconds = timestamp % 60
- return f"{hours:02d}:{minutes:02d}:{seconds:06.2f}"
-
- def _format_timestamp_for_stream(self, timestamp: float) -> str:
- dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
- return dt.strftime('%Y:%m:%d %H:%M:%S')
-
- def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]]) -> str:
- if not stream_info:
- return "00:00:00.00"
- if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
- stream_time_str = stream_info.get("video_timestamp", "")
- return stream_time_str[:8]
- else:
- stream_time_str = stream_info.get("stream_time", "")
- if stream_time_str:
- try:
- timestamp_str = stream_time_str.replace(" UTC", "")
- dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
- timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
- return self._format_timestamp_for_stream(timestamp)
- except:
- return self._format_timestamp_for_stream(time.time())
- else:
- return self._format_timestamp_for_stream(time.time())
-
- def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]]) -> str:
- if not stream_info:
- return "00:00:00"
- is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
- if is_video_chunk or stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
- return "00:00:00"
- else:
- if self._tracking_start_time is None:
- stream_time_str = stream_info.get("stream_time", "")
- if stream_time_str:
- try:
- timestamp_str = stream_time_str.replace(" UTC", "")
- dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
- self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
- except:
- self._tracking_start_time = time.time()
- else:
- self._tracking_start_time = time.time()
- dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
- dt = dt.replace(minute=0, second=0, microsecond=0)
- return dt.strftime('%Y:%m:%d %H:%M:%S')
+ CATEGORY_DISPLAY = {
+ "Divider-Line": "Divider Line",
+ "Dotted-Line": "Dotted Line",
+ "Double-Line": "Double Line",
+ "Random-Line": "Random Line",
+ "Road-Sign-Line": "Road Sign Line",
+ "Solid-Line": "Solid Line"
+ }

  def __init__(self):
  super().__init__("lane_detection")
  self.category = "traffic"
- self.target_categories = ["Divider-Line", "Dotted-Line", "Double-Line", "Random-Line", "Road-Sign-Line", "Solid-Line"]
+ self.CASE_TYPE: Optional[str] = 'lane_detection'
+ self.CASE_VERSION: Optional[str] = '1.0'
+ self.target_categories = ['Divider-Line', 'Dotted-Line', 'Double-Line', 'Random-Line', 'Road-Sign-Line', 'Solid-Line']
  self.smoothing_tracker = None
  self.tracker = None
  self._total_frame_counter = 0
@@ -155,6 +72,8 @@ class LaneDetectionUseCase(BaseProcessor):
  self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
  self._track_merge_iou_threshold: float = 0.05
  self._track_merge_time_window: float = 7.0
+ self._ascending_alert_list: List[int] = []
+ self.current_incident_end_timestamp: str = "N/A"

  def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
  stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
@@ -173,7 +92,7 @@ class LaneDetectionUseCase(BaseProcessor):
  self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
  else:
  processed_data = data
- self.logger.debug("Did not apply confidence filtering")
+ self.logger.debug("Did not apply confidence filtering since no threshold provided")

  if config.index_to_category:
  processed_data = apply_category_mapping(processed_data, config.index_to_category)
@@ -202,7 +121,7 @@ class LaneDetectionUseCase(BaseProcessor):
  if self.tracker is None:
  tracker_config = TrackerConfig()
  self.tracker = AdvancedTracker(tracker_config)
- self.logger.info("Initialized AdvancedTracker for Lane Monitoring")
+ self.logger.info("Initialized AdvancedTracker for Lane Detection")
  processed_data = self.tracker.update(processed_data)
  except Exception as e:
  self.logger.warning(f"AdvancedTracker failed: {e}")
@@ -222,184 +141,364 @@ class LaneDetectionUseCase(BaseProcessor):
  counting_summary = self._count_categories(processed_data, config)
  total_counts = self.get_total_counts()
  counting_summary['total_counts'] = total_counts
- insights = self._generate_insights(counting_summary, config)
- alerts = self._check_alerts(counting_summary, config)
+ alerts = self._check_alerts(counting_summary, frame_number, config)
  predictions = self._extract_predictions(processed_data)
- summary = self._generate_summary(counting_summary, alerts)

- events_list = self._generate_events(counting_summary, alerts, config, frame_number, stream_info)
- tracking_stats_list = self._generate_tracking_stats(counting_summary, insights, summary, config, frame_number, stream_info)
+ incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
+ tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
+ business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, stream_info, is_empty=True)
+ summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)

- events = events_list[0] if events_list else {}
+ incidents = incidents_list[0] if incidents_list else {}
  tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
+ business_analytics = business_analytics_list[0] if business_analytics_list else {}
+ summary = summary_list[0] if summary_list else {}
+ agg_summary = {str(frame_number): {
+ "incidents": incidents,
+ "tracking_stats": tracking_stats,
+ "business_analytics": business_analytics,
+ "alerts": alerts,
+ "human_text": summary}
+ }

  context.mark_completed()
  result = self.create_result(
- data={
- "counting_summary": counting_summary,
- "general_counting_summary": general_counting_summary,
- "alerts": alerts,
- "total_detections": counting_summary.get("total_count", 0),
- "events": events,
- "tracking_stats": tracking_stats,
- },
+ data={"agg_summary": agg_summary},
  usecase=self.name,
  category=self.category,
  context=context
  )
- result.summary = summary
- result.insights = insights
- result.predictions = predictions
  return result

- def _generate_events(self, counting_summary: Dict, alerts: List, config: LaneDetectionConfig,
- frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+ def _check_alerts(self, summary: dict, frame_number: Any, config: LaneDetectionConfig) -> List[Dict]:
+ def get_trend(data, lookback=900, threshold=0.6):
+ window = data[-lookback:] if len(data) >= lookback else data
+ if len(window) < 2:
+ return True
+ increasing = 0
+ total = 0
+ for i in range(1, len(window)):
+ if window[i] >= window[i - 1]:
+ increasing += 1
+ total += 1
+ ratio = increasing / total
+ return ratio >= threshold
+
  frame_key = str(frame_number) if frame_number is not None else "current_frame"
- events = [{frame_key: []}]
- frame_events = events[0][frame_key]
+ alerts = []
+ total_detections = summary.get("total_count", 0)
+ total_counts_dict = summary.get("total_counts", {})
+ per_category_count = summary.get("per_category_count", {})
+
+ if not config.alert_config:
+ return alerts
+
+ if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
+ for category, threshold in config.alert_config.count_thresholds.items():
+ if category == "all" and total_detections > threshold:
+ alerts.append({
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+ "alert_id": f"alert_{category}_{frame_key}",
+ "incident_category": self.CASE_TYPE,
+ "threshold_level": threshold,
+ "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+ getattr(config.alert_config, 'alert_value', ['JSON']))}
+ })
+ elif category in per_category_count and per_category_count[category] > threshold:
+ alerts.append({
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+ "alert_id": f"alert_{category}_{frame_key}",
+ "incident_category": self.CASE_TYPE,
+ "threshold_level": threshold,
+ "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+ getattr(config.alert_config, 'alert_value', ['JSON']))}
+ })
+ return alerts
+
+ def _generate_incidents(self, counting_summary: Dict, alerts: List, config: LaneDetectionConfig,
+ frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+ incidents = []
  total_detections = counting_summary.get("total_count", 0)
+ current_timestamp = self._get_current_timestamp_str(stream_info)
+ camera_info = self.get_camera_info_from_stream(stream_info)
+
+ self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list

  if total_detections > 0:
- level = "info"
+ level = "low"
  intensity = 5.0
+ start_timestamp = self._get_start_timestamp_str(stream_info)
+ if start_timestamp and self.current_incident_end_timestamp == 'N/A':
+ self.current_incident_end_timestamp = 'Incident still active'
+ elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
+ if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
+ self.current_incident_end_timestamp = current_timestamp
+ elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
+ self.current_incident_end_timestamp = 'N/A'
+
  if config.alert_config and config.alert_config.count_thresholds:
  threshold = config.alert_config.count_thresholds.get("all", 15)
  intensity = min(10.0, (total_detections / threshold) * 10)
- if intensity >= 7:
+ if intensity >= 9:
  level = "critical"
+ self._ascending_alert_list.append(3)
+ elif intensity >= 7:
+ level = "significant"
+ self._ascending_alert_list.append(2)
  elif intensity >= 5:
- level = "warning"
+ level = "medium"
+ self._ascending_alert_list.append(1)
  else:
- level = "info"
+ self._ascending_alert_list.append(0)
  else:
- if total_detections > 25:
+ if total_detections > 30:
  level = "critical"
+ intensity = 10.0
+ self._ascending_alert_list.append(3)
+ elif total_detections > 25:
+ level = "significant"
  intensity = 9.0
+ self._ascending_alert_list.append(2)
  elif total_detections > 15:
- level = "warning"
+ level = "medium"
  intensity = 7.0
+ self._ascending_alert_list.append(1)
  else:
- level = "info"
+ level = "low"
  intensity = min(10.0, total_detections / 3.0)
+ self._ascending_alert_list.append(0)

- human_text_lines = ["EVENTS DETECTED:"]
- human_text_lines.append(f" - {total_detections} lanes detected [INFO]")
+ human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
+ human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
  human_text = "\n".join(human_text_lines)

- event = {
- "type": "lane_detection",
- "stream_time": datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S UTC"),
- "level": level,
- "intensity": round(intensity, 1),
- "config": {
- "min_value": 0,
- "max_value": 10,
- "level_settings": {"info": 2, "warning": 5, "critical": 7}
- },
- "application_name": "Lane Detection System",
- "application_version": "1.2",
- "location_info": None,
- "human_text": human_text
- }
- frame_events.append(event)
+ alert_settings = []
+ if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+ alert_settings.append({
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+ "incident_category": self.CASE_TYPE,
+ "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+ "ascending": True,
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+ getattr(config.alert_config, 'alert_value', ['JSON']))}
+ })
+
+ event = self.create_incident(
+ incident_id=f"{self.CASE_TYPE}_{frame_number}",
+ incident_type=self.CASE_TYPE,
+ severity_level=level,
+ human_text=human_text,
+ camera_info=camera_info,
+ alerts=alerts,
+ alert_settings=alert_settings,
+ start_time=start_timestamp,
+ end_time=self.current_incident_end_timestamp,
+ level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7}
+ )
+ incidents.append(event)
+ else:
+ self._ascending_alert_list.append(0)
+ incidents.append({})
+ return incidents

- for alert in alerts:
- total_detections = counting_summary.get("total_count", 0)
- intensity_message = "ALERT: Low lane density in the scene"
- if config.alert_config and config.alert_config.count_thresholds:
- threshold = config.alert_config.count_thresholds.get("all", 15)
- percentage = (total_detections / threshold) * 100 if threshold > 0 else 0
- if percentage < 20:
- intensity_message = "ALERT: Low lane density in the scene"
- elif percentage <= 50:
- intensity_message = "ALERT: Moderate lane density in the scene"
- elif percentage <= 70:
- intensity_message = "ALERT: High lane density in the scene"
- else:
- intensity_message = "ALERT: Very high lane density in the scene"
+ def _generate_tracking_stats(self, counting_summary: Dict, alerts: List, config: LaneDetectionConfig,
+ frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+ camera_info = self.get_camera_info_from_stream(stream_info)
+ tracking_stats = []
+ total_detections = counting_summary.get("total_count", 0)
+ total_counts_dict = counting_summary.get("total_counts", {})
+ per_category_count = counting_summary.get("per_category_count", {})
+ current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
+ start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
+ high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
+ high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
+
+ total_counts = [{"category": cat, "count": count} for cat, count in total_counts_dict.items() if count > 0]
+ current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items() if count > 0 or total_detections > 0]
+
+ detections = []
+ for detection in counting_summary.get("detections", []):
+ bbox = detection.get("bounding_box", {})
+ category = detection.get("category", "lane")
+ if detection.get("masks"):
+ segmentation = detection.get("masks", [])
+ detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+ elif detection.get("segmentation"):
+ segmentation = detection.get("segmentation")
+ detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+ elif detection.get("mask"):
+ segmentation = detection.get("mask")
+ detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
  else:
- if total_detections > 15:
- intensity_message = "ALERT: High lane density in the scene"
- elif total_detections == 1:
- intensity_message = "ALERT: Low lane density in the scene"
- else:
- intensity_message = "ALERT: Moderate lane density in the scene"
-
- alert_event = {
- "type": alert.get("type", "lane_density_alert"),
- "stream_time": datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S UTC"),
- "level": alert.get("severity", "warning"),
- "intensity": 8.0,
- "config": {
- "min_value": 0,
- "max_value": 10,
- "level_settings": {"info": 2, "warning": 5, "critical": 7}
- },
- "application_name": "Lane Density Alert System",
- "application_version": "1.2",
- "location_info": alert.get("zone"),
- "human_text": f"{datetime.now(timezone.utc).strftime('%Y-%m-%d-%H:%M:%S UTC')} : {intensity_message}"
- }
- frame_events.append(alert_event)
+ detection_obj = self.create_detection_object(category, bbox)
+ detections.append(detection_obj)
+
+ alert_settings = []
+ if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+ alert_settings.append({
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+ "incident_category": self.CASE_TYPE,
+ "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+ "ascending": True,
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+ getattr(config.alert_config, 'alert_value', ['JSON']))}
+ })
+
+ human_text_lines = [f"Tracking Statistics:"]
+ human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")
+ for cat, count in per_category_count.items():
+ human_text_lines.append(f"\t{cat}: {count}")
+ human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
+ for cat, count in total_counts_dict.items():
+ if count > 0:
+ human_text_lines.append(f"\t{cat}: {count}")
+ if alerts:
+ for alert in alerts:
+ human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
+ else:
+ human_text_lines.append("Alerts: None")
+ human_text = "\n".join(human_text_lines)

- return events
+ reset_settings = [{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]
+ tracking_stat = self.create_tracking_stats(
+ total_counts=total_counts,
+ current_counts=current_counts,
+ detections=detections,
+ human_text=human_text,
+ camera_info=camera_info,
+ alerts=alerts,
+ alert_settings=alert_settings,
+ reset_settings=reset_settings,
+ start_time=high_precision_start_timestamp,
+ reset_time=high_precision_reset_timestamp
+ )
+ tracking_stats.append(tracking_stat)
+ return tracking_stats

- def _generate_tracking_stats(self, counting_summary: Dict, insights: List[str], summary: str, config: LaneDetectionConfig,
- frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
- frame_key = str(frame_number) if frame_number is not None else "current_frame"
- tracking_stats = [{frame_key: []}]
- frame_tracking_stats = tracking_stats[0][frame_key]
+ def _generate_business_analytics(self, counting_summary: Dict, alerts: Any, config: LaneDetectionConfig,
+ stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
+ if is_empty:
+ return []

- total_detections = counting_summary.get("total_count", 0)
- total_counts = counting_summary.get("total_counts", {})
- cumulative_total = sum(total_counts.values()) if total_counts else 0
- per_category_count = counting_summary.get("per_category_count", {})
+ def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
+ lines = {}
+ lines["Application Name"] = self.CASE_TYPE
+ lines["Application Version"] = self.CASE_VERSION
+ if len(incidents) > 0:
+ lines["Incidents:"] = f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
+ if len(tracking_stats) > 0:
+ lines["Tracking Statistics:"] = f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
+ if len(business_analytics) > 0:
+ lines["Business Analytics:"] = f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
+ if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
+ lines["Summary"] = "No Summary Data"
+ return [lines]

- track_ids_info = self._get_track_ids_info(counting_summary.get("detections", []))
+ def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
+ frame_track_ids = set()
+ for det in detections:
+ tid = det.get('track_id')
+ if tid is not None:
+ frame_track_ids.add(tid)
+ total_track_ids = set()
+ for s in getattr(self, '_per_category_total_track_ids', {}).values():
+ total_track_ids.update(s)
+ return {
+ "total_count": len(total_track_ids),
+ "current_frame_count": len(frame_track_ids),
+ "total_unique_track_ids": len(total_track_ids),
+ "current_frame_track_ids": list(frame_track_ids),
+ "last_update_time": time.time(),
+ "total_frames_processed": getattr(self, '_total_frame_counter', 0)
+ }

- current_timestamp = self._get_current_timestamp_str(stream_info)
- start_timestamp = self._get_start_timestamp_str(stream_info)
+ def _update_tracking_state(self, detections: list):
+ if not hasattr(self, "_per_category_total_track_ids"):
+ self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
+ self._current_frame_track_ids = {cat: set() for cat in self.target_categories}

- human_text_lines = []
- human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
- if total_detections > 0:
- category_counts = [f"{count} {cat}" for cat, count in per_category_count.items()]
- if len(category_counts) == 1:
- detection_text = category_counts[0] + " detected"
- elif len(category_counts) == 2:
- detection_text = f"{category_counts[0]} and {category_counts[1]} detected"
- else:
- detection_text = f"{', '.join(category_counts[:-1])}, and {category_counts[-1]} detected"
- human_text_lines.append(f"\t- {detection_text}")
- else:
- human_text_lines.append(f"\t- No detections")
+ for det in detections:
+ cat = det.get("category")
+ raw_track_id = det.get("track_id")
+ if cat not in self.target_categories or raw_track_id is None:
+ continue
+ bbox = det.get("bounding_box", det.get("bbox"))
+ canonical_id = self._merge_or_register_track(raw_track_id, bbox)
+ det["track_id"] = canonical_id
+ self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
+ self._current_frame_track_ids[cat].add(canonical_id)

- human_text_lines.append("")
- human_text_lines.append(f"TOTAL SINCE {start_timestamp}:")
- human_text_lines.append(f"\t- Total Lanes Detected: {cumulative_total}")
- if total_counts:
- for cat, count in total_counts.items():
- if count > 0:
- human_text_lines.append(f"\t- {cat}: {count}")
+ def get_total_counts(self):
+ return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}

- human_text = "\n".join(human_text_lines)
+ def _format_timestamp_for_stream(self, timestamp: float) -> str:
+ dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
+ return dt.strftime('%Y:%m:%d %H:%M:%S')

- tracking_stat = {
- "type": "lane_detection",
- "category": "traffic",
- "count": total_detections,
- "insights": insights,
- "summary": summary,
- "timestamp": datetime.now(timezone.utc).strftime('%Y-%m-%d-%H:%M:%S UTC'),
- "human_text": human_text,
- "track_ids_info": track_ids_info,
- "global_frame_offset": getattr(self, '_global_frame_offset', 0),
- "local_frame_id": frame_key,
- "detections": counting_summary.get("detections", [])
- }
+ def _format_timestamp_for_video(self, timestamp: float) -> str:
+ hours = int(timestamp // 3600)
+ minutes = int((timestamp % 3600) // 60)
+ seconds = round(float(timestamp % 60), 2)
+ return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"

- frame_tracking_stats.append(tracking_stat)
- return tracking_stats
+ def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str] = None) -> str:
+ if not stream_info:
+ return "00:00:00.00"
+ if precision:
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+ if frame_id:
+ start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
+ else:
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
+ return self._format_timestamp_for_video(start_time)
+ else:
+ return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+ if frame_id:
+ start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
+ else:
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
+ return self._format_timestamp_for_video(start_time)
+ else:
+ stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+ if stream_time_str:
+ try:
+ timestamp_str = stream_time_str.replace(" UTC", "")
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+ timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
+ return self._format_timestamp_for_stream(timestamp)
+ except:
+ return self._format_timestamp_for_stream(time.time())
+ else:
+ return self._format_timestamp_for_stream(time.time())
+
+ def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
+ if not stream_info:
+ return "00:00:00"
+ if precision:
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+ return "00:00:00"
+ else:
+ return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+ return "00:00:00"
+ else:
+ if self._tracking_start_time is None:
+ stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+ if stream_time_str:
+ try:
+ timestamp_str = stream_time_str.replace(" UTC", "")
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+ self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+ except:
+ self._tracking_start_time = time.time()
+ else:
+ self._tracking_start_time = time.time()
+ dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+ dt = dt.replace(minute=0, second=0, microsecond=0)
+ return dt.strftime('%Y:%m:%d %H:%M:%S')

  def _count_categories(self, detections: list, config: LaneDetectionConfig) -> dict:
  counts = {}
@@ -421,74 +520,6 @@ class LaneDetectionUseCase(BaseProcessor):
  ]
  }

- CATEGORY_DISPLAY = {
- "Divider-Line": "divider-line",
- "Dotted-Line": "dotted-line",
- "Double-Line": "double-line",
- "Random-Line": "random-line",
- "Road-Sign-Line": "road-sign-line",
- "Solid-Line": "solid-line"
- }
-
- def _generate_insights(self, summary: dict, config: LaneDetectionConfig) -> List[str]:
- insights = []
- per_cat = summary.get("per_category_count", {})
- total_detections = summary.get("total_count", 0)
-
- if total_detections == 0:
- insights.append("No lane detections in the scene")
- return insights
- insights.append(f"EVENT: Detected {total_detections} lanes in the scene")
-
- intensity_threshold = None
- if config.alert_config and config.alert_config.count_thresholds and "all" in config.alert_config.count_thresholds:
- intensity_threshold = config.alert_config.count_thresholds["all"]
-
- if intensity_threshold is not None:
- percentage = (total_detections / intensity_threshold) * 100
- if percentage < 20:
- insights.append(f"INTENSITY: Low lane density ({percentage:.1f}% of capacity)")
- elif percentage <= 50:
- insights.append(f"INTENSITY: Moderate lane density ({percentage:.1f}% of capacity)")
- elif percentage <= 70:
- insights.append(f"INTENSITY: High lane density ({percentage:.1f}% of capacity)")
- else:
- insights.append(f"INTENSITY: Very high lane density ({percentage:.1f}% of capacity)")
-
- for cat, count in per_cat.items():
- display = self.CATEGORY_DISPLAY.get(cat, cat)
- insights.append(f"{display}: {count}")
- return insights
-
- def _check_alerts(self, summary: dict, config: LaneDetectionConfig) -> List[Dict]:
- alerts = []
- if not config.alert_config:
- return alerts
- total = summary.get("total_count", 0)
- if config.alert_config.count_thresholds:
- for category, threshold in config.alert_config.count_thresholds.items():
- if category == "all" and total >= threshold:
- alerts.append({
- "type": "count_threshold",
- "severity": "warning",
- "message": f"Total lane detections ({total}) exceeds threshold ({threshold})",
- "category": category,
- "current_count": total,
- "threshold": threshold
- })
- elif category in summary.get("per_category_count", {}):
- count = summary.get("per_category_count", {})[category]
- if count >= threshold:
- alerts.append({
- "type": "count_threshold",
- "severity": "warning",
- "message": f"{category} count ({count}) exceeds threshold ({threshold})",
- "category": category,
- "current_count": count,
- "threshold": threshold
- })
- return alerts
-
  def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
  return [
  {
@@ -499,25 +530,6 @@ class LaneDetectionUseCase(BaseProcessor):
  for det in detections
  ]

- def _generate_summary(self, summary: dict, alerts: List) -> str:
- total = summary.get("total_count", 0)
- per_cat = summary.get("per_category_count", {})
- cumulative = summary.get("total_counts", {})
- cumulative_total = sum(cumulative.values()) if cumulative else 0
- lines = []
- if total > 0:
- lines.append(f"{total} lane detections")
- if per_cat:
- lines.append("detections:")
- for cat, count in per_cat.items():
- lines.append(f"\t{cat}: {count}")
- else:
- lines.append("No lane detections")
- lines.append(f"Total lane detections: {cumulative_total}")
- if alerts:
- lines.append(f"{len(alerts)} alert(s)")
- return "\n".join(lines)
-
  def _compute_iou(self, box1: Any, box2: Any) -> float:
  def _bbox_to_list(bbox):
  if bbox is None:
@@ -539,31 +551,25 @@ class LaneDetectionUseCase(BaseProcessor):
  return 0.0
  x1_min, y1_min, x1_max, y1_max = l1
  x2_min, y2_min, x2_max, y2_max = l2
-
  x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
  y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
  x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
  y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
-
  inter_x_min = max(x1_min, x2_min)
  inter_y_min = max(y1_min, y2_min)
  inter_x_max = min(x1_max, x2_max)
  inter_y_max = min(y1_max, y2_max)
-
  inter_w = max(0.0, inter_x_max - inter_x_min)
  inter_h = max(0.0, inter_y_max - inter_y_min)
  inter_area = inter_w * inter_h
-
  area1 = (x1_max - x1_min) * (y1_max - y1_min)
  area2 = (x2_max - x2_min) * (y2_max - y2_min)
  union_area = area1 + area2 - inter_area
-
  return (inter_area / union_area) if union_area > 0 else 0.0

  def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
  if raw_id is None or bbox is None:
  return raw_id
-
  now = time.time()
  if raw_id in self._track_aliases:
  canonical_id = self._track_aliases[raw_id]
@@ -573,7 +579,6 @@ class LaneDetectionUseCase(BaseProcessor):
  track_info["last_update"] = now
  track_info["raw_ids"].add(raw_id)
  return canonical_id
-
  for canonical_id, info in self._canonical_tracks.items():
  if now - info["last_update"] > self._track_merge_time_window:
  continue
@@ -584,7 +589,6 @@ class LaneDetectionUseCase(BaseProcessor):
  info["last_update"] = now
  info["raw_ids"].add(raw_id)
  return canonical_id
-
  canonical_id = raw_id
  self._track_aliases[raw_id] = canonical_id
  self._canonical_tracks[canonical_id] = {