matrice 1.0.99145__py3-none-any.whl → 1.0.99147__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,605 @@
+ """
+ Pipeline Monitoring Use Case for Post-Processing
+
+ This module provides pipeline (pipe) detection functionality with detection
+ counting, canonical track merging, and alert/incident generation.
+ """
+
+ import time
+ from dataclasses import asdict, dataclass, field
+ from datetime import datetime, timezone
+ from typing import Any, Dict, List, Optional
+
+ from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
+ from ..core.config import BaseConfig, AlertConfig, ZoneConfig
+ from ..utils import (
+     filter_by_confidence,
+     filter_by_categories,
+     apply_category_mapping,
+     count_objects_by_category,
+     count_objects_in_zones,
+     calculate_counting_summary,
+     match_results_structure,
+     bbox_smoothing,
+     BBoxSmoothingConfig,
+     BBoxSmoothingTracker,
+ )
+
+
+ @dataclass
+ class PipelineDetectionConfig(BaseConfig):
+     """Configuration for the pipeline detection use case in pipeline monitoring."""
+
+     # Smoothing configuration
+     enable_smoothing: bool = True
+     smoothing_algorithm: str = "observability"  # "window" or "observability"
+     smoothing_window_size: int = 20
+     smoothing_cooldown_frames: int = 5
+     smoothing_confidence_range_factor: float = 0.5
+
+     # Confidence thresholds
+     confidence_threshold: float = 0.4
+
+     usecase_categories: List[str] = field(
+         default_factory=lambda: ['pipe']
+     )
+
+     target_categories: List[str] = field(
+         default_factory=lambda: ['pipe']
+     )
+
+     alert_config: Optional[AlertConfig] = None
+
+     index_to_category: Optional[Dict[int, str]] = field(
+         default_factory=lambda: {
+             0: "pipe"
+         }
+     )
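+
+ # Illustrative configuration sketch (not part of this diff). The AlertConfig
+ # field names used below (count_thresholds, alert_type, alert_value) are
+ # inferred from how alert_config is read later in this module via
+ # getattr/hasattr; the constructor signature is an assumption.
+ #
+ #     alert_config = AlertConfig(count_thresholds={"all": 10, "pipe": 5})
+ #     config = PipelineDetectionConfig(
+ #         confidence_threshold=0.5,
+ #         alert_config=alert_config,
+ #     )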
+
+
+ class PipelineDetectionUseCase(BaseProcessor):
+     # Human-friendly display names for categories
+     CATEGORY_DISPLAY = {
+         "pipe": "pipe"
+     }
+
+     def __init__(self):
+         super().__init__("pipeline_detection")
+         self.category = "pipeline_detection"
+
+         self.CASE_TYPE: Optional[str] = 'pipeline_detection'
+         self.CASE_VERSION: Optional[str] = '1.0'
+
+         # List of categories to track
+         self.target_categories = ['pipe']
+
+         # Initialize smoothing tracker
+         self.smoothing_tracker = None
+
+         # Initialize advanced tracker (will be created on first use)
+         self.tracker = None
+
+         # Initialize tracking state variables
+         self._total_frame_counter = 0
+         self._global_frame_offset = 0
+
+         # Track start time for "TOTAL SINCE" calculation
+         self._tracking_start_time = None
+
+         self._track_aliases: Dict[Any, Any] = {}
+         self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
+         # Tunable parameters – adjust if necessary for specific scenarios
+         self._track_merge_iou_threshold: float = 0.05  # IoU ≥ 0.05 → treat as the same track and merge
+         self._track_merge_time_window: float = 7.0  # seconds within which to merge
+
+         self._ascending_alert_list: List[int] = []
+         self.current_incident_end_timestamp: str = "N/A"
+
+     def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
+                 stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
+         start_time = time.time()
+         if not isinstance(config, PipelineDetectionConfig):
+             return self.create_error_result("Invalid config type", usecase=self.name, category=self.category, context=context)
+         if context is None:
+             context = ProcessingContext()
+
+         input_format = match_results_structure(data)
+         context.input_format = input_format
+         context.confidence_threshold = config.confidence_threshold
+
+         if config.confidence_threshold is not None:
+             processed_data = filter_by_confidence(data, config.confidence_threshold)
+             self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
+         else:
+             processed_data = data
+             self.logger.debug("Skipped confidence filtering: no threshold was provided")
+
+         if config.index_to_category:
+             processed_data = apply_category_mapping(processed_data, config.index_to_category)
+             self.logger.debug("Applied category mapping")
+
+         if config.target_categories:
+             processed_data = [d for d in processed_data if d.get('category') in config.target_categories]
+             self.logger.debug("Applied category filtering")
+
+         if config.enable_smoothing:
+             if self.smoothing_tracker is None:
+                 smoothing_config = BBoxSmoothingConfig(
+                     smoothing_algorithm=config.smoothing_algorithm,
+                     window_size=config.smoothing_window_size,
+                     cooldown_frames=config.smoothing_cooldown_frames,
+                     confidence_threshold=config.confidence_threshold,
+                     confidence_range_factor=config.smoothing_confidence_range_factor,
+                     enable_smoothing=True
+                 )
+                 self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
+             processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
+
+         try:
+             from ..advanced_tracker import AdvancedTracker
+             from ..advanced_tracker.config import TrackerConfig
+             if self.tracker is None:
+                 tracker_config = TrackerConfig()
+                 self.tracker = AdvancedTracker(tracker_config)
+                 self.logger.info("Initialized AdvancedTracker for pipeline monitoring and tracking")
+             processed_data = self.tracker.update(processed_data)
+         except Exception as e:
+             self.logger.warning(f"AdvancedTracker failed: {e}")
+
+         self._update_tracking_state(processed_data)
+         self._total_frame_counter += 1
+
+         frame_number = None
+         if stream_info:
+             input_settings = stream_info.get("input_settings", {})
+             start_frame = input_settings.get("start_frame")
+             end_frame = input_settings.get("end_frame")
+             if start_frame is not None and end_frame is not None and start_frame == end_frame:
+                 frame_number = start_frame
+
+         general_counting_summary = calculate_counting_summary(data)
+         counting_summary = self._count_categories(processed_data, config)
+         total_counts = self.get_total_counts()
+         counting_summary['total_counts'] = total_counts
+
+         alerts = self._check_alerts(counting_summary, frame_number, config)
+         predictions = self._extract_predictions(processed_data)
+
+         incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
+         tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
+         business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, stream_info, is_empty=True)
+         summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)
+
+         incidents = incidents_list[0] if incidents_list else {}
+         tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
+         business_analytics = business_analytics_list[0] if business_analytics_list else {}
+         summary = summary_list[0] if summary_list else {}
+         agg_summary = {str(frame_number): {
+             "incidents": incidents,
+             "tracking_stats": tracking_stats,
+             "business_analytics": business_analytics,
+             "alerts": alerts,
+             "human_text": summary}
+         }
+
+         context.mark_completed()
+
+         result = self.create_result(
+             data={"agg_summary": agg_summary},
+             usecase=self.name,
+             category=self.category,
+             context=context
+         )
+         return result
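+
+     # Illustrative usage sketch (assumptions: detections are dicts with a
+     # numeric category index plus "confidence" and "bounding_box" keys, and
+     # ProcessingResult exposes the payload passed to create_result as .data):
+     #
+     #     usecase = PipelineDetectionUseCase()
+     #     config = PipelineDetectionConfig(confidence_threshold=0.5)
+     #     frame = [{"category": 0, "confidence": 0.91,
+     #               "bounding_box": {"xmin": 10, "ymin": 20, "xmax": 110, "ymax": 220}}]
+     #     result = usecase.process(frame, config)
+     #     print(result.data["agg_summary"])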
+
+     def _check_alerts(self, summary: dict, frame_number: Any, config: PipelineDetectionConfig) -> List[Dict]:
+         def get_trend(data, lookback=900, threshold=0.6):
+             window = data[-lookback:] if len(data) >= lookback else data
+             if len(window) < 2:
+                 return True
+             increasing = 0
+             total = 0
+             for i in range(1, len(window)):
+                 if window[i] >= window[i - 1]:
+                     increasing += 1
+                 total += 1
+             ratio = increasing / total
+             if ratio >= threshold:
+                 return True
+             elif ratio <= (1 - threshold):
+                 return False
+             return None  # Trend is inconclusive: neither clearly ascending nor descending
+
+         frame_key = str(frame_number) if frame_number is not None else "current_frame"
+         alerts = []
+         total_detections = summary.get("total_count", 0)
+         total_counts_dict = summary.get("total_counts", {})
+         cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0
+         per_category_count = summary.get("per_category_count", {})
+
+         if not config.alert_config:
+             return alerts
+
+         total = summary.get("total_count", 0)
+         if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
+             alert_types = getattr(config.alert_config, 'alert_type', ['Default'])
+             alert_values = getattr(config.alert_config, 'alert_value', ['JSON'])
+             for category, threshold in config.alert_config.count_thresholds.items():
+                 if category == "all" and total > threshold:
+                     alerts.append({
+                         "alert_type": alert_types,
+                         "alert_id": "alert_" + category + '_' + frame_key,
+                         "incident_category": self.CASE_TYPE,
+                         "threshold_level": threshold,
+                         "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                         "settings": {t: v for t, v in zip(alert_types, alert_values)}
+                     })
+                 elif category in per_category_count:
+                     count = per_category_count[category]
+                     if count > threshold:
+                         alerts.append({
+                             "alert_type": alert_types,
+                             "alert_id": "alert_" + category + '_' + frame_key,
+                             "incident_category": self.CASE_TYPE,
+                             "threshold_level": threshold,
+                             "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                             "settings": {t: v for t, v in zip(alert_types, alert_values)}
+                         })
+         return alerts
+
+     def _generate_incidents(self, counting_summary: Dict, alerts: List, config: PipelineDetectionConfig, frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+         incidents = []
+         total_detections = counting_summary.get("total_count", 0)
+         current_timestamp = self._get_current_timestamp_str(stream_info)
+         camera_info = self.get_camera_info_from_stream(stream_info)
+         # Keep only the most recent 900 entries of the trend history
+         self._ascending_alert_list = self._ascending_alert_list[-900:]
+         if total_detections > 0:
+             level = "low"
+             intensity = 5.0
+             start_timestamp = self._get_start_timestamp_str(stream_info)
+             if start_timestamp and self.current_incident_end_timestamp == 'N/A':
+                 self.current_incident_end_timestamp = 'Incident still active'
+             elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
+                 if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
+                     self.current_incident_end_timestamp = current_timestamp
+             elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
+                 self.current_incident_end_timestamp = 'N/A'
+             if config.alert_config and config.alert_config.count_thresholds:
+                 threshold = config.alert_config.count_thresholds.get("all", 15)
+                 intensity = min(10.0, (total_detections / threshold) * 10)
+                 if intensity >= 9:
+                     level = "critical"
+                     self._ascending_alert_list.append(3)
+                 elif intensity >= 7:
+                     level = "significant"
+                     self._ascending_alert_list.append(2)
+                 elif intensity >= 5:
+                     level = "medium"
+                     self._ascending_alert_list.append(1)
+                 else:
+                     level = "low"
+                     self._ascending_alert_list.append(0)
+             else:
+                 if total_detections > 30:
+                     level = "critical"
+                     intensity = 10.0
+                     self._ascending_alert_list.append(3)
+                 elif total_detections > 25:
+                     level = "significant"
+                     intensity = 9.0
+                     self._ascending_alert_list.append(2)
+                 elif total_detections > 15:
+                     level = "medium"
+                     intensity = 7.0
+                     self._ascending_alert_list.append(1)
+                 else:
+                     level = "low"
+                     intensity = min(10.0, total_detections / 3.0)
+                     self._ascending_alert_list.append(0)
+             human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
+             human_text_lines.append(f"\tSeverity Level: {self.CASE_TYPE}: {level}")
+             human_text = "\n".join(human_text_lines)
+             alert_settings = []
+             if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+                 alert_types = getattr(config.alert_config, 'alert_type', ['Default'])
+                 alert_values = getattr(config.alert_config, 'alert_value', ['JSON'])
+                 alert_settings.append({
+                     "alert_type": alert_types,
+                     "incident_category": self.CASE_TYPE,
+                     "threshold_level": getattr(config.alert_config, 'count_thresholds', {}),
+                     "ascending": True,
+                     "settings": {t: v for t, v in zip(alert_types, alert_values)}
+                 })
+             event = self.create_incident(
+                 incident_id=self.CASE_TYPE + '_' + str(frame_number), incident_type=self.CASE_TYPE,
+                 severity_level=level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
+                 start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
+                 level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7})
+             incidents.append(event)
+         else:
+             self._ascending_alert_list.append(0)
+             incidents.append({})
+         return incidents
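+
+     # Worked example: with count_thresholds = {"all": 15} and 12 detections,
+     # intensity = min(10.0, (12 / 15) * 10) = 8.0, so the incident level is
+     # "significant" and 2 is appended to the trend history.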
+
+     def _generate_tracking_stats(self, counting_summary: Dict, alerts: List, config: PipelineDetectionConfig, frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+         camera_info = self.get_camera_info_from_stream(stream_info)
+         tracking_stats = []
+         total_detections = counting_summary.get("total_count", 0)
+         total_counts_dict = counting_summary.get("total_counts", {})
+         cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0
+         per_category_count = counting_summary.get("per_category_count", {})
+         current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
+         start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
+         high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
+         high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
+         total_counts = []
+         for cat, count in total_counts_dict.items():
+             if count > 0:
+                 total_counts.append({"category": cat, "count": count})
+         current_counts = []
+         for cat, count in per_category_count.items():
+             if count > 0 or total_detections > 0:
+                 current_counts.append({"category": cat, "count": count})
+         detections = []
+         for detection in counting_summary.get("detections", []):
+             bbox = detection.get("bounding_box", {})
+             category = detection.get("category", "pipe")
+             detection_obj = self.create_detection_object(category, bbox)
+             detections.append(detection_obj)
+         alert_settings = []
+         if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+             alert_types = getattr(config.alert_config, 'alert_type', ['Default'])
+             alert_values = getattr(config.alert_config, 'alert_value', ['JSON'])
+             alert_settings.append({
+                 "alert_type": alert_types,
+                 "incident_category": self.CASE_TYPE,
+                 "threshold_level": getattr(config.alert_config, 'count_thresholds', {}),
+                 "ascending": True,
+                 "settings": {t: v for t, v in zip(alert_types, alert_values)}
+             })
+         human_text_lines = ["Tracking Statistics:"]
+         human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")
+         for cat, count in per_category_count.items():
+             human_text_lines.append(f"\t{cat}: {count}")
+         human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
+         for cat, count in total_counts_dict.items():
+             if count > 0:
+                 human_text_lines.append(f"\t{cat}: {count}")
+         if alerts:
+             for alert in alerts:
+                 human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
+         else:
+             human_text_lines.append("Alerts: None")
+         human_text = "\n".join(human_text_lines)
+         reset_settings = [{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]
+         tracking_stat = self.create_tracking_stats(
+             total_counts=total_counts, current_counts=current_counts,
+             detections=detections, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
+             reset_settings=reset_settings, start_time=high_precision_start_timestamp,
+             reset_time=high_precision_reset_timestamp)
+         tracking_stats.append(tracking_stat)
+         return tracking_stats
+
+     def _generate_business_analytics(self, counting_summary: Dict, alerts: Any, config: PipelineDetectionConfig, stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
+         if is_empty:
+             return []
+         # No business analytics are computed for this use case; return an
+         # empty list so the List[Dict] annotation holds in every branch.
+         return []
+
+     def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[Dict]:
+         lines = {}
+         lines["Application Name"] = self.CASE_TYPE
+         lines["Application Version"] = self.CASE_VERSION
+         if len(incidents) > 0:
+             lines["Incidents:"] = f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
+         if len(tracking_stats) > 0:
+             lines["Tracking Statistics:"] = f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
+         if len(business_analytics) > 0:
+             lines["Business Analytics:"] = f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
+         if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
+             lines["Summary"] = "No Summary Data"
+         return [lines]
+
+     def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
+         frame_track_ids = set()
+         for det in detections:
+             tid = det.get('track_id')
+             if tid is not None:
+                 frame_track_ids.add(tid)
+         total_track_ids = set()
+         for s in getattr(self, '_per_category_total_track_ids', {}).values():
+             total_track_ids.update(s)
+         return {
+             "total_count": len(total_track_ids),
+             "current_frame_count": len(frame_track_ids),
+             "total_unique_track_ids": len(total_track_ids),
+             "current_frame_track_ids": list(frame_track_ids),
+             "last_update_time": time.time(),
+             "total_frames_processed": getattr(self, '_total_frame_counter', 0)
+         }
+
+     def _update_tracking_state(self, detections: list):
+         if not hasattr(self, "_per_category_total_track_ids"):
+             self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
+         # Reset the per-frame id sets on every call
+         self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
+         for det in detections:
+             cat = det.get("category")
+             raw_track_id = det.get("track_id")
+             if cat not in self.target_categories or raw_track_id is None:
+                 continue
+             bbox = det.get("bounding_box", det.get("bbox"))
+             # Map the raw tracker id to a canonical id so brief track losses
+             # do not inflate the cumulative counts
+             canonical_id = self._merge_or_register_track(raw_track_id, bbox)
+             det["track_id"] = canonical_id
+             self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
+             self._current_frame_track_ids[cat].add(canonical_id)
427
+ def get_total_counts(self):
428
+ return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
429
+
430
+ def _format_timestamp_for_stream(self, timestamp: float) -> str:
431
+ dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
432
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
433
+
434
+ def _format_timestamp_for_video(self, timestamp: float) -> str:
435
+ hours = int(timestamp // 3600)
436
+ minutes = int((timestamp % 3600) // 60)
437
+ seconds = round(float(timestamp % 60),2)
438
+ return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
+
+     def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str] = None) -> str:
+         if not stream_info:
+             return "00:00:00.00"
+         input_settings = stream_info.get("input_settings", {})
+         if precision:
+             if input_settings.get("start_frame", "na") != "na":
+                 if frame_id:
+                     start_time = int(frame_id) / input_settings.get("original_fps", 30)
+                 else:
+                     start_time = input_settings.get("start_frame", 30) / input_settings.get("original_fps", 30)
+                 return self._format_timestamp_for_video(start_time)
+             else:
+                 return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+         if input_settings.get("start_frame", "na") != "na":
+             if frame_id:
+                 start_time = int(frame_id) / input_settings.get("original_fps", 30)
+             else:
+                 start_time = input_settings.get("start_frame", 30) / input_settings.get("original_fps", 30)
+             return self._format_timestamp_for_video(start_time)
+         else:
+             stream_time_str = input_settings.get("stream_info", {}).get("stream_time", "")
+             if stream_time_str:
+                 try:
+                     timestamp_str = stream_time_str.replace(" UTC", "")
+                     dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                     timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
+                     return self._format_timestamp_for_stream(timestamp)
+                 except Exception:
+                     return self._format_timestamp_for_stream(time.time())
+             else:
+                 return self._format_timestamp_for_stream(time.time())
+
+     def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
+         if not stream_info:
+             return "00:00:00"
+         input_settings = stream_info.get("input_settings", {})
+         if precision:
+             if input_settings.get("start_frame", "na") != "na":
+                 return "00:00:00"
+             else:
+                 return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+         if input_settings.get("start_frame", "na") != "na":
+             return "00:00:00"
+         else:
+             if self._tracking_start_time is None:
+                 stream_time_str = input_settings.get("stream_info", {}).get("stream_time", "")
+                 if stream_time_str:
+                     try:
+                         timestamp_str = stream_time_str.replace(" UTC", "")
+                         dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                         self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                     except Exception:
+                         self._tracking_start_time = time.time()
+                 else:
+                     self._tracking_start_time = time.time()
+             dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+             # Truncate to the start of the hour for a stable "TOTAL SINCE" anchor
+             dt = dt.replace(minute=0, second=0, microsecond=0)
+             return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+     def _count_categories(self, detections: list, config: PipelineDetectionConfig) -> dict:
+         counts = {}
+         for det in detections:
+             cat = det.get('category', 'unknown')
+             counts[cat] = counts.get(cat, 0) + 1
+         return {
+             "total_count": sum(counts.values()),
+             "per_category_count": counts,
+             "detections": [
+                 {
+                     "bounding_box": det.get("bounding_box"),
+                     "category": det.get("category"),
+                     "confidence": det.get("confidence"),
+                     "track_id": det.get("track_id"),
+                     "frame_id": det.get("frame_id")
+                 }
+                 for det in detections
+             ]
+         }
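+
+     # Worked example: two "pipe" detections yield
+     # {"total_count": 2, "per_category_count": {"pipe": 2}, "detections": [...]}.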
+
+     def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
+         return [
+             {
+                 "category": det.get("category", "unknown"),
+                 "confidence": det.get("confidence", 0.0),
+                 "bounding_box": det.get("bounding_box", {})
+             }
+             for det in detections
+         ]
+
+     def _compute_iou(self, box1: Any, box2: Any) -> float:
+         def _bbox_to_list(bbox):
+             if bbox is None:
+                 return []
+             if isinstance(bbox, list):
+                 return bbox[:4] if len(bbox) >= 4 else []
+             if isinstance(bbox, dict):
+                 if "xmin" in bbox:
+                     return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
+                 if "x1" in bbox:
+                     return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
+                 values = [v for v in bbox.values() if isinstance(v, (int, float))]
+                 return values[:4] if len(values) >= 4 else []
+             return []
+
+         l1 = _bbox_to_list(box1)
+         l2 = _bbox_to_list(box2)
+         if len(l1) < 4 or len(l2) < 4:
+             return 0.0
+         x1_min, y1_min, x1_max, y1_max = l1
+         x2_min, y2_min, x2_max, y2_max = l2
+         # Normalize coordinate order in case min/max are swapped
+         x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
+         y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
+         x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
+         y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
+         inter_x_min = max(x1_min, x2_min)
+         inter_y_min = max(y1_min, y2_min)
+         inter_x_max = min(x1_max, x2_max)
+         inter_y_max = min(y1_max, y2_max)
+         inter_w = max(0.0, inter_x_max - inter_x_min)
+         inter_h = max(0.0, inter_y_max - inter_y_min)
+         inter_area = inter_w * inter_h
+         area1 = (x1_max - x1_min) * (y1_max - y1_min)
+         area2 = (x2_max - x2_min) * (y2_max - y2_min)
+         union_area = area1 + area2 - inter_area
+         return (inter_area / union_area) if union_area > 0 else 0.0
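+
+     # Worked example: boxes {"xmin": 0, "ymin": 0, "xmax": 10, "ymax": 10} and
+     # {"xmin": 5, "ymin": 5, "xmax": 15, "ymax": 15} intersect in a 5x5 patch,
+     # so IoU = 25 / (100 + 100 - 25) ≈ 0.143, above the 0.05 merge threshold.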
+
+     def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
+         if raw_id is None or bbox is None:
+             return raw_id
+         now = time.time()
+         # Fast path: this raw id has already been mapped to a canonical track
+         if raw_id in self._track_aliases:
+             canonical_id = self._track_aliases[raw_id]
+             track_info = self._canonical_tracks.get(canonical_id)
+             if track_info is not None:
+                 track_info["last_bbox"] = bbox
+                 track_info["last_update"] = now
+                 track_info["raw_ids"].add(raw_id)
+             return canonical_id
+         # Otherwise, try to merge with a recently updated canonical track
+         for canonical_id, info in self._canonical_tracks.items():
+             if now - info["last_update"] > self._track_merge_time_window:
+                 continue
+             iou = self._compute_iou(bbox, info["last_bbox"])
+             if iou >= self._track_merge_iou_threshold:
+                 self._track_aliases[raw_id] = canonical_id
+                 info["last_bbox"] = bbox
+                 info["last_update"] = now
+                 info["raw_ids"].add(raw_id)
+                 return canonical_id
+         # No match: register the raw id as a new canonical track
+         canonical_id = raw_id
+         self._track_aliases[raw_id] = canonical_id
+         self._canonical_tracks[canonical_id] = {
+             "last_bbox": bbox,
+             "last_update": now,
+             "raw_ids": {raw_id},
+         }
+         return canonical_id
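+
+     # Illustrative trace: if raw track 7 first appears overlapping canonical
+     # track 3's last bbox with IoU >= 0.05 within the 7 s window, it is merged:
+     # _track_aliases gains {7: 3}, detections with raw id 7 are relabeled 3,
+     # and the cumulative counts do not double-count the same pipe.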
+
+     def _format_timestamp(self, timestamp: float) -> str:
+         return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
+
+     def _get_tracking_start_time(self) -> str:
+         if self._tracking_start_time is None:
+             return "N/A"
+         return self._format_timestamp(self._tracking_start_time)
+
+     def _set_tracking_start_time(self) -> None:
+         self._tracking_start_time = time.time()