matrice-analytics 0.1.106__py3-none-any.whl → 0.1.124__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24):
  1. matrice_analytics/post_processing/__init__.py +22 -0
  2. matrice_analytics/post_processing/config.py +15 -0
  3. matrice_analytics/post_processing/core/config.py +107 -1
  4. matrice_analytics/post_processing/face_reg/face_recognition.py +2 -2
  5. matrice_analytics/post_processing/post_processor.py +16 -0
  6. matrice_analytics/post_processing/usecases/__init__.py +9 -0
  7. matrice_analytics/post_processing/usecases/crowdflow.py +1088 -0
  8. matrice_analytics/post_processing/usecases/footfall.py +103 -62
  9. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +2 -1
  10. matrice_analytics/post_processing/usecases/parking_lot_analytics.py +1137 -0
  11. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +30 -4
  12. matrice_analytics/post_processing/usecases/vehicle_monitoring_drone_view.py +33 -6
  13. matrice_analytics/post_processing/usecases/vehicle_monitoring_parking_lot.py +18 -2
  14. matrice_analytics/post_processing/usecases/vehicle_monitoring_wrong_way.py +1021 -0
  15. matrice_analytics/post_processing/utils/alert_instance_utils.py +18 -5
  16. matrice_analytics/post_processing/utils/business_metrics_manager_utils.py +25 -2
  17. matrice_analytics/post_processing/utils/incident_manager_utils.py +12 -1
  18. matrice_analytics/post_processing/utils/parking_analytics_tracker.py +359 -0
  19. matrice_analytics/post_processing/utils/wrong_way_tracker.py +670 -0
  20. {matrice_analytics-0.1.106.dist-info → matrice_analytics-0.1.124.dist-info}/METADATA +1 -1
  21. {matrice_analytics-0.1.106.dist-info → matrice_analytics-0.1.124.dist-info}/RECORD +24 -19
  22. {matrice_analytics-0.1.106.dist-info → matrice_analytics-0.1.124.dist-info}/WHEEL +0 -0
  23. {matrice_analytics-0.1.106.dist-info → matrice_analytics-0.1.124.dist-info}/licenses/LICENSE.txt +0 -0
  24. {matrice_analytics-0.1.106.dist-info → matrice_analytics-0.1.124.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1137 @@
1
+ from typing import Any, Dict, List, Optional, Tuple
2
+ from dataclasses import asdict
3
+ import time
4
+ from datetime import datetime, timezone
5
+
6
+ from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
7
+ from ..utils import (
8
+ filter_by_confidence,
9
+ filter_by_categories,
10
+ apply_category_mapping,
11
+ count_objects_by_category,
12
+ count_objects_in_zones,
13
+ calculate_counting_summary,
14
+ match_results_structure,
15
+ bbox_smoothing,
16
+ BBoxSmoothingConfig,
17
+ BBoxSmoothingTracker
18
+ )
19
+ from dataclasses import dataclass, field
20
+ from ..core.config import BaseConfig, AlertConfig, ZoneConfig
21
+ from ..utils.geometry_utils import get_bbox_center, point_in_polygon, get_bbox_bottom25_center
22
+ from ..utils.parking_analytics_tracker import ParkingAnalyticsTracker, VehicleParkingState
23
+
24
+ @dataclass
25
+ class ParkingLotAnalyticsConfig(BaseConfig):
26
+ """Configuration for vehicle detection use case in parking lot analytics (parking time)."""
27
+ enable_smoothing: bool = True
28
+ smoothing_algorithm: str = "observability"
29
+ smoothing_window_size: int = 20
30
+ smoothing_cooldown_frames: int = 5
31
+ smoothing_confidence_range_factor: float = 0.5
32
+ confidence_threshold: float = 0.6
33
+
34
+ # Class Aggregation: Configuration parameters
35
+ enable_class_aggregation: bool = True
36
+ class_aggregation_window_size: int = 30 # 30 frames ≈ 1 second at 30 FPS
37
+
38
+ # Parking Analytics Specific Parameters
39
+ enable_parking_analytics: bool = True
40
+ parked_threshold_seconds: float = 15.0
41
+ movement_threshold_percent: float = 5.0
42
+ movement_window_frames: int = 60
43
+ max_history_size: int = 100
44
+ assumed_fps: float = 30.0
45
+
46
+ #JBK_720_GATE POLYGON = [[86, 328], [844, 317], [1277, 520], [1273, 707], [125, 713]]
47
+ zone_config: Optional[Dict[str, List[List[float]]]] = None #field(
48
+ # default_factory=lambda: {
49
+ # "zones": {
50
+ # "Interest_Region": [[86, 328], [844, 317], [1277, 520], [1273, 707], [125, 713]],
51
+ # }
52
+ # }
53
+ # )
54
+ usecase_categories: List[str] = field(
55
+ default_factory=lambda: [
56
+ 'bicycle', 'motorcycle', 'car', 'van', 'bus', 'truck'
57
+ ]
58
+ )
59
+ target_categories: List[str] = field(
60
+ default_factory=lambda: [
61
+ 'bicycle', 'motorcycle', 'car', 'van', 'bus', 'truck'
62
+ ]
63
+ )
64
+ alert_config: Optional[AlertConfig] = None
65
+ index_to_category: Optional[Dict[int, str]] = field(
66
+ default_factory=lambda: {
67
+ 0: "bicycle",
68
+ 1: "motorcycle",
69
+ 2: "car",
70
+ 3: "van",
71
+ 4: "bus",
72
+ 5: "truck"
73
+ }
74
+ )
75
+
76
+ class ParkingLotAnalyticsUseCase(BaseProcessor):
77
+ CATEGORY_DISPLAY = {
78
+ "bicycle": "Bicycle",
79
+ "motorcycle": "Motorcycle",
80
+ "car": "Car",
81
+ "van": "Van",
82
+ "bus": "Bus",
83
+ "truck": "Truck",
84
+ }
85
+
86
+ def __init__(self):
87
+ super().__init__("parking_lot_analytics")
88
+ self.category = "traffic"
89
+ self.CASE_TYPE: Optional[str] = 'parking_lot_analytics'
90
+ self.CASE_VERSION: Optional[str] = '1.0'
91
+ self.target_categories = ['bicycle', 'motorcycle', 'car', 'van', 'bus', 'truck' ]
92
+ self.smoothing_tracker = None
93
+ self.tracker = None
94
+ self._total_frame_counter = 0
95
+ self._global_frame_offset = 0
96
+ self._tracking_start_time = None
97
+ self._track_aliases: Dict[Any, Any] = {}
98
+ self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
99
+ self._track_merge_iou_threshold: float = 0.05
100
+ self._track_merge_time_window: float = 7.0
101
+ self._ascending_alert_list: List[int] = []
102
+ self.current_incident_end_timestamp: str = "N/A"
103
+ self.start_timer = None
104
+
105
+ # Parking analytics tracker
106
+ self.parking_analytics_tracker = None
107
+
108
+ # Track ID storage for total count calculation
109
+ self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
110
+ self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
111
+ self._tracked_in_zones = set() # New: Unique track IDs that have entered any zone
112
+ self._total_count = 0 # Cached total count
113
+ self._last_update_time = time.time() # Track when last updated
114
+ self._total_count_list = []
115
+
116
+ # Zone-based tracking storage
117
+ self._zone_current_track_ids = {} # zone_name -> set of current track IDs in zone
118
+ self._zone_total_track_ids = {} # zone_name -> set of all track IDs that have been in zone
119
+ self._zone_current_counts = {} # zone_name -> current count in zone
120
+ self._zone_total_counts = {} # zone_name -> total count that have been in zone
121
+
122
+ def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
123
+ stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
124
+ processing_start = time.time()
125
+ # Relaxed check: Accept ParkingLotAnalyticsConfig OR any config with matching usecase/category
126
+ # This handles multiprocessing module path mismatches while maintaining type safety
127
+ is_valid_config = (
128
+ isinstance(config, ParkingLotAnalyticsConfig) or
129
+ (hasattr(config, 'usecase') and config.usecase == 'vehicle_monitoring_parking_lot' and
130
+ hasattr(config, 'category') and config.category == 'traffic')
131
+ )
132
+ if not is_valid_config:
133
+ self.logger.error(
134
+ f"Config validation failed in vehicle_monitoring_parking_lot. "
135
+ f"Got type={type(config).__name__}, module={type(config).__module__}, "
136
+ f"usecase={getattr(config, 'usecase', 'N/A')}, category={getattr(config, 'category', 'N/A')}"
137
+ )
138
+ return self.create_error_result(
139
+ f"Invalid config type: expected ParkingLotAnalyticsConfig or config with usecase='vehicle_monitoring_parking_lot', "
140
+ f"got {type(config).__name__} with usecase={getattr(config, 'usecase', 'N/A')}",
141
+ usecase=self.name, category=self.category, context=context
142
+ )
143
+ if context is None:
144
+ context = ProcessingContext()
145
+
146
+ # Determine if zones are configured
147
+ has_zones = bool(config.zone_config and config.zone_config.get('zones'))
148
+
149
+ # Normalize typical YOLO outputs (COCO pretrained) to internal schema
150
+ data = self._normalize_yolo_results(data, getattr(config, 'index_to_category', None))
151
+
152
+ input_format = match_results_structure(data)
153
+ context.input_format = input_format
154
+ context.confidence_threshold = config.confidence_threshold
155
+ config.confidence_threshold = 0.25
156
+ # param to be updated
157
+
158
+ if config.confidence_threshold is not None:
159
+ processed_data = filter_by_confidence(data, config.confidence_threshold)
160
+ self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
161
+ else:
162
+ processed_data = data
163
+ self.logger.debug("Did not apply confidence filtering since no threshold provided")
164
+
165
+ if config.index_to_category:
166
+ processed_data = apply_category_mapping(processed_data, config.index_to_category)
167
+ self.logger.debug("Applied category mapping")
168
+
169
+ processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
170
+ if config.target_categories:
171
+ processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
172
+ self.logger.debug("Applied category filtering")
173
+
174
+
175
+ if config.enable_smoothing:
176
+ if self.smoothing_tracker is None:
177
+ smoothing_config = BBoxSmoothingConfig(
178
+ smoothing_algorithm=config.smoothing_algorithm,
179
+ window_size=config.smoothing_window_size,
180
+ cooldown_frames=config.smoothing_cooldown_frames,
181
+ confidence_threshold=config.confidence_threshold,
182
+ confidence_range_factor=config.smoothing_confidence_range_factor,
183
+ enable_smoothing=True
184
+ )
185
+ self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
186
+ processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
187
+
188
+ try:
189
+ from ..advanced_tracker import AdvancedTracker
190
+ from ..advanced_tracker.config import TrackerConfig
191
+ if self.tracker is None:
192
+ tracker_config = TrackerConfig(
193
+ # CLASS AGGREGATION: Map from use case config
194
+ enable_class_aggregation=config.enable_class_aggregation,
195
+ class_aggregation_window_size=config.class_aggregation_window_size
196
+ )
197
+ self.tracker = AdvancedTracker(tracker_config)
198
+ self.logger.info("Initialized AdvancedTracker for Parking Lot Analytics use case")
199
+
200
+ if config.enable_class_aggregation:
201
+ self.logger.info(
202
+ f"AdvancedTracker initialized with class aggregation "
203
+ f"(window_size={config.class_aggregation_window_size})"
204
+ )
205
+ else:
206
+ self.logger.info("AdvancedTracker initialized without class aggregation")
207
+
208
+ processed_data = self.tracker.update(processed_data)
209
+ except Exception as e:
210
+ self.logger.warning(f"AdvancedTracker failed: {e}")
211
+
212
+ # Parking Analytics Update
213
+ parking_analytics = None
214
+ if config.enable_parking_analytics and processed_data:
215
+ if self.parking_analytics_tracker is None:
216
+ self.parking_analytics_tracker = ParkingAnalyticsTracker(
217
+ parked_threshold_frames=int(config.parked_threshold_seconds * config.assumed_fps),
218
+ movement_threshold_percent=config.movement_threshold_percent,
219
+ movement_window_frames=config.movement_window_frames,
220
+ fps=config.assumed_fps
221
+ )
222
+ self.logger.info(
223
+ f"Initialized ParkingAnalyticsTracker: "
224
+ f"parked_threshold={config.parked_threshold_seconds}s, "
225
+ f"movement_threshold={config.movement_threshold_percent}%"
226
+ )
227
+
228
+ current_timestamp = self._get_current_timestamp_str(stream_info)
229
+ parking_analytics = self.parking_analytics_tracker.update(
230
+ detections=processed_data,
231
+ current_frame=self._total_frame_counter,
232
+ current_timestamp=current_timestamp
233
+ )
234
+
235
+ # Log summary
236
+ pa_summary = parking_analytics.get('summary', {})
237
+ self.logger.info(
238
+ f"[Frame {self._total_frame_counter}] Parking Analytics: "
239
+ f"active={pa_summary.get('total_active', 0)}, "
240
+ f"parked={pa_summary.get('total_parked', 0)}, "
241
+ f"avg_dwell={pa_summary.get('average_dwell_time', 0)}s"
242
+ )
243
+
244
+ self._update_tracking_state(processed_data, has_zones=has_zones)
245
+ self._total_frame_counter += 1
246
+
247
+ frame_number = None
248
+ if stream_info:
249
+ input_settings = stream_info.get("input_settings", {})
250
+ start_frame = input_settings.get("start_frame")
251
+ end_frame = input_settings.get("end_frame")
252
+ if start_frame is not None and end_frame is not None and start_frame == end_frame:
253
+ frame_number = start_frame
254
+
255
+ general_counting_summary = calculate_counting_summary(data)
256
+ counting_summary = self._count_categories(processed_data, config)
257
+ total_counts = self.get_total_counts()
258
+ counting_summary['total_counts'] = total_counts
259
+ counting_summary['categories'] = {}
260
+ for detection in processed_data:
261
+ category = detection.get("category", "unknown")
262
+ counting_summary["categories"][category] = counting_summary["categories"].get(category, 0) + 1
263
+
264
+ zone_analysis = {}
265
+ if has_zones:
266
+ # Convert single frame to format expected by count_objects_in_zones
267
+ frame_data = processed_data #[frame_detections]
268
+ zone_analysis = count_objects_in_zones(frame_data, config.zone_config['zones'], stream_info)
269
+
270
+ if zone_analysis:
271
+ enhanced_zone_analysis = self._update_zone_tracking(zone_analysis, processed_data, config)
272
+ # Merge enhanced zone analysis with original zone analysis
273
+ for zone_name, enhanced_data in enhanced_zone_analysis.items():
274
+ zone_analysis[zone_name] = enhanced_data
275
+
276
+ # Adjust counting_summary for zones (current counts based on union across zones)
277
+ per_category_count = {cat: len(self._current_frame_track_ids.get(cat, set())) for cat in self.target_categories}
278
+ counting_summary['per_category_count'] = {k: v for k, v in per_category_count.items() if v > 0}
279
+ counting_summary['total_count'] = sum(per_category_count.values())
280
+
281
+ alerts = self._check_alerts(counting_summary,zone_analysis, frame_number, config)
282
+ predictions = self._extract_predictions(processed_data)
283
+
284
+ incidents_list = self._generate_incidents(counting_summary,zone_analysis, alerts, config, frame_number, stream_info)
285
+ incidents_list = []
286
+ # tracking_stats_list = self._generate_tracking_stats(counting_summary,zone_analysis, alerts, config, frame_number, stream_info)
287
+
288
+ # NEW: Pass parking_analytics to tracking stats generator
289
+ tracking_stats_list = self._generate_tracking_stats(
290
+ counting_summary, zone_analysis, alerts, config,
291
+ frame_number, stream_info, parking_analytics # ADD THIS PARAMETER
292
+ )
293
+
294
+ business_analytics_list = self._generate_business_analytics(counting_summary,zone_analysis, alerts, config, stream_info, is_empty=True)
295
+ summary_list = self._generate_summary(counting_summary,zone_analysis, incidents_list, tracking_stats_list, business_analytics_list, alerts)
296
+
297
+ incidents = incidents_list[0] if incidents_list else {}
298
+ tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
299
+ business_analytics = business_analytics_list[0] if business_analytics_list else {}
300
+ summary = summary_list[0] if summary_list else {}
301
+ agg_summary = {str(frame_number): {
302
+ "incidents": incidents,
303
+ "tracking_stats": tracking_stats,
304
+ "business_analytics": business_analytics,
305
+ "alerts": alerts,
306
+ "zone_analysis": zone_analysis,
307
+ "human_text": summary}
308
+ }
309
+
310
+ context.mark_completed()
311
+ result = self.create_result(
312
+ data={"agg_summary": agg_summary},
313
+ usecase=self.name,
314
+ category=self.category,
315
+ context=context
316
+ )
317
+ proc_time = time.time() - processing_start
318
+ processing_latency_ms = proc_time * 1000.0
319
+ processing_fps = (1.0 / proc_time) if proc_time > 0 else None
320
+ # Log the performance metrics using the module-level logger
321
+ print("latency in ms:",processing_latency_ms,"| Throughput fps:",processing_fps,"| Frame_Number:",self._total_frame_counter)
322
+ return result
323
+
324
+ def _update_zone_tracking(self, zone_analysis: Dict[str, Dict[str, int]], detections: List[Dict], config: ParkingLotAnalyticsConfig) -> Dict[str, Dict[str, Any]]:
325
+ """
326
+ Update zone tracking with current frame data.
327
+
328
+ Args:
329
+ zone_analysis: Current zone analysis results
330
+ detections: List of detections with track IDs
331
+
332
+ Returns:
333
+ Enhanced zone analysis with tracking information
334
+ """
335
+ if not zone_analysis or not config.zone_config or not config.zone_config['zones']:
336
+ return {}
337
+
338
+ enhanced_zone_analysis = {}
339
+ zones = config.zone_config['zones']
340
+
341
+ # Get track to category mapping
342
+ track_to_cat = {det.get('track_id'): det.get('category') for det in detections if det.get('track_id') is not None}
343
+
344
+ # Get current frame track IDs in each zone
345
+ current_frame_zone_tracks = {}
346
+
347
+ # Initialize zone tracking for all zones
348
+ for zone_name in zones.keys():
349
+ current_frame_zone_tracks[zone_name] = set()
350
+ if zone_name not in self._zone_current_track_ids:
351
+ self._zone_current_track_ids[zone_name] = set()
352
+ if zone_name not in self._zone_total_track_ids:
353
+ self._zone_total_track_ids[zone_name] = set()
354
+
355
+ # Check each detection against each zone
356
+ for detection in detections:
357
+ track_id = detection.get("track_id")
358
+ if track_id is None:
359
+ continue
360
+
361
+ # Get detection bbox
362
+ bbox = detection.get("bounding_box", detection.get("bbox"))
363
+ if not bbox:
364
+ continue
365
+
366
+ # Get detection center point
367
+ center_point = get_bbox_bottom25_center(bbox) #get_bbox_center(bbox)
368
+
369
+ # Flag to check if this track is in any zone this frame
370
+ in_any_zone = False
371
+
372
+ # Check which zone this detection is in using actual zone polygons
373
+ for zone_name, zone_polygon in zones.items():
374
+ # Convert polygon points to tuples for point_in_polygon function
375
+ # zone_polygon format: [[x1, y1], [x2, y2], [x3, y3], ...]
376
+ polygon_points = [(point[0], point[1]) for point in zone_polygon]
377
+
378
+ # Check if detection center is inside the zone polygon using ray casting algorithm
379
+ if point_in_polygon(center_point, polygon_points):
380
+ current_frame_zone_tracks[zone_name].add(track_id)
381
+ in_any_zone = True
382
+ if track_id not in self._total_count_list:
383
+ self._total_count_list.append(track_id)
384
+
385
+ # If in any zone, update global current and total (cumulative only if new)
386
+ if in_any_zone:
387
+ cat = track_to_cat.get(track_id)
388
+ if cat:
389
+ # Update current frame global (union across zones)
390
+ self._current_frame_track_ids.setdefault(cat, set()).add(track_id)
391
+
392
+ # Update global cumulative if first time in any zone
393
+ if track_id not in self._tracked_in_zones:
394
+ self._tracked_in_zones.add(track_id)
395
+ self._per_category_total_track_ids.setdefault(cat, set()).add(track_id)
396
+
397
+ # Update zone tracking for each zone
398
+ for zone_name, zone_counts in zone_analysis.items():
399
+ # Get current frame tracks for this zone
400
+ current_tracks = current_frame_zone_tracks.get(zone_name, set())
401
+
402
+ # Update current zone tracks
403
+ self._zone_current_track_ids[zone_name] = current_tracks
404
+
405
+ # Update total zone tracks (accumulate all track IDs that have been in zone)
406
+ self._zone_total_track_ids[zone_name].update(current_tracks)
407
+
408
+ # Update counts
409
+ self._zone_current_counts[zone_name] = len(current_tracks)
410
+ self._zone_total_counts[zone_name] = len(self._zone_total_track_ids[zone_name])
411
+
412
+ # Create enhanced zone analysis
413
+ enhanced_zone_analysis[zone_name] = {
414
+ "current_count": self._zone_current_counts[zone_name],
415
+ "total_count": self._zone_total_counts[zone_name],
416
+ "current_track_ids": list(current_tracks),
417
+ "total_track_ids": list(self._zone_total_track_ids[zone_name]),
418
+ "original_counts": zone_counts # Preserve original zone counts
419
+ }
420
+
421
+ return enhanced_zone_analysis
422
+
423
+ def _normalize_yolo_results(self, data: Any, index_to_category: Optional[Dict[int, str]] = None) -> Any:
424
+ """
425
+ Normalize YOLO-style outputs to internal detection schema:
426
+ - category/category_id: prefer string label using COCO mapping if available
427
+ - confidence: map from 'conf'/'score' to 'confidence'
428
+ - bounding_box: ensure dict with keys (x1,y1,x2,y2) or (xmin,ymin,xmax,ymax)
429
+ - supports list of detections and frame_id -> detections dict
430
+ """
431
+ def to_bbox_dict(d: Dict[str, Any]) -> Dict[str, Any]:
432
+ if "bounding_box" in d and isinstance(d["bounding_box"], dict):
433
+ return d["bounding_box"]
434
+ if "bbox" in d:
435
+ bbox = d["bbox"]
436
+ if isinstance(bbox, dict):
437
+ return bbox
438
+ if isinstance(bbox, (list, tuple)) and len(bbox) >= 4:
439
+ x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]
440
+ return {"x1": x1, "y1": y1, "x2": x2, "y2": y2}
441
+ if "xyxy" in d and isinstance(d["xyxy"], (list, tuple)) and len(d["xyxy"]) >= 4:
442
+ x1, y1, x2, y2 = d["xyxy"][0], d["xyxy"][1], d["xyxy"][2], d["xyxy"][3]
443
+ return {"x1": x1, "y1": y1, "x2": x2, "y2": y2}
444
+ if "xywh" in d and isinstance(d["xywh"], (list, tuple)) and len(d["xywh"]) >= 4:
445
+ cx, cy, w, h = d["xywh"][0], d["xywh"][1], d["xywh"][2], d["xywh"][3]
446
+ x1, y1, x2, y2 = cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2
447
+ return {"x1": x1, "y1": y1, "x2": x2, "y2": y2}
448
+ return {}
449
+
450
+ def resolve_category(d: Dict[str, Any]) -> Tuple[str, Optional[int]]:
451
+ raw_cls = d.get("category", d.get("category_id", d.get("class", d.get("cls"))))
452
+ label_name = d.get("name")
453
+ if isinstance(raw_cls, int):
454
+ if index_to_category and raw_cls in index_to_category:
455
+ return index_to_category[raw_cls], raw_cls
456
+ return str(raw_cls), raw_cls
457
+ if isinstance(raw_cls, str):
458
+ # Some YOLO exports provide string labels directly
459
+ return raw_cls, None
460
+ if label_name:
461
+ return str(label_name), None
462
+ return "unknown", None
463
+
464
+ def normalize_det(det: Dict[str, Any]) -> Dict[str, Any]:
465
+ category_name, category_id = resolve_category(det)
466
+ confidence = det.get("confidence", det.get("conf", det.get("score", 0.0)))
467
+ bbox = to_bbox_dict(det)
468
+ normalized = {
469
+ "category": category_name,
470
+ "confidence": confidence,
471
+ "bounding_box": bbox,
472
+ }
473
+ if category_id is not None:
474
+ normalized["category_id"] = category_id
475
+ # Preserve optional fields
476
+ for key in ("track_id", "frame_id", "masks", "segmentation"):
477
+ if key in det:
478
+ normalized[key] = det[key]
479
+ return normalized
480
+
481
+ if isinstance(data, list):
482
+ return [normalize_det(d) if isinstance(d, dict) else d for d in data]
483
+ if isinstance(data, dict):
484
+ # Detect tracking style dict: frame_id -> list of detections
485
+ normalized_dict: Dict[str, Any] = {}
486
+ for k, v in data.items():
487
+ if isinstance(v, list):
488
+ normalized_dict[k] = [normalize_det(d) if isinstance(d, dict) else d for d in v]
489
+ elif isinstance(v, dict):
490
+ normalized_dict[k] = normalize_det(v)
491
+ else:
492
+ normalized_dict[k] = v
493
+ return normalized_dict
494
+ return data
495
+
496
+ def _check_alerts(self, summary: dict, zone_analysis: Dict, frame_number: Any, config: ParkingLotAnalyticsConfig) -> List[Dict]:
497
+ def get_trend(data, lookback=900, threshold=0.6):
498
+ window = data[-lookback:] if len(data) >= lookback else data
499
+ if len(window) < 2:
500
+ return True
501
+ increasing = 0
502
+ total = 0
503
+ for i in range(1, len(window)):
504
+ if window[i] >= window[i - 1]:
505
+ increasing += 1
506
+ total += 1
507
+ ratio = increasing / total
508
+ return ratio >= threshold
509
+
510
+ frame_key = str(frame_number) if frame_number is not None else "current_frame"
511
+ alerts = []
512
+ total_detections = summary.get("total_count", 0)
513
+ total_counts_dict = summary.get("total_counts", {})
514
+ per_category_count = summary.get("per_category_count", {})
515
+
516
+ if not config.alert_config:
517
+ return alerts
518
+
519
+ if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
520
+ for category, threshold in config.alert_config.count_thresholds.items():
521
+ if category == "all" and total_detections > threshold:
522
+ alerts.append({
523
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
524
+ "alert_id": f"alert_{category}_{frame_key}",
525
+ "incident_category": self.CASE_TYPE,
526
+ "threshold_level": threshold,
527
+ "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
528
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
529
+ getattr(config.alert_config, 'alert_value', ['JSON']))}
530
+ })
531
+ elif category in per_category_count and per_category_count[category] > threshold:
532
+ alerts.append({
533
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
534
+ "alert_id": f"alert_{category}_{frame_key}",
535
+ "incident_category": self.CASE_TYPE,
536
+ "threshold_level": threshold,
537
+ "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
538
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
539
+ getattr(config.alert_config, 'alert_value', ['JSON']))}
540
+ })
541
+ return alerts
542
+
543
+ def _generate_incidents(self, counting_summary: Dict, zone_analysis: Dict, alerts: List, config: ParkingLotAnalyticsConfig,
544
+ frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
545
+ incidents = []
546
+ total_detections = counting_summary.get("total_count", 0)
547
+ current_timestamp = self._get_current_timestamp_str(stream_info)
548
+ camera_info = self.get_camera_info_from_stream(stream_info)
549
+
550
+ self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list
551
+
552
+ if total_detections > 0:
553
+ level = "low"
554
+ intensity = 5.0
555
+ start_timestamp = self._get_start_timestamp_str(stream_info)
556
+ if start_timestamp and self.current_incident_end_timestamp == 'N/A':
557
+ self.current_incident_end_timestamp = 'Incident still active'
558
+ elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
559
+ if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
560
+ self.current_incident_end_timestamp = current_timestamp
561
+ elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
562
+ self.current_incident_end_timestamp = 'N/A'
563
+
564
+ if config.alert_config and hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
565
+ threshold = config.alert_config.count_thresholds.get("all", 15)
566
+ intensity = min(10.0, (total_detections / threshold) * 10)
567
+ if intensity >= 9:
568
+ level = "critical"
569
+ self._ascending_alert_list.append(3)
570
+ elif intensity >= 7:
571
+ level = "significant"
572
+ self._ascending_alert_list.append(2)
573
+ elif intensity >= 5:
574
+ level = "medium"
575
+ self._ascending_alert_list.append(1)
576
+ else:
577
+ level = "low"
578
+ self._ascending_alert_list.append(0)
579
+ else:
580
+ if total_detections > 30:
581
+ level = "critical"
582
+ intensity = 10.0
583
+ self._ascending_alert_list.append(3)
584
+ elif total_detections > 25:
585
+ level = "significant"
586
+ intensity = 9.0
587
+ self._ascending_alert_list.append(2)
588
+ elif total_detections > 15:
589
+ level = "medium"
590
+ intensity = 7.0
591
+ self._ascending_alert_list.append(1)
592
+ else:
593
+ level = "low"
594
+ intensity = min(10.0, total_detections / 3.0)
595
+ self._ascending_alert_list.append(0)
596
+
597
+ human_text_lines = [f"VEHICLE INCIDENTS DETECTED @ {current_timestamp}:"]
598
+ human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
599
+ human_text = "\n".join(human_text_lines)
600
+
601
+ alert_settings = []
602
+ if config.alert_config and hasattr(config.alert_config, 'alert_type'):
603
+ alert_settings.append({
604
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
605
+ "incident_category": self.CASE_TYPE,
606
+ "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
607
+ "ascending": True,
608
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
609
+ getattr(config.alert_config, 'alert_value', ['JSON']))}
610
+ })
611
+
612
+ event = self.create_incident(
613
+ incident_id=f"{self.CASE_TYPE}_{frame_number}",
614
+ incident_type=self.CASE_TYPE,
615
+ severity_level=level,
616
+ human_text=human_text,
617
+ camera_info=camera_info,
618
+ alerts=alerts,
619
+ alert_settings=alert_settings,
620
+ start_time=start_timestamp,
621
+ end_time=self.current_incident_end_timestamp,
622
+ level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7}
623
+ )
624
+ incidents.append(event)
625
+ else:
626
+ self._ascending_alert_list.append(0)
627
+ incidents.append({})
628
+ return incidents
629
+
630
def _generate_tracking_stats(
    self,
    counting_summary: Dict,
    zone_analysis: Dict,
    alerts: List,
    config: ParkingLotAnalyticsConfig,
    frame_number: Optional[int] = None,  # NOTE(review): currently unused in this body
    stream_info: Optional[Dict[str, Any]] = None,
    parking_analytics: Optional[Dict] = None  # NEW PARAMETER
) -> List[Dict]:
    """Build the per-frame tracking-stats record for the parking-lot use case.

    Assembles, from the frame's counting summary and optional zone analysis:
    per-category total/current counts, detection objects (with segmentation
    when present), alert settings derived from ``config.alert_config``, a
    human-readable multi-line summary, and — when ``parking_analytics`` is
    supplied — a parking-analytics section (parked count, dwell times).

    Returns a single-element list containing the tracking-stat dict produced
    by ``self.create_tracking_stats`` plus the extra keys added here
    (``target_categories`` and optionally ``parking_analytics``).
    """
    camera_info = self.get_camera_info_from_stream(stream_info)
    tracking_stats = []
    total_detections = counting_summary.get("total_count", 0)
    total_counts_dict = counting_summary.get("total_counts", {})
    per_category_count = counting_summary.get("per_category_count", {})
    # Low-precision timestamps feed the human text; high-precision ones feed
    # create_tracking_stats' start/reset times.
    current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
    # NOTE(review): start_timestamp is only referenced by the commented-out
    # "TOTAL SINCE" section below — currently unused.
    start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
    # NOTE(review): naming looks swapped — "start" comes from the *current*
    # timestamp helper and "reset" from the *start* helper; confirm intent.
    high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
    high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)

    # Cumulative totals keep only non-zero categories; current counts keep a
    # category when it has a count or any detection exists in the frame.
    total_counts = [{"category": cat, "count": count} for cat, count in total_counts_dict.items() if count > 0]
    current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items() if count > 0 or total_detections > 0]

    # Wrap raw detections into detection objects, forwarding whichever
    # segmentation field variant ("masks"/"segmentation"/"mask") is present.
    detections = []
    for detection in counting_summary.get("detections", []):
        bbox = detection.get("bounding_box", {})
        category = detection.get("category", "vehicle")
        if detection.get("masks"):
            segmentation = detection.get("masks", [])
            detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
        elif detection.get("segmentation"):
            segmentation = detection.get("segmentation")
            detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
        elif detection.get("mask"):
            segmentation = detection.get("mask")
            detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
        else:
            detection_obj = self.create_detection_object(category, bbox)
        detections.append(detection_obj)

    # Translate the alert config into the settings structure expected
    # downstream; "settings" pairs each alert type with its value.
    alert_settings = []
    if config.alert_config and hasattr(config.alert_config, 'alert_type'):
        alert_settings.append({
            "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
            "incident_category": self.CASE_TYPE,
            "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
            "ascending": True,
            "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                              getattr(config.alert_config, 'alert_value', ['JSON']))}
        })

    # Generate human text similar to people_counting format
    human_text_lines = []
    human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")

    # Display current counts - zone-wise or category-wise
    if zone_analysis:
        human_text_lines.append("\t- Vehicles Detected by Zone:")
        for zone_name, zone_data in zone_analysis.items():
            current_count = 0
            if isinstance(zone_data, dict):
                if "current_count" in zone_data:
                    current_count = zone_data.get("current_count", 0)
                else:
                    # Fall back to "original_counts" (or the zone dict itself),
                    # preferring an explicit "total" over summing numeric values.
                    counts_dict = zone_data.get("original_counts") if isinstance(zone_data.get("original_counts"), dict) else zone_data
                    current_count = counts_dict.get(
                        "total",
                        sum(v for v in counts_dict.values() if isinstance(v, (int, float)))
                    )
            human_text_lines.append(f"\t\t- {zone_name}: {int(current_count)}")
    else:
        human_text_lines.append(f"\t- Vehicles Detected: {total_detections}")
        if per_category_count:
            for cat, count in per_category_count.items():
                if count > 0:
                    human_text_lines.append(f"\t\t- {cat}: {count}")

    human_text_lines.append("")
    # human_text_lines.append(f"TOTAL SINCE @ {start_timestamp}:")

    # # Display total counts - zone-wise or category-wise
    # if zone_analysis:
    #     human_text_lines.append("\t- Total Vehicles by Zone:")
    #     for zone_name, zone_data in zone_analysis.items():
    #         total_count = 0
    #         if isinstance(zone_data, dict):
    #             # Prefer the numeric cumulative total if available
    #             if "total_count" in zone_data and isinstance(zone_data.get("total_count"), (int, float)):
    #                 total_count = zone_data.get("total_count", 0)
    #             # Fallback: compute from list of total_track_ids if present
    #             elif "total_track_ids" in zone_data and isinstance(zone_data.get("total_track_ids"), list):
    #                 total_count = len(zone_data.get("total_track_ids", []))
    #             else:
    #                 # Last resort: try to sum numeric values present
    #                 counts_dict = zone_data if isinstance(zone_data, dict) else {}
    #                 total_count = sum(v for v in counts_dict.values() if isinstance(v, (int, float)))
    #         human_text_lines.append(f"\t\t- {zone_name}: {int(total_count)}")
    # else:
    #     if total_counts_dict:
    #         human_text_lines.append("\t- Total Unique Vehicles:")
    #         for cat, count in total_counts_dict.items():
    #             if count > 0:
    #                 human_text_lines.append(f"\t\t- {cat}: {count}")

    # # Display alerts
    # if alerts:
    #     human_text_lines.append("")
    #     for alert in alerts:
    #         human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
    # else:
    #     human_text_lines.append("")
    #     human_text_lines.append("Alerts: None")

    # NEW: Add parking analytics to human text
    if parking_analytics:
        pa_summary = parking_analytics.get('summary', {})
        total_parked = pa_summary.get('total_parked', 0)
        avg_dwell = pa_summary.get('average_dwell_time', 0)

        # Only render the section when something is actually parked.
        if total_parked > 0:
            human_text_lines.append("")
            human_text_lines.append("PARKING ANALYTICS:")
            human_text_lines.append(f"\t- Parked Vehicles: {total_parked}")
            human_text_lines.append(f"\t- Avg Dwell Time: {avg_dwell}s")

            longest = pa_summary.get('longest_parked')
            if longest:
                human_text_lines.append(
                    f"\t- Longest Parked: {longest['category']} (ID:{longest['track_id']}) "
                    f"for {longest['parked_time_seconds']}s"
                )

    human_text = "\n".join(human_text_lines)

    # NOTE(review): reset schedule is hard-coded to daily @ 09:00 — confirm
    # this is intended for all deployments.
    reset_settings = [{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]
    tracking_stat = self.create_tracking_stats(
        total_counts=total_counts,
        current_counts=current_counts,
        detections=detections,
        human_text=human_text,
        camera_info=camera_info,
        alerts=alerts,
        alert_settings=alert_settings,
        reset_settings=reset_settings,
        start_time=high_precision_start_timestamp,
        reset_time=high_precision_reset_timestamp
    )
    tracking_stat['target_categories'] = self.target_categories

    # NEW: Add parking analytics to output
    if parking_analytics:
        tracking_stat["parking_analytics"] = {
            "per_vehicle": parking_analytics.get("active_vehicles", []),
            "summary": parking_analytics.get("summary", {}),
            "parked_vehicles_count": parking_analytics.get("summary", {}).get("total_parked", 0),
            "average_dwell_time_seconds": parking_analytics.get("summary", {}).get("average_dwell_time", 0.0)
        }

        self.logger.debug(
            f"Added parking analytics: "
            f"{len(parking_analytics.get('active_vehicles', []))} vehicles"
        )

    tracking_stats.append(tracking_stat)
    return tracking_stats
796
+
797
+ def _generate_business_analytics(self, counting_summary: Dict, zone_analysis: Dict, alerts: Any, config: ParkingLotAnalyticsConfig,
798
+ stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
799
+ if is_empty:
800
+ return []
801
+
802
+ def _generate_summary(self, summary: dict, zone_analysis: Dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
803
+ """
804
+ Generate a human_text string for the tracking_stat, incident, business analytics and alerts.
805
+ """
806
+ lines = []
807
+ lines.append("Application Name: "+self.CASE_TYPE)
808
+ lines.append("Application Version: "+self.CASE_VERSION)
809
+ if len(incidents) > 0:
810
+ lines.append("Incidents: "+f"\n\t{incidents[0].get('human_text', 'No incidents detected')}")
811
+ if len(tracking_stats) > 0:
812
+ lines.append("Tracking Statistics: "+f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}")
813
+ if len(business_analytics) > 0:
814
+ lines.append("Business Analytics: "+f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}")
815
+
816
+ if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
817
+ lines.append("Summary: "+"No Summary Data")
818
+
819
+ return ["\n".join(lines)]
820
+
821
+ def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
822
+ frame_track_ids = set()
823
+ for det in detections:
824
+ tid = det.get('track_id')
825
+ if tid is not None:
826
+ frame_track_ids.add(tid)
827
+ total_track_ids = set()
828
+ for s in getattr(self, '_per_category_total_track_ids', {}).values():
829
+ total_track_ids.update(s)
830
+ return {
831
+ "total_count": len(total_track_ids),
832
+ "current_frame_count": len(frame_track_ids),
833
+ "total_unique_track_ids": len(total_track_ids),
834
+ "current_frame_track_ids": list(frame_track_ids),
835
+ "last_update_time": time.time(),
836
+ "total_frames_processed": getattr(self, '_total_frame_counter', 0)
837
+ }
838
+
839
+ def _update_tracking_state(self, detections: list, has_zones: bool = False):
840
+ if not hasattr(self, "_per_category_total_track_ids"):
841
+ self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
842
+ self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
843
+
844
+ for det in detections:
845
+ cat = det.get("category")
846
+ raw_track_id = det.get("track_id")
847
+ if cat not in self.target_categories or raw_track_id is None:
848
+ continue
849
+ bbox = det.get("bounding_box", det.get("bbox"))
850
+ canonical_id = self._merge_or_register_track(raw_track_id, bbox)
851
+ det["track_id"] = canonical_id
852
+ if not has_zones:
853
+ self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
854
+ # For current frame, add unconditionally here; will be overridden/adjusted if has_zones in _update_zone_tracking
855
+ self._current_frame_track_ids.setdefault(cat, set()).add(canonical_id)
856
+
857
def get_total_counts(self):
    """Return {category: count of unique canonical track ids seen so far}."""
    totals = getattr(self, '_per_category_total_track_ids', {})
    return {category: len(id_set) for category, id_set in totals.items()}
859
+
860
+ def _format_timestamp_for_stream(self, timestamp: float) -> str:
861
+ dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
862
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
863
+
864
+ def _format_timestamp_for_video(self, timestamp: float) -> str:
865
+ hours = int(timestamp // 3600)
866
+ minutes = int((timestamp % 3600) // 60)
867
+ seconds = round(float(timestamp % 60), 2)
868
+ return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
869
+
870
+ def _format_timestamp(self, timestamp: Any) -> str:
871
+ """Format a timestamp to match the current timestamp format: YYYY:MM:DD HH:MM:SS.
872
+
873
+ The input can be either:
874
+ 1. A numeric Unix timestamp (``float`` / ``int``) – it will be converted to datetime.
875
+ 2. A string in the format ``YYYY-MM-DD-HH:MM:SS.ffffff UTC``.
876
+
877
+ The returned value will be in the format: YYYY:MM:DD HH:MM:SS (no milliseconds, no UTC suffix).
878
+
879
+ Example
880
+ -------
881
+ >>> self._format_timestamp("2025-10-27-19:31:20.187574 UTC")
882
+ '2025:10:27 19:31:20'
883
+ """
884
+
885
+ # Convert numeric timestamps to datetime first
886
+ if isinstance(timestamp, (int, float)):
887
+ dt = datetime.fromtimestamp(timestamp, timezone.utc)
888
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
889
+
890
+ # Ensure we are working with a string from here on
891
+ if not isinstance(timestamp, str):
892
+ return str(timestamp)
893
+
894
+ # Remove ' UTC' suffix if present
895
+ timestamp_clean = timestamp.replace(' UTC', '').strip()
896
+
897
+ # Remove milliseconds if present (everything after the last dot)
898
+ if '.' in timestamp_clean:
899
+ timestamp_clean = timestamp_clean.split('.')[0]
900
+
901
+ # Parse the timestamp string and convert to desired format
902
+ try:
903
+ # Handle format: YYYY-MM-DD-HH:MM:SS
904
+ if timestamp_clean.count('-') >= 2:
905
+ # Replace first two dashes with colons for date part, third with space
906
+ parts = timestamp_clean.split('-')
907
+ if len(parts) >= 4:
908
+ # parts = ['2025', '10', '27', '19:31:20']
909
+ formatted = f"{parts[0]}:{parts[1]}:{parts[2]} {'-'.join(parts[3:])}"
910
+ return formatted
911
+ except Exception:
912
+ pass
913
+
914
+ # If parsing fails, return the cleaned string as-is
915
+ return timestamp_clean
916
+
917
def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
    """Get formatted current timestamp based on stream type.

    Branches:
    - No stream_info: fixed placeholder "00:00:00.00".
    - precision=True: for frame-based input (start_frame present) returns the
      formatted input_settings.stream_time; otherwise returns "now" in the
      raw 'YYYY-MM-DD-HH:MM:SS.ffffff UTC' form.
    - precision=False: same frame-based path, else parses the nested
      stream_info.stream_time string and formats it (falling back to the
      current wall clock on any parse failure).
    """

    if not stream_info:
        return "00:00:00.00"
    if precision:
        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
            # Compute the video-relative offset from frame_id (or start_frame)
            # and the stream's original fps.
            if frame_id:
                start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
            else:
                start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
            # NOTE(review): stream_time_str is computed but never used — the
            # video-relative timestamp appears to be discarded in favour of
            # the absolute stream_time below; confirm intent.
            stream_time_str = self._format_timestamp_for_video(start_time)

            return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
        else:
            # Non-frame-based input: raw high-precision wall-clock string.
            return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")

    if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
        if frame_id:
            start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
        else:
            start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)

        # NOTE(review): same dead assignment as the precision branch above.
        stream_time_str = self._format_timestamp_for_video(start_time)


        return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
    else:
        # Streaming input: parse the nested stream_info.stream_time
        # ('YYYY-MM-DD-HH:MM:SS.ffffff UTC') into a Unix timestamp.
        stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
        if stream_time_str:
            try:
                timestamp_str = stream_time_str.replace(" UTC", "")
                dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
                return self._format_timestamp_for_stream(timestamp)
            except:
                # Parse failure: fall back to the current wall clock.
                return self._format_timestamp_for_stream(time.time())
        else:
            return self._format_timestamp_for_stream(time.time())
956
+
957
def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
    """Get formatted start timestamp for 'TOTAL SINCE' based on stream type.

    Caches the session start in ``self.start_timer`` (raw string form) on
    first call — or when start_frame == 1, i.e. a restarted video — and
    thereafter formats that cached value. The fallback path (no cached
    start_timer, non-initial frame) derives the start from the parsed
    stream time in ``self._tracking_start_time``, truncated to the hour.
    NOTE(review): assumes self.start_timer / self._tracking_start_time are
    initialised (to None) in __init__ — confirm against the constructor.
    """
    if not stream_info:
        return "00:00:00"

    if precision:
        if self.start_timer is None:
            # First call: capture input_settings.stream_time, defaulting to
            # "now" when missing/NA.
            candidate = stream_info.get("input_settings", {}).get("stream_time")
            if not candidate or candidate == "NA":
                candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
            self.start_timer = candidate
            return self._format_timestamp(self.start_timer)
        elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
            # Video restarted from its first frame: re-capture the start.
            candidate = stream_info.get("input_settings", {}).get("stream_time")
            if not candidate or candidate == "NA":
                candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
            self.start_timer = candidate
            return self._format_timestamp(self.start_timer)
        else:
            # Steady state: reuse the cached start.
            return self._format_timestamp(self.start_timer)

    if self.start_timer is None:
        # Prefer direct input_settings.stream_time if available and not NA
        candidate = stream_info.get("input_settings", {}).get("stream_time")
        if not candidate or candidate == "NA":
            # Fallback to nested stream_info.stream_time used by current timestamp path
            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
            if stream_time_str:
                try:
                    timestamp_str = stream_time_str.replace(" UTC", "")
                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                    # Side effect: also seeds _tracking_start_time for the
                    # hour-truncated fallback branch below.
                    self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
                    candidate = datetime.fromtimestamp(self._tracking_start_time, timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
                except:
                    candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
            else:
                candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
        self.start_timer = candidate
        return self._format_timestamp(self.start_timer)
    elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
        # Restarted video: re-derive the start exactly as above (without
        # touching _tracking_start_time).
        candidate = stream_info.get("input_settings", {}).get("stream_time")
        if not candidate or candidate == "NA":
            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
            if stream_time_str:
                try:
                    timestamp_str = stream_time_str.replace(" UTC", "")
                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                    ts = dt.replace(tzinfo=timezone.utc).timestamp()
                    candidate = datetime.fromtimestamp(ts, timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
                except:
                    candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
            else:
                candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
        self.start_timer = candidate
        return self._format_timestamp(self.start_timer)

    else:
        # Cached start available (and not the "NA" placeholder): format it.
        if self.start_timer is not None and self.start_timer != "NA":
            return self._format_timestamp(self.start_timer)

        # No usable cache: derive _tracking_start_time from the nested
        # stream time, defaulting to "now" on any failure.
        if self._tracking_start_time is None:
            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
            if stream_time_str:
                try:
                    timestamp_str = stream_time_str.replace(" UTC", "")
                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                    self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
                except:
                    self._tracking_start_time = time.time()
            else:
                self._tracking_start_time = time.time()

        # Truncate to the start of the hour for the "TOTAL SINCE" display.
        dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
        dt = dt.replace(minute=0, second=0, microsecond=0)
        return dt.strftime('%Y:%m:%d %H:%M:%S')
1032
+
1033
def _count_categories(self, detections: list, config: ParkingLotAnalyticsConfig) -> dict:
    """
    Tally detections per category and echo back a trimmed copy of each
    detection (bounding box, category, confidence, track/frame ids).
    The config parameter is accepted for interface parity and unused here.
    """
    tally: dict = {}
    for item in detections:
        label = item.get('category', 'unknown')
        tally[label] = tally.get(label, 0) + 1

    trimmed = [
        {
            "bounding_box": item.get("bounding_box"),
            "category": item.get("category"),
            "confidence": item.get("confidence"),
            "track_id": item.get("track_id"),
            "frame_id": item.get("frame_id"),
        }
        for item in detections
    ]

    return {
        "total_count": sum(tally.values()),
        "per_category_count": tally,
        "detections": trimmed,
    }
1052
+
1053
+ def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
1054
+ return [
1055
+ {
1056
+ "category": det.get("category", "unknown"),
1057
+ "confidence": det.get("confidence", 0.0),
1058
+ "bounding_box": det.get("bounding_box", {})
1059
+ }
1060
+ for det in detections
1061
+ ]
1062
+
1063
+ def _compute_iou(self, box1: Any, box2: Any) -> float:
1064
+ def _bbox_to_list(bbox):
1065
+ if bbox is None:
1066
+ return []
1067
+ if isinstance(bbox, list):
1068
+ return bbox[:4] if len(bbox) >= 4 else []
1069
+ if isinstance(bbox, dict):
1070
+ if "xmin" in bbox:
1071
+ return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
1072
+ if "x1" in bbox:
1073
+ return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
1074
+ values = [v for v in bbox.values() if isinstance(v, (int, float))]
1075
+ return values[:4] if len(values) >= 4 else []
1076
+ return []
1077
+
1078
+ l1 = _bbox_to_list(box1)
1079
+ l2 = _bbox_to_list(box2)
1080
+ if len(l1) < 4 or len(l2) < 4:
1081
+ return 0.0
1082
+ x1_min, y1_min, x1_max, y1_max = l1
1083
+ x2_min, y2_min, x2_max, y2_max = l2
1084
+ x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
1085
+ y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
1086
+ x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
1087
+ y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
1088
+ inter_x_min = max(x1_min, x2_min)
1089
+ inter_y_min = max(y1_min, y2_min)
1090
+ inter_x_max = min(x1_max, x2_max)
1091
+ inter_y_max = min(y1_max, y2_max)
1092
+ inter_w = max(0.0, inter_x_max - inter_x_min)
1093
+ inter_h = max(0.0, inter_y_max - inter_y_min)
1094
+ inter_area = inter_w * inter_h
1095
+ area1 = (x1_max - x1_min) * (y1_max - y1_min)
1096
+ area2 = (x2_max - x2_min) * (y2_max - y2_min)
1097
+ union_area = area1 + area2 - inter_area
1098
+ return (inter_area / union_area) if union_area > 0 else 0.0
1099
+
1100
def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
    """Map a raw tracker id to a canonical id, merging ids whose boxes overlap.

    Maintains two pieces of state on self:
    - ``_track_aliases``: raw id -> canonical id
    - ``_canonical_tracks``: canonical id -> {last_bbox, last_update, raw_ids}
    A new raw id is merged into an existing canonical track when that track
    was updated within ``_track_merge_time_window`` seconds and the IoU of
    the boxes reaches ``_track_merge_iou_threshold``; otherwise the raw id
    becomes its own canonical track. Returns ``raw_id`` unchanged when either
    argument is None.
    """
    if raw_id is None or bbox is None:
        return raw_id
    now = time.time()
    # Fast path: this raw id is already aliased to a canonical track —
    # refresh that track's box/timestamp and return the canonical id.
    if raw_id in self._track_aliases:
        canonical_id = self._track_aliases[raw_id]
        track_info = self._canonical_tracks.get(canonical_id)
        if track_info is not None:
            track_info["last_bbox"] = bbox
            track_info["last_update"] = now
            track_info["raw_ids"].add(raw_id)
        return canonical_id
    # Try to merge into a recently-updated canonical track by IoU.
    for canonical_id, info in self._canonical_tracks.items():
        # Skip stale tracks outside the merge time window.
        if now - info["last_update"] > self._track_merge_time_window:
            continue
        iou = self._compute_iou(bbox, info["last_bbox"])
        if iou >= self._track_merge_iou_threshold:
            self._track_aliases[raw_id] = canonical_id
            info["last_bbox"] = bbox
            info["last_update"] = now
            info["raw_ids"].add(raw_id)
            return canonical_id
    # No overlap with any live track: register the raw id as its own
    # canonical track.
    canonical_id = raw_id
    self._track_aliases[raw_id] = canonical_id
    self._canonical_tracks[canonical_id] = {
        "last_bbox": bbox,
        "last_update": now,
        "raw_ids": {raw_id},
    }
    return canonical_id
1130
+
1131
+ def _get_tracking_start_time(self) -> str:
1132
+ if self._tracking_start_time is None:
1133
+ return "N/A"
1134
+ return self._format_timestamp(self._tracking_start_time)
1135
+
1136
+ def _set_tracking_start_time(self) -> None:
1137
+ self._tracking_start_time = time.time()