matrice_analytics-0.1.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of matrice-analytics might be problematic.

Files changed (160)
  1. matrice_analytics/__init__.py +28 -0
  2. matrice_analytics/boundary_drawing_internal/README.md +305 -0
  3. matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
  4. matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
  5. matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
  6. matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
  7. matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
  8. matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
  9. matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
  10. matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
  11. matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
  12. matrice_analytics/post_processing/README.md +455 -0
  13. matrice_analytics/post_processing/__init__.py +732 -0
  14. matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
  15. matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
  16. matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
  17. matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
  18. matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
  19. matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
  20. matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
  21. matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
  22. matrice_analytics/post_processing/config.py +142 -0
  23. matrice_analytics/post_processing/core/__init__.py +63 -0
  24. matrice_analytics/post_processing/core/base.py +704 -0
  25. matrice_analytics/post_processing/core/config.py +3188 -0
  26. matrice_analytics/post_processing/core/config_utils.py +925 -0
  27. matrice_analytics/post_processing/face_reg/__init__.py +43 -0
  28. matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
  29. matrice_analytics/post_processing/face_reg/embedding_manager.py +681 -0
  30. matrice_analytics/post_processing/face_reg/face_recognition.py +1870 -0
  31. matrice_analytics/post_processing/face_reg/face_recognition_client.py +339 -0
  32. matrice_analytics/post_processing/face_reg/people_activity_logging.py +283 -0
  33. matrice_analytics/post_processing/ocr/__init__.py +0 -0
  34. matrice_analytics/post_processing/ocr/easyocr_extractor.py +248 -0
  35. matrice_analytics/post_processing/ocr/postprocessing.py +271 -0
  36. matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
  37. matrice_analytics/post_processing/post_processor.py +1153 -0
  38. matrice_analytics/post_processing/test_cases/__init__.py +1 -0
  39. matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
  40. matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
  41. matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
  42. matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
  43. matrice_analytics/post_processing/test_cases/test_config.py +852 -0
  44. matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
  45. matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
  46. matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
  47. matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
  48. matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
  49. matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
  50. matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
  51. matrice_analytics/post_processing/usecases/__init__.py +267 -0
  52. matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
  53. matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
  54. matrice_analytics/post_processing/usecases/age_detection.py +842 -0
  55. matrice_analytics/post_processing/usecases/age_gender_detection.py +1043 -0
  56. matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
  57. matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
  58. matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
  59. matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
  60. matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
  61. matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
  62. matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
  63. matrice_analytics/post_processing/usecases/car_service.py +1601 -0
  64. matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
  65. matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
  66. matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
  67. matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
  68. matrice_analytics/post_processing/usecases/color/clip.py +232 -0
  69. matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
  70. matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
  71. matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
  72. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
  73. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
  74. matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
  75. matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
  76. matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
  77. matrice_analytics/post_processing/usecases/color_detection.py +1835 -0
  78. matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
  79. matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
  80. matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
  81. matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
  82. matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
  83. matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
  84. matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +930 -0
  85. matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
  86. matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
  87. matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
  88. matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
  89. matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
  90. matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
  91. matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
  92. matrice_analytics/post_processing/usecases/fire_detection.py +1112 -0
  93. matrice_analytics/post_processing/usecases/flare_analysis.py +891 -0
  94. matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
  95. matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
  96. matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
  97. matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
  98. matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
  99. matrice_analytics/post_processing/usecases/leaf.py +821 -0
  100. matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
  101. matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
  102. matrice_analytics/post_processing/usecases/license_plate_detection.py +914 -0
  103. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1194 -0
  104. matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
  105. matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
  106. matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
  107. matrice_analytics/post_processing/usecases/parking.py +787 -0
  108. matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
  109. matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
  110. matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
  111. matrice_analytics/post_processing/usecases/people_counting.py +1728 -0
  112. matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
  113. matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
  114. matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
  115. matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
  116. matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
  117. matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
  118. matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
  119. matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
  120. matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
  121. matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
  122. matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
  123. matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
  124. matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
  125. matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
  126. matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
  127. matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
  128. matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
  129. matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
  130. matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
  131. matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
  132. matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
  133. matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
  134. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +950 -0
  135. matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
  136. matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
  137. matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
  138. matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
  139. matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
  140. matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
  141. matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
  142. matrice_analytics/post_processing/utils/__init__.py +150 -0
  143. matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
  144. matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
  145. matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
  146. matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
  147. matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
  148. matrice_analytics/post_processing/utils/color_utils.py +592 -0
  149. matrice_analytics/post_processing/utils/counting_utils.py +182 -0
  150. matrice_analytics/post_processing/utils/filter_utils.py +261 -0
  151. matrice_analytics/post_processing/utils/format_utils.py +293 -0
  152. matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
  153. matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
  154. matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
  155. matrice_analytics/py.typed +0 -0
  156. matrice_analytics-0.1.2.dist-info/METADATA +481 -0
  157. matrice_analytics-0.1.2.dist-info/RECORD +160 -0
  158. matrice_analytics-0.1.2.dist-info/WHEEL +5 -0
  159. matrice_analytics-0.1.2.dist-info/licenses/LICENSE.txt +21 -0
  160. matrice_analytics-0.1.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,930 @@
1
+ from typing import Any, Dict, List, Optional, Tuple
2
+ from dataclasses import asdict
3
+ import time
4
+ from datetime import datetime, timezone
5
+
6
+ from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
7
+ from ..utils import (
8
+ filter_by_confidence,
9
+ filter_by_categories,
10
+ apply_category_mapping,
11
+ count_objects_by_category,
12
+ count_objects_in_zones,
13
+ calculate_counting_summary,
14
+ match_results_structure,
15
+ bbox_smoothing,
16
+ BBoxSmoothingConfig,
17
+ BBoxSmoothingTracker
18
+ )
19
+ from dataclasses import dataclass, field
20
+ from ..core.config import BaseConfig, AlertConfig, ZoneConfig
21
+ from ..utils.geometry_utils import get_bbox_center, point_in_polygon, get_bbox_bottom25_center
22
+
23
+ @dataclass
24
+ class VehiclePeopleDroneMonitoringConfig(BaseConfig):
25
+ """Configuration for vehicle detection use case in vehicle monitoring."""
26
+ enable_smoothing: bool = True
27
+ smoothing_algorithm: str = "observability"
28
+ smoothing_window_size: int = 20
29
+ smoothing_cooldown_frames: int = 5
30
+ smoothing_confidence_range_factor: float = 0.5
31
+ confidence_threshold: float = 0.6
32
+
33
+ #JBK_720_GATE POLYGON = [[86, 328], [844, 317], [1277, 520], [1273, 707], [125, 713]]
34
+ zone_config: Optional[Dict[str, Dict[str, List[List[float]]]]] = None #field(
35
+ # default_factory=lambda: {
36
+ # "zones": {
37
+ # "Interest_Region": [[86, 328], [844, 317], [1277, 520], [1273, 707], [125, 713]],
38
+ # }
39
+ # }
40
+ # )
41
+ usecase_categories: List[str] = field(
42
+ default_factory=lambda: [
43
+ "pedestrian", "people", "bicycle", "car", "van", "truck", "tricycle", "awning-tricycle", "bus", "motor"
44
+ ]
45
+ )
46
+ target_categories: List[str] = field(
47
+ default_factory=lambda: [
48
+ "pedestrian", "people", "bicycle", "car", "van", "truck", "tricycle", "awning-tricycle", "bus", "motor"]
49
+ )
50
+ alert_config: Optional[AlertConfig] = None
51
+ index_to_category: Optional[Dict[int, str]] = field(
52
+ default_factory=lambda: {
53
+ 0: "pedestrian", 1: "people", 2: "bicycle", 3: "car", 4: "van", 5: "truck",
54
+ 6: "tricycle", 7: "awning-tricycle", 8: "bus", 9: "motor"
55
+ }
56
+ )
57
+
58
+ class DroneTrafficMonitoringUsecase(BaseProcessor):
59
+ CATEGORY_DISPLAY = {
60
+ # Display names for the supported drone traffic classes
+ "pedestrian": "Pedestrian",
62
+ "people": "People",
63
+ "bicycle": "Bicycle",
64
+ "car": "Car",
65
+ "van": "Van",
66
+ "truck": "Truck",
67
+ "tricycle": "Tricycle",
68
+ "awning-tricycle": "Awning-Tricycle",
69
+ "bus": "Bus",
70
+ "motor": "Motorcycle"
71
+ }
72
+
73
+ def __init__(self):
74
+ super().__init__("drone_traffic_monitoring")
75
+ self.category = "traffic"
76
+ self.CASE_TYPE: Optional[str] = 'drone_traffic_monitoring'
77
+ self.CASE_VERSION: Optional[str] = '1.0'
78
+ self.target_categories = ["pedestrian", "people", "bicycle", "car", "van", "truck", "tricycle", "awning-tricycle", "bus", "motor"]
79
+ self.smoothing_tracker = None
80
+ self.tracker = None
81
+ self._total_frame_counter = 0
82
+ self._global_frame_offset = 0
83
+ self._tracking_start_time = None
84
+ self._track_aliases: Dict[Any, Any] = {}
85
+ self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
86
+ self._track_merge_iou_threshold: float = 0.05
87
+ self._track_merge_time_window: float = 7.0
88
+ self._ascending_alert_list: List[int] = []
89
+ self.current_incident_end_timestamp: str = "N/A"
90
+ self.start_timer = None
91
+
92
+ # Track ID storage for total count calculation
93
+ self._total_track_ids = set() # Store all unique track IDs seen across calls
94
+ self._current_frame_track_ids = set() # Store track IDs from current frame
95
+ self._total_count = 0 # Cached total count
96
+ self._last_update_time = time.time() # Track when last updated
97
+ self._total_count_list = []
98
+
99
+ # Zone-based tracking storage
100
+ self._zone_current_track_ids = {} # zone_name -> set of current track IDs in zone
101
+ self._zone_total_track_ids = {} # zone_name -> set of all track IDs that have been in zone
102
+ self._zone_current_counts = {} # zone_name -> current count in zone
103
+ self._zone_total_counts = {} # zone_name -> total count that have been in zone
104
+
105
+ def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
106
+ stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
107
+ processing_start = time.time()
108
+ if not isinstance(config, VehiclePeopleDroneMonitoringConfig):
109
+ return self.create_error_result("Invalid config type", usecase=self.name, category=self.category, context=context)
110
+ if context is None:
111
+ context = ProcessingContext()
112
+
113
+ # Normalize typical YOLO outputs (COCO pretrained) to internal schema
114
+ data = self._normalize_yolo_results(data, getattr(config, 'index_to_category', None))
115
+
116
+ input_format = match_results_structure(data)
117
+ context.input_format = input_format
118
+ context.confidence_threshold = config.confidence_threshold
120
+
121
+ if config.confidence_threshold is not None:
122
+ processed_data = filter_by_confidence(data, config.confidence_threshold)
123
+ self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
124
+ else:
125
+ processed_data = data
126
+ self.logger.debug("Did not apply confidence filtering since no threshold provided")
127
+
128
+ if config.index_to_category:
129
+ processed_data = apply_category_mapping(processed_data, config.index_to_category)
130
+ self.logger.debug("Applied category mapping")
131
+
132
+ target_categories = config.target_categories or self.target_categories
+ processed_data = [d for d in processed_data if d.get('category') in target_categories]
+ self.logger.debug("Applied category filtering")
136
+
137
+
138
+ if config.enable_smoothing:
139
+ if self.smoothing_tracker is None:
140
+ smoothing_config = BBoxSmoothingConfig(
141
+ smoothing_algorithm=config.smoothing_algorithm,
142
+ window_size=config.smoothing_window_size,
143
+ cooldown_frames=config.smoothing_cooldown_frames,
144
+ confidence_threshold=config.confidence_threshold,
145
+ confidence_range_factor=config.smoothing_confidence_range_factor,
146
+ enable_smoothing=True
147
+ )
148
+ self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
149
+ processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
150
+
151
+ try:
152
+ from ..advanced_tracker import AdvancedTracker
153
+ from ..advanced_tracker.config import TrackerConfig
154
+ if self.tracker is None:
155
+ tracker_config = TrackerConfig()
156
+ self.tracker = AdvancedTracker(tracker_config)
157
+ self.logger.info("Initialized AdvancedTracker for Vehicle Monitoring")
158
+ processed_data = self.tracker.update(processed_data)
159
+ except Exception as e:
160
+ self.logger.warning(f"AdvancedTracker failed: {e}")
161
+
162
+ self._update_tracking_state(processed_data)
163
+ self._total_frame_counter += 1
164
+
165
+ frame_number = None
166
+ if stream_info:
167
+ input_settings = stream_info.get("input_settings", {})
168
+ start_frame = input_settings.get("start_frame")
169
+ end_frame = input_settings.get("end_frame")
170
+ if start_frame is not None and end_frame is not None and start_frame == end_frame:
171
+ frame_number = start_frame
172
+
173
+ general_counting_summary = calculate_counting_summary(data)
174
+ counting_summary = self._count_categories(processed_data, config)
175
+ total_counts = self.get_total_counts()
176
+ counting_summary['total_counts'] = total_counts
177
+ counting_summary['categories'] = {}
178
+ for detection in processed_data:
179
+ category = detection.get("category", "unknown")
180
+ counting_summary["categories"][category] = counting_summary["categories"].get(category, 0) + 1
181
+
182
+ zone_analysis = {}
183
+ if config.zone_config and config.zone_config['zones']:
184
+ # Count current-frame detections inside each configured zone polygon
+ frame_data = processed_data
+ zone_analysis = count_objects_in_zones(frame_data, config.zone_config['zones'], stream_info)
187
+
188
+ if zone_analysis:
189
+ enhanced_zone_analysis = self._update_zone_tracking(zone_analysis, processed_data, config)
190
+ # Merge enhanced zone analysis with original zone analysis
191
+ for zone_name, enhanced_data in enhanced_zone_analysis.items():
192
+ zone_analysis[zone_name] = enhanced_data
193
+
194
+
195
+ alerts = self._check_alerts(counting_summary,zone_analysis, frame_number, config)
196
+ predictions = self._extract_predictions(processed_data)
197
+
198
+ incidents_list = self._generate_incidents(counting_summary, zone_analysis, alerts, config, frame_number, stream_info)
200
+ tracking_stats_list = self._generate_tracking_stats(counting_summary,zone_analysis, alerts, config, frame_number, stream_info)
201
+
202
+ business_analytics_list = self._generate_business_analytics(counting_summary,zone_analysis, alerts, config, stream_info, is_empty=True)
203
+ summary_list = self._generate_summary(counting_summary,zone_analysis, incidents_list, tracking_stats_list, business_analytics_list, alerts)
204
+
205
+ incidents = incidents_list[0] if incidents_list else {}
206
+ tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
207
+ business_analytics = business_analytics_list[0] if business_analytics_list else {}
208
+ summary = summary_list[0] if summary_list else {}
209
+ agg_summary = {str(frame_number): {
210
+ "incidents": incidents,
211
+ "tracking_stats": tracking_stats,
212
+ "business_analytics": business_analytics,
213
+ "alerts": alerts,
214
+ "zone_analysis": zone_analysis,
215
+ "human_text": summary}
216
+ }
217
+
218
+ context.mark_completed()
219
+ result = self.create_result(
220
+ data={"agg_summary": agg_summary},
221
+ usecase=self.name,
222
+ category=self.category,
223
+ context=context
224
+ )
225
+ proc_time = time.time() - processing_start
226
+ processing_latency_ms = proc_time * 1000.0
227
+ processing_fps = (1.0 / proc_time) if proc_time > 0 else None
228
+ # Log the performance metrics using the processor's logger
+ self.logger.debug("latency in ms: %s | Throughput fps: %s | Frame_Number: %s", processing_latency_ms, processing_fps, self._total_frame_counter)
230
+ return result
231
+
232
+ def _update_zone_tracking(self, zone_analysis: Dict[str, Dict[str, int]], detections: List[Dict], config: VehiclePeopleDroneMonitoringConfig) -> Dict[str, Dict[str, Any]]:
233
+ """
234
+ Update zone tracking with current frame data.
235
+
236
+ Args:
237
+ zone_analysis: Current zone analysis results
238
+ detections: List of detections with track IDs
239
+
240
+ Returns:
241
+ Enhanced zone analysis with tracking information
242
+ """
243
+ if not zone_analysis or not config.zone_config or not config.zone_config['zones']:
244
+ return {}
245
+
246
+ enhanced_zone_analysis = {}
247
+ zones = config.zone_config['zones']
248
+
249
+ # Get current frame track IDs in each zone
250
+ current_frame_zone_tracks = {}
251
+
252
+ # Initialize zone tracking for all zones
253
+ for zone_name in zones.keys():
254
+ current_frame_zone_tracks[zone_name] = set()
255
+ if zone_name not in self._zone_current_track_ids:
256
+ self._zone_current_track_ids[zone_name] = set()
257
+ if zone_name not in self._zone_total_track_ids:
258
+ self._zone_total_track_ids[zone_name] = set()
259
+
260
+ # Check each detection against each zone
261
+ for detection in detections:
262
+ track_id = detection.get("track_id")
263
+ if track_id is None:
264
+ continue
265
+
266
+ # Get detection bbox
267
+ bbox = detection.get("bounding_box", detection.get("bbox"))
268
+ if not bbox:
269
+ continue
270
+
271
+ # Get detection center point
272
+ center_point = get_bbox_bottom25_center(bbox) #get_bbox_center(bbox)
273
+
274
+ # Check which zone this detection is in using actual zone polygons
275
+ for zone_name, zone_polygon in zones.items():
276
+ # Convert polygon points to tuples for point_in_polygon function
277
+ # zone_polygon format: [[x1, y1], [x2, y2], [x3, y3], ...]
278
+ polygon_points = [(point[0], point[1]) for point in zone_polygon]
279
+
280
+ # Check if detection center is inside the zone polygon using ray casting algorithm
281
+ if point_in_polygon(center_point, polygon_points):
282
+ current_frame_zone_tracks[zone_name].add(track_id)
283
+ if track_id not in self._total_count_list:
284
+ self._total_count_list.append(track_id)
285
+
286
+ # Update zone tracking for each zone
287
+ for zone_name, zone_counts in zone_analysis.items():
288
+ # Get current frame tracks for this zone
289
+ current_tracks = current_frame_zone_tracks.get(zone_name, set())
290
+
291
+ # Update current zone tracks
292
+ self._zone_current_track_ids[zone_name] = current_tracks
293
+
294
+ # Update total zone tracks (accumulate all track IDs that have been in this zone)
295
+ self._zone_total_track_ids[zone_name].update(current_tracks)
296
+
297
+ # Update counts
298
+ self._zone_current_counts[zone_name] = len(current_tracks)
299
+ self._zone_total_counts[zone_name] = len(self._zone_total_track_ids[zone_name])
300
+
301
+ # Create enhanced zone analysis
302
+ enhanced_zone_analysis[zone_name] = {
303
+ "current_count": self._zone_current_counts[zone_name],
304
+ "total_count": self._zone_total_counts[zone_name],
305
+ "current_track_ids": list(current_tracks),
306
+ "total_track_ids": list(self._zone_total_track_ids[zone_name]),
307
+ "original_counts": zone_counts # Preserve original zone counts
308
+ }
309
+
310
+ return enhanced_zone_analysis
311
+
312
+ def _normalize_yolo_results(self, data: Any, index_to_category: Optional[Dict[int, str]] = None) -> Any:
313
+ """
314
+ Normalize YOLO-style outputs to internal detection schema:
315
+ - category/category_id: prefer string label using COCO mapping if available
316
+ - confidence: map from 'conf'/'score' to 'confidence'
317
+ - bounding_box: ensure dict with keys (x1,y1,x2,y2) or (xmin,ymin,xmax,ymax)
318
+ - supports list of detections and frame_id -> detections dict
319
+ """
320
+ def to_bbox_dict(d: Dict[str, Any]) -> Dict[str, Any]:
321
+ if "bounding_box" in d and isinstance(d["bounding_box"], dict):
322
+ return d["bounding_box"]
323
+ if "bbox" in d:
324
+ bbox = d["bbox"]
325
+ if isinstance(bbox, dict):
326
+ return bbox
327
+ if isinstance(bbox, (list, tuple)) and len(bbox) >= 4:
328
+ x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]
329
+ return {"x1": x1, "y1": y1, "x2": x2, "y2": y2}
330
+ if "xyxy" in d and isinstance(d["xyxy"], (list, tuple)) and len(d["xyxy"]) >= 4:
331
+ x1, y1, x2, y2 = d["xyxy"][0], d["xyxy"][1], d["xyxy"][2], d["xyxy"][3]
332
+ return {"x1": x1, "y1": y1, "x2": x2, "y2": y2}
333
+ if "xywh" in d and isinstance(d["xywh"], (list, tuple)) and len(d["xywh"]) >= 4:
334
+ cx, cy, w, h = d["xywh"][0], d["xywh"][1], d["xywh"][2], d["xywh"][3]
335
+ x1, y1, x2, y2 = cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2
336
+ return {"x1": x1, "y1": y1, "x2": x2, "y2": y2}
337
+ return {}
338
+
339
+ def resolve_category(d: Dict[str, Any]) -> Tuple[str, Optional[int]]:
340
+ raw_cls = d.get("category", d.get("category_id", d.get("class", d.get("cls"))))
341
+ label_name = d.get("name")
342
+ if isinstance(raw_cls, int):
343
+ if index_to_category and raw_cls in index_to_category:
344
+ return index_to_category[raw_cls], raw_cls
345
+ return str(raw_cls), raw_cls
346
+ if isinstance(raw_cls, str):
347
+ # Some YOLO exports provide string labels directly
348
+ return raw_cls, None
349
+ if label_name:
350
+ return str(label_name), None
351
+ return "unknown", None
352
+
353
+ def normalize_det(det: Dict[str, Any]) -> Dict[str, Any]:
354
+ category_name, category_id = resolve_category(det)
355
+ confidence = det.get("confidence", det.get("conf", det.get("score", 0.0)))
356
+ bbox = to_bbox_dict(det)
357
+ normalized = {
358
+ "category": category_name,
359
+ "confidence": confidence,
360
+ "bounding_box": bbox,
361
+ }
362
+ if category_id is not None:
363
+ normalized["category_id"] = category_id
364
+ # Preserve optional fields
365
+ for key in ("track_id", "frame_id", "masks", "segmentation"):
366
+ if key in det:
367
+ normalized[key] = det[key]
368
+ return normalized
369
+
370
+ if isinstance(data, list):
371
+ return [normalize_det(d) if isinstance(d, dict) else d for d in data]
372
+ if isinstance(data, dict):
373
+ # Detect tracking style dict: frame_id -> list of detections
374
+ normalized_dict: Dict[str, Any] = {}
375
+ for k, v in data.items():
376
+ if isinstance(v, list):
377
+ normalized_dict[k] = [normalize_det(d) if isinstance(d, dict) else d for d in v]
378
+ elif isinstance(v, dict):
379
+ normalized_dict[k] = normalize_det(v)
380
+ else:
381
+ normalized_dict[k] = v
382
+ return normalized_dict
383
+ return data
384
+
385
+ def _check_alerts(self, summary: dict, zone_analysis: Dict, frame_number: Any, config: VehiclePeopleDroneMonitoringConfig) -> List[Dict]:
386
+ def get_trend(data, lookback=900, threshold=0.6):
387
+ window = data[-lookback:] if len(data) >= lookback else data
388
+ if len(window) < 2:
389
+ return True
390
+ increasing = 0
391
+ total = 0
392
+ for i in range(1, len(window)):
393
+ if window[i] >= window[i - 1]:
394
+ increasing += 1
395
+ total += 1
396
+ ratio = increasing / total
397
+ return ratio >= threshold
398
+
399
+ frame_key = str(frame_number) if frame_number is not None else "current_frame"
400
+ alerts = []
401
+ total_detections = summary.get("total_count", 0)
402
+ total_counts_dict = summary.get("total_counts", {})
403
+ per_category_count = summary.get("per_category_count", {})
404
+
405
+ if not config.alert_config:
406
+ return alerts
407
+
408
+ if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
409
+ for category, threshold in config.alert_config.count_thresholds.items():
410
+ if category == "all" and total_detections > threshold:
411
+ alerts.append({
412
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
413
+ "alert_id": f"alert_{category}_{frame_key}",
414
+ "incident_category": self.CASE_TYPE,
415
+ "threshold_level": threshold,
416
+ "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
417
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
418
+ getattr(config.alert_config, 'alert_value', ['JSON']))}
419
+ })
420
+ elif category in per_category_count and per_category_count[category] > threshold:
421
+ alerts.append({
422
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
423
+ "alert_id": f"alert_{category}_{frame_key}",
424
+ "incident_category": self.CASE_TYPE,
425
+ "threshold_level": threshold,
426
+ "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
427
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
428
+ getattr(config.alert_config, 'alert_value', ['JSON']))}
429
+ })
430
+ return alerts
431
+
432
+ def _generate_incidents(self, counting_summary: Dict, zone_analysis: Dict, alerts: List, config: VehiclePeopleDroneMonitoringConfig,
433
+ frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
434
+ incidents = []
435
+ total_detections = counting_summary.get("total_count", 0)
436
+ current_timestamp = self._get_current_timestamp_str(stream_info)
437
+ camera_info = self.get_camera_info_from_stream(stream_info)
438
+
439
+ self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list
440
+
441
+ if total_detections > 0:
442
+ level = "low"
443
+ intensity = 5.0
444
+ start_timestamp = self._get_start_timestamp_str(stream_info)
445
+ if start_timestamp and self.current_incident_end_timestamp == 'N/A':
446
+ self.current_incident_end_timestamp = 'Incident still active'
447
+ elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
448
+ if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
449
+ self.current_incident_end_timestamp = current_timestamp
450
+ elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
451
+ self.current_incident_end_timestamp = 'N/A'
452
+
453
+ if config.alert_config and hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
454
+ threshold = config.alert_config.count_thresholds.get("all", 15)
455
+ intensity = min(10.0, (total_detections / threshold) * 10)
456
+ if intensity >= 9:
457
+ level = "critical"
458
+ self._ascending_alert_list.append(3)
459
+ elif intensity >= 7:
460
+ level = "significant"
461
+ self._ascending_alert_list.append(2)
462
+ elif intensity >= 5:
463
+ level = "medium"
464
+ self._ascending_alert_list.append(1)
465
+ else:
466
+ level = "low"
467
+ self._ascending_alert_list.append(0)
468
+ else:
469
+ if total_detections > 30:
470
+ level = "critical"
471
+ intensity = 10.0
472
+ self._ascending_alert_list.append(3)
473
+ elif total_detections > 25:
474
+ level = "significant"
475
+ intensity = 9.0
476
+ self._ascending_alert_list.append(2)
477
+ elif total_detections > 15:
478
+ level = "medium"
479
+ intensity = 7.0
480
+ self._ascending_alert_list.append(1)
481
+ else:
482
+ level = "low"
483
+ intensity = min(10.0, total_detections / 3.0)
484
+ self._ascending_alert_list.append(0)
485
+
486
+ human_text_lines = [f"VEHICLE INCIDENTS DETECTED @ {current_timestamp}:"]
487
+ human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
488
+ human_text = "\n".join(human_text_lines)
489
+
490
+ alert_settings = []
491
+ if config.alert_config and hasattr(config.alert_config, 'alert_type'):
492
+ alert_settings.append({
493
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
494
+ "incident_category": self.CASE_TYPE,
495
+ "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
496
+ "ascending": True,
497
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
498
+ getattr(config.alert_config, 'alert_value', ['JSON']))}
499
+ })
500
+
501
+ event = self.create_incident(
502
+ incident_id=f"{self.CASE_TYPE}_{frame_number}",
503
+ incident_type=self.CASE_TYPE,
504
+ severity_level=level,
505
+ human_text=human_text,
506
+ camera_info=camera_info,
507
+ alerts=alerts,
508
+ alert_settings=alert_settings,
509
+ start_time=start_timestamp,
510
+ end_time=self.current_incident_end_timestamp,
511
+ level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7}
512
+ )
513
+ incidents.append(event)
514
+ else:
515
+ self._ascending_alert_list.append(0)
516
+ incidents.append({})
517
+ return incidents
518
+
519
+ def _generate_tracking_stats(self, counting_summary: Dict, zone_analysis: Dict, alerts: List, config: VehiclePeopleDroneMonitoringConfig,
520
+ frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
521
+ camera_info = self.get_camera_info_from_stream(stream_info)
522
+ tracking_stats = []
523
+ total_detections = counting_summary.get("total_count", 0)
524
+ total_counts_dict = counting_summary.get("total_counts", {})
525
+ per_category_count = counting_summary.get("per_category_count", {})
526
+ current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
527
+ start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
528
+ high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
529
+ high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
530
+
531
+ total_counts = [{"category": cat, "count": count} for cat, count in total_counts_dict.items() if count > 0]
532
+ current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items() if count > 0 or total_detections > 0]
533
+
534
+ detections = []
535
+ for detection in counting_summary.get("detections", []):
536
+ bbox = detection.get("bounding_box", {})
537
+ category = detection.get("category", "vehicle")
538
+ if detection.get("masks"):
539
+ segmentation = detection.get("masks", [])
540
+ detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
541
+ elif detection.get("segmentation"):
542
+ segmentation = detection.get("segmentation")
543
+ detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
544
+ elif detection.get("mask"):
545
+ segmentation = detection.get("mask")
546
+ detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
547
+ else:
548
+ detection_obj = self.create_detection_object(category, bbox)
549
+ detections.append(detection_obj)
550
+
551
+ alert_settings = []
552
+ if config.alert_config and hasattr(config.alert_config, 'alert_type'):
553
+ alert_settings.append({
554
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
555
+ "incident_category": self.CASE_TYPE,
556
+ "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
557
+ "ascending": True,
558
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
559
+ getattr(config.alert_config, 'alert_value', ['JSON']))}
560
+ })
561
+
562
+ human_text_lines = [f"Tracking Statistics:"]
563
+ human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")
564
+ # Append zone-wise current counts if available
565
+ if zone_analysis:
566
+ human_text_lines.append("\tZones (current):")
567
+ for zone_name, zone_data in zone_analysis.items():
568
+ current_count = 0
569
+ if isinstance(zone_data, dict):
570
+ if "current_count" in zone_data:
571
+ current_count = zone_data.get("current_count", 0)
572
+ else:
573
+ counts_dict = zone_data.get("original_counts") if isinstance(zone_data.get("original_counts"), dict) else zone_data
574
+ current_count = counts_dict.get(
575
+ "total",
576
+ sum(v for v in counts_dict.values() if isinstance(v, (int, float)))
577
+ )
578
+ human_text_lines.append(f"\t{zone_name}: {int(current_count)}")
579
+ else:
580
+ for cat, count in per_category_count.items():
581
+ human_text_lines.append(f"\t{cat}: {count}")
582
+ human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
583
+ # Append zone-wise total counts if available
584
+ if zone_analysis:
585
+ human_text_lines.append("\tZones (total):")
586
+ for zone_name, zone_data in zone_analysis.items():
587
+ total_count = 0
588
+ if isinstance(zone_data, dict):
589
+ # Prefer the numeric cumulative total if available
590
+ if "total_count" in zone_data and isinstance(zone_data.get("total_count"), (int, float)):
591
+ total_count = zone_data.get("total_count", 0)
592
+ # Fallback: compute from list of total_track_ids if present
593
+ elif "total_track_ids" in zone_data and isinstance(zone_data.get("total_track_ids"), list):
594
+ total_count = len(zone_data.get("total_track_ids", []))
595
+ else:
596
+ # Last resort: try to sum numeric values present
597
+ counts_dict = zone_data if isinstance(zone_data, dict) else {}
598
+ total_count = sum(v for v in counts_dict.values() if isinstance(v, (int, float)))
599
+ human_text_lines.append(f"\t{zone_name}: {int(total_count)}")
600
+ else:
601
+ for cat, count in total_counts_dict.items():
602
+ if count > 0:
603
+ human_text_lines.append(f"\t{cat}: {count}")
604
+ if alerts:
605
+ for alert in alerts:
606
+ human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
607
+ else:
608
+ human_text_lines.append("Alerts: None")
609
+ human_text = "\n".join(human_text_lines)
610
+
611
+ reset_settings = [{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]
612
+ tracking_stat = self.create_tracking_stats(
613
+ total_counts=total_counts,
614
+ current_counts=current_counts,
615
+ detections=detections,
616
+ human_text=human_text,
617
+ camera_info=camera_info,
618
+ alerts=alerts,
619
+ alert_settings=alert_settings,
620
+ reset_settings=reset_settings,
621
+ start_time=high_precision_start_timestamp,
622
+ reset_time=high_precision_reset_timestamp
623
+ )
624
+ tracking_stats.append(tracking_stat)
625
+ return tracking_stats
626
+
627
+ def _generate_business_analytics(self, counting_summary: Dict, zone_analysis: Dict, alerts: Any, config: VehiclePeopleDroneMonitoringConfig,
628
+ stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
629
+ if is_empty:
+ return []
+ # No business analytics are generated for this use case; return an empty list instead of None
+ return []
631
+
632
+ def _generate_summary(self, summary: dict, zone_analysis: Dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
633
+ """
634
+ Generate a human_text string for the tracking_stat, incident, business analytics and alerts.
635
+ """
636
+ lines = []
637
+ lines.append("Application Name: "+self.CASE_TYPE)
638
+ lines.append("Application Version: "+self.CASE_VERSION)
639
+ if len(incidents) > 0:
640
+ lines.append("Incidents: "+f"\n\t{incidents[0].get('human_text', 'No incidents detected')}")
641
+ if len(tracking_stats) > 0:
642
+ lines.append("Tracking Statistics: "+f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}")
643
+ if len(business_analytics) > 0:
644
+ lines.append("Business Analytics: "+f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}")
645
+
646
+ if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
647
+ lines.append("Summary: "+"No Summary Data")
648
+
649
+ return ["\n".join(lines)]
650
+
651
+ def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
652
+ frame_track_ids = set()
653
+ for det in detections:
654
+ tid = det.get('track_id')
655
+ if tid is not None:
656
+ frame_track_ids.add(tid)
657
+ total_track_ids = set()
658
+ for s in getattr(self, '_per_category_total_track_ids', {}).values():
659
+ total_track_ids.update(s)
660
+ return {
661
+ "total_count": len(total_track_ids),
662
+ "current_frame_count": len(frame_track_ids),
663
+ "total_unique_track_ids": len(total_track_ids),
664
+ "current_frame_track_ids": list(frame_track_ids),
665
+ "last_update_time": time.time(),
666
+ "total_frames_processed": getattr(self, '_total_frame_counter', 0)
667
+ }
668
+
669
+ def _update_tracking_state(self, detections: list):
670
+ if not hasattr(self, "_per_category_total_track_ids"):
671
+ self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
672
+ self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
673
+
674
+ for det in detections:
675
+ cat = det.get("category")
676
+ raw_track_id = det.get("track_id")
677
+ if cat not in self.target_categories or raw_track_id is None:
678
+ continue
679
+ bbox = det.get("bounding_box", det.get("bbox"))
680
+ canonical_id = self._merge_or_register_track(raw_track_id, bbox)
681
+ det["track_id"] = canonical_id
682
+ self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
683
+ self._current_frame_track_ids[cat].add(canonical_id)
684
+
685
+ def get_total_counts(self):
686
+ return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
687
+
688
+ def _format_timestamp_for_stream(self, timestamp: float) -> str:
689
+ dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
690
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
691
+
692
+ def _format_timestamp_for_video(self, timestamp: float) -> str:
693
+ hours = int(timestamp // 3600)
694
+ minutes = int((timestamp % 3600) // 60)
695
+ seconds = float(timestamp % 60)
+ return f"{hours:02d}:{minutes:02d}:{seconds:05.2f}"
697
+
698
+ def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
699
+ """Get formatted current timestamp based on stream type."""
700
+ if not stream_info:
701
+ return "00:00:00.00"
702
+
703
+ if precision:
704
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
705
+ if frame_id:
706
+ start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
707
+ else:
708
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
709
+ stream_time_str = self._format_timestamp_for_video(start_time)
710
+
711
+
712
+ return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
713
+ else:
714
+ return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
715
+
716
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
717
+ if frame_id:
718
+ start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
719
+ else:
720
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
721
+
722
+ stream_time_str = self._format_timestamp_for_video(start_time)
723
+
724
+ return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
725
+ else:
726
+ stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
727
+ if stream_time_str:
728
+ try:
729
+ timestamp_str = stream_time_str.replace(" UTC", "")
730
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
731
+ timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
732
+ return self._format_timestamp_for_stream(timestamp)
733
+ except Exception:
734
+ return self._format_timestamp_for_stream(time.time())
735
+ else:
736
+ return self._format_timestamp_for_stream(time.time())
737
+
738
+ def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
739
+ """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
740
+ if not stream_info:
741
+ return "00:00:00"
742
+
743
+ if precision:
744
+ if self.start_timer is None:
745
+ self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
746
+ return self._format_timestamp(self.start_timer)
747
+ elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
748
+ self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
749
+ return self._format_timestamp(self.start_timer)
750
+ else:
751
+ return self._format_timestamp(self.start_timer)
752
+
753
+ if self.start_timer is None:
754
+ self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
755
+ return self._format_timestamp(self.start_timer)
756
+ elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
757
+ self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
758
+ return self._format_timestamp(self.start_timer)
759
+
760
+ else:
761
+ if self.start_timer is not None:
762
+ return self._format_timestamp(self.start_timer)
763
+
764
+ if self._tracking_start_time is None:
765
+ stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
766
+ if stream_time_str:
767
+ try:
768
+ timestamp_str = stream_time_str.replace(" UTC", "")
769
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
770
+ self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
771
+ except Exception:
772
+ self._tracking_start_time = time.time()
773
+ else:
774
+ self._tracking_start_time = time.time()
775
+
776
+ dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
777
+ dt = dt.replace(minute=0, second=0, microsecond=0)
778
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
779
+
780
+ def _format_timestamp(self, timestamp: Any) -> str:
781
+ """Format a timestamp so that exactly two digits follow the decimal point (milliseconds).
782
+
783
+ The input can be either:
784
+ 1. A numeric Unix timestamp (``float`` / ``int``) – it will first be converted to a
785
+ string in the format ``YYYY-MM-DD-HH:MM:SS.ffffff UTC``.
786
+ 2. A string already following the same layout.
787
+
788
+ The returned value preserves the overall format of the input but truncates or pads
789
+ the fractional seconds portion to **exactly two digits**.
790
+
791
+ Example
792
+ -------
793
+ >>> self._format_timestamp("2025-08-19-04:22:47.187574 UTC")
794
+ '2025-08-19-04:22:47.18 UTC'
795
+ """
796
+
797
+ # Convert numeric timestamps to the expected string representation first
798
+ if isinstance(timestamp, (int, float)):
799
+ timestamp = datetime.fromtimestamp(timestamp, timezone.utc).strftime(
800
+ '%Y-%m-%d-%H:%M:%S.%f UTC'
801
+ )
802
+
803
+ # Ensure we are working with a string from here on
804
+ if not isinstance(timestamp, str):
805
+ return str(timestamp)
806
+
807
+ # If there is no fractional component, simply return the original string
808
+ if '.' not in timestamp:
809
+ return timestamp
810
+
811
+ # Split out the main portion (up to the decimal point)
812
+ main_part, fractional_and_suffix = timestamp.split('.', 1)
813
+
814
+ # Separate fractional digits from the suffix (typically ' UTC')
815
+ if ' ' in fractional_and_suffix:
816
+ fractional_part, suffix = fractional_and_suffix.split(' ', 1)
817
+ suffix = ' ' + suffix # Re-attach the space removed by split
818
+ else:
819
+ fractional_part, suffix = fractional_and_suffix, ''
820
+
821
+ # Guarantee exactly two digits for the fractional part
822
+ fractional_part = (fractional_part + '00')[:2]
823
+
824
+ return f"{main_part}.{fractional_part}{suffix}"
825
+
826
+ def _count_categories(self, detections: list, config: VehiclePeopleDroneMonitoringConfig) -> dict:
827
+ counts = {}
828
+ for det in detections:
829
+ cat = det.get('category', 'unknown')
830
+ counts[cat] = counts.get(cat, 0) + 1
831
+ return {
832
+ "total_count": sum(counts.values()),
833
+ "per_category_count": counts,
834
+ "detections": [
835
+ {
836
+ "bounding_box": det.get("bounding_box"),
837
+ "category": det.get("category"),
838
+ "confidence": det.get("confidence"),
839
+ "track_id": det.get("track_id"),
840
+ "frame_id": det.get("frame_id")
841
+ }
842
+ for det in detections
843
+ ]
844
+ }
845
+
846
+ def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
847
+ return [
848
+ {
849
+ "category": det.get("category", "unknown"),
850
+ "confidence": det.get("confidence", 0.0),
851
+ "bounding_box": det.get("bounding_box", {})
852
+ }
853
+ for det in detections
854
+ ]
855
+
856
+ def _compute_iou(self, box1: Any, box2: Any) -> float:
857
+ def _bbox_to_list(bbox):
858
+ if bbox is None:
859
+ return []
860
+ if isinstance(bbox, list):
861
+ return bbox[:4] if len(bbox) >= 4 else []
862
+ if isinstance(bbox, dict):
863
+ if "xmin" in bbox:
864
+ return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
865
+ if "x1" in bbox:
866
+ return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
867
+ values = [v for v in bbox.values() if isinstance(v, (int, float))]
868
+ return values[:4] if len(values) >= 4 else []
869
+ return []
870
+
871
+ l1 = _bbox_to_list(box1)
872
+ l2 = _bbox_to_list(box2)
873
+ if len(l1) < 4 or len(l2) < 4:
874
+ return 0.0
875
+ x1_min, y1_min, x1_max, y1_max = l1
876
+ x2_min, y2_min, x2_max, y2_max = l2
877
+ x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
878
+ y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
879
+ x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
880
+ y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
881
+ inter_x_min = max(x1_min, x2_min)
882
+ inter_y_min = max(y1_min, y2_min)
883
+ inter_x_max = min(x1_max, x2_max)
884
+ inter_y_max = min(y1_max, y2_max)
885
+ inter_w = max(0.0, inter_x_max - inter_x_min)
886
+ inter_h = max(0.0, inter_y_max - inter_y_min)
887
+ inter_area = inter_w * inter_h
888
+ area1 = (x1_max - x1_min) * (y1_max - y1_min)
889
+ area2 = (x2_max - x2_min) * (y2_max - y2_min)
890
+ union_area = area1 + area2 - inter_area
891
+ return (inter_area / union_area) if union_area > 0 else 0.0
892
+
893
+ def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
894
+ if raw_id is None or bbox is None:
895
+ return raw_id
896
+ now = time.time()
897
+ if raw_id in self._track_aliases:
898
+ canonical_id = self._track_aliases[raw_id]
899
+ track_info = self._canonical_tracks.get(canonical_id)
900
+ if track_info is not None:
901
+ track_info["last_bbox"] = bbox
902
+ track_info["last_update"] = now
903
+ track_info["raw_ids"].add(raw_id)
904
+ return canonical_id
905
+ for canonical_id, info in self._canonical_tracks.items():
906
+ if now - info["last_update"] > self._track_merge_time_window:
907
+ continue
908
+ iou = self._compute_iou(bbox, info["last_bbox"])
909
+ if iou >= self._track_merge_iou_threshold:
910
+ self._track_aliases[raw_id] = canonical_id
911
+ info["last_bbox"] = bbox
912
+ info["last_update"] = now
913
+ info["raw_ids"].add(raw_id)
914
+ return canonical_id
915
+ canonical_id = raw_id
916
+ self._track_aliases[raw_id] = canonical_id
917
+ self._canonical_tracks[canonical_id] = {
918
+ "last_bbox": bbox,
919
+ "last_update": now,
920
+ "raw_ids": {raw_id},
921
+ }
922
+ return canonical_id
923
+
924
+ def _get_tracking_start_time(self) -> str:
925
+ if self._tracking_start_time is None:
926
+ return "N/A"
927
+ return self._format_timestamp(self._tracking_start_time)
928
+
929
+ def _set_tracking_start_time(self) -> None:
930
+ self._tracking_start_time = time.time()
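
For orientation, the sketch below shows one plausible way to drive the new drone traffic monitoring use case from matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py. It is a minimal, hypothetical example: the import path is inferred from the file list, BaseConfig is assumed to need no additional required arguments, and result.data is assumed to expose the payload passed to create_result(); the zone polygon, category indices, and stream_info keys are taken from the diff itself.

    # Hypothetical usage sketch -- not part of the package diff above.
    # Assumes the import path matches the file listing and that BaseConfig
    # needs no extra required arguments; adjust to the real API as needed.
    from matrice_analytics.post_processing.usecases.drone_traffic_monitoring import (
        DroneTrafficMonitoringUsecase,
        VehiclePeopleDroneMonitoringConfig,
    )

    # Zone polygons use the {"zones": {name: [[x, y], ...]}} shape read by process();
    # the polygon below is the JBK_720_GATE example from the source comments.
    config = VehiclePeopleDroneMonitoringConfig(
        confidence_threshold=0.6,
        zone_config={"zones": {"Interest_Region": [[86, 328], [844, 317], [1277, 520], [1273, 707], [125, 713]]}},
    )

    # One frame of YOLO-style detections; _normalize_yolo_results maps
    # 'class'/'conf'/'bbox' keys onto the internal schema via index_to_category.
    detections = [
        {"bbox": [100, 200, 180, 260], "conf": 0.91, "class": 3},  # 3 -> "car"
        {"bbox": [400, 420, 430, 470], "conf": 0.72, "class": 0},  # 0 -> "pedestrian"
    ]

    usecase = DroneTrafficMonitoringUsecase()
    stream_info = {"input_settings": {"start_frame": 1, "end_frame": 1, "original_fps": 30}}
    result = usecase.process(detections, config, stream_info=stream_info)

    # create_result() wraps the output; the per-frame aggregate is keyed by frame number.
    print(result.data["agg_summary"])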