matrice-analytics 0.1.60__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196)
  1. matrice_analytics/__init__.py +28 -0
  2. matrice_analytics/boundary_drawing_internal/README.md +305 -0
  3. matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
  4. matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
  5. matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
  6. matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
  7. matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
  8. matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
  9. matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
  10. matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
  11. matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
  12. matrice_analytics/post_processing/README.md +455 -0
  13. matrice_analytics/post_processing/__init__.py +732 -0
  14. matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
  15. matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
  16. matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
  17. matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
  18. matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
  19. matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
  20. matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
  21. matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
  22. matrice_analytics/post_processing/config.py +146 -0
  23. matrice_analytics/post_processing/core/__init__.py +63 -0
  24. matrice_analytics/post_processing/core/base.py +704 -0
  25. matrice_analytics/post_processing/core/config.py +3291 -0
  26. matrice_analytics/post_processing/core/config_utils.py +925 -0
  27. matrice_analytics/post_processing/face_reg/__init__.py +43 -0
  28. matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
  29. matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
  30. matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
  31. matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
  32. matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
  33. matrice_analytics/post_processing/ocr/__init__.py +0 -0
  34. matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
  35. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
  36. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
  37. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
  38. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
  39. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
  40. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
  41. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
  42. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
  43. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
  44. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
  45. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
  46. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
  47. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
  48. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
  49. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
  50. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
  51. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
  52. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
  53. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
  54. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
  55. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
  56. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
  57. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
  58. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
  59. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
  60. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
  61. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
  62. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
  63. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
  64. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
  65. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
  66. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
  67. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
  68. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
  69. matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
  70. matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
  71. matrice_analytics/post_processing/post_processor.py +1175 -0
  72. matrice_analytics/post_processing/test_cases/__init__.py +1 -0
  73. matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
  74. matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
  75. matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
  76. matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
  77. matrice_analytics/post_processing/test_cases/test_config.py +852 -0
  78. matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
  79. matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
  80. matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
  81. matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
  82. matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
  83. matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
  84. matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
  85. matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
  86. matrice_analytics/post_processing/usecases/__init__.py +267 -0
  87. matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
  88. matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
  89. matrice_analytics/post_processing/usecases/age_detection.py +842 -0
  90. matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
  91. matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
  92. matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
  93. matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
  94. matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
  95. matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
  96. matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
  97. matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
  98. matrice_analytics/post_processing/usecases/car_service.py +1601 -0
  99. matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
  100. matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
  101. matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
  102. matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
  103. matrice_analytics/post_processing/usecases/color/clip.py +660 -0
  104. matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
  105. matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
  106. matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
  107. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
  108. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
  109. matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
  110. matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
  111. matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
  112. matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
  113. matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
  114. matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
  115. matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
  116. matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
  117. matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
  118. matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
  119. matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
  120. matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
  121. matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
  122. matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
  123. matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
  124. matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
  125. matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
  126. matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
  127. matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
  128. matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
  129. matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
  130. matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
  131. matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
  132. matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
  133. matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
  134. matrice_analytics/post_processing/usecases/leaf.py +821 -0
  135. matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
  136. matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
  137. matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
  138. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
  139. matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
  140. matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
  141. matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
  142. matrice_analytics/post_processing/usecases/parking.py +787 -0
  143. matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
  144. matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
  145. matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
  146. matrice_analytics/post_processing/usecases/people_counting.py +706 -0
  147. matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
  148. matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
  149. matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
  150. matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
  151. matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
  152. matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
  153. matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
  154. matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
  155. matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
  156. matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
  157. matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
  158. matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
  159. matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
  160. matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
  161. matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
  162. matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
  163. matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
  164. matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
  165. matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
  166. matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
  167. matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
  168. matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
  169. matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
  170. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
  171. matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
  172. matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
  173. matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
  174. matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
  175. matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
  176. matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
  177. matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
  178. matrice_analytics/post_processing/utils/__init__.py +150 -0
  179. matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
  180. matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
  181. matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
  182. matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
  183. matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
  184. matrice_analytics/post_processing/utils/color_utils.py +592 -0
  185. matrice_analytics/post_processing/utils/counting_utils.py +182 -0
  186. matrice_analytics/post_processing/utils/filter_utils.py +261 -0
  187. matrice_analytics/post_processing/utils/format_utils.py +293 -0
  188. matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
  189. matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
  190. matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
  191. matrice_analytics/py.typed +0 -0
  192. matrice_analytics-0.1.60.dist-info/METADATA +481 -0
  193. matrice_analytics-0.1.60.dist-info/RECORD +196 -0
  194. matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
  195. matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
  196. matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
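For readers who want to reproduce this file listing locally, a wheel is a plain zip archive. The sketch below assumes the wheel has already been downloaded and is named matrice_analytics-0.1.60-py3-none-any.whl (the exact filename on disk may differ):

    import zipfile

    # List every file shipped in the wheel (a wheel is a standard zip archive).
    with zipfile.ZipFile("matrice_analytics-0.1.60-py3-none-any.whl") as wheel:
        for name in wheel.namelist():
            print(name)

The diff below shows the single hunk for the people-counting use case module.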
matrice_analytics/post_processing/usecases/people_counting.py
@@ -0,0 +1,706 @@
+from typing import Any, Dict, List, Optional
+from dataclasses import asdict
+import time
+from datetime import datetime, timezone
+
+from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
+from ..utils import (
+    filter_by_confidence,
+    filter_by_categories,
+    apply_category_mapping,
+    count_objects_by_category,
+    count_objects_in_zones,
+    calculate_counting_summary,
+    match_results_structure,
+    bbox_smoothing,
+    BBoxSmoothingConfig,
+    BBoxSmoothingTracker
+)
+from dataclasses import dataclass, field
+from ..core.config import PeopleCountingConfig, BaseConfig, AlertConfig, ZoneConfig
+
+
+class PeopleCountingUseCase(BaseProcessor):
+    CATEGORY_DISPLAY = {
+        "person": "Person",
+        "people": "People",
+        "human": "Human",
+        "man": "Man",
+        "woman": "Woman",
+        "male": "Male",
+        "female": "Female"
+    }
+
+    def __init__(self):
+        super().__init__("people_counting")
+        self.category = "general"
+        self.CASE_TYPE: Optional[str] = 'people_counting'
+        self.CASE_VERSION: Optional[str] = '1.4'
+        self.target_categories = ['person', 'people', 'human', 'man', 'woman', 'male', 'female']
+        self.smoothing_tracker = None
+        self.tracker = None
+        self._total_frame_counter = 0
+        self._global_frame_offset = 0
+        self._tracking_start_time = None
+        self._track_aliases: Dict[Any, Any] = {}
+        self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
+        self._track_merge_iou_threshold: float = 0.05
+        self._track_merge_time_window: float = 7.0
+        self._ascending_alert_list: List[int] = []
+        self.current_incident_end_timestamp: str = "N/A"
+        self.start_timer = None
+
+    def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
+                stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
+        processing_start = time.time()
+        if not isinstance(config, PeopleCountingConfig):
+            return self.create_error_result("Invalid config type", usecase=self.name, category=self.category, context=context)
+        if context is None:
+            context = ProcessingContext()
+
+        input_format = match_results_structure(data)
+        context.input_format = input_format
+        context.confidence_threshold = config.confidence_threshold
+
+        if config.confidence_threshold is not None:
+            processed_data = filter_by_confidence(data, config.confidence_threshold)
+            self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
+        else:
+            processed_data = data
+            self.logger.debug("Did not apply confidence filtering since no threshold provided")
+
+        if config.index_to_category:
+            processed_data = apply_category_mapping(processed_data, config.index_to_category)
+            self.logger.debug("Applied category mapping")
+
+        if config.target_categories:
+            processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
+            self.logger.debug("Applied category filtering")
+
+        # if config.enable_smoothing:
+        #     if self.smoothing_tracker is None:
+        #         smoothing_config = BBoxSmoothingConfig(
+        #             smoothing_algorithm=config.smoothing_algorithm,
+        #             window_size=config.smoothing_window_size,
+        #             cooldown_frames=config.smoothing_cooldown_frames,
+        #             confidence_threshold=config.confidence_threshold,
+        #             confidence_range_factor=config.smoothing_confidence_range_factor,
+        #             enable_smoothing=True
+        #         )
+        #         self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
+        #     processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
+
+        try:
+            from ..advanced_tracker import AdvancedTracker
+            from ..advanced_tracker.config import TrackerConfig
+            if self.tracker is None:
+                tracker_config = TrackerConfig(
+                    track_high_thresh=0.4,
+                    track_low_thresh=0.05,
+                    new_track_thresh=0.3,
+                    match_thresh=0.8)
+                self.tracker = AdvancedTracker(tracker_config)
+                self.logger.info("Initialized AdvancedTracker for People Counting")
+            processed_data = self.tracker.update(processed_data)
+        except Exception as e:
+            self.logger.warning(f"AdvancedTracker failed: {e}")
+
+        self._update_tracking_state(processed_data)
+        self._total_frame_counter += 1
+
+        frame_number = None
+        if stream_info:
+            input_settings = stream_info.get("input_settings", {})
+            start_frame = input_settings.get("start_frame")
+            end_frame = input_settings.get("end_frame")
+            if start_frame is not None and end_frame is not None and start_frame == end_frame:
+                frame_number = start_frame
+
+        general_counting_summary = calculate_counting_summary(data)
+        counting_summary = self._count_categories(processed_data, config)
+        total_counts = self.get_total_counts()
+        counting_summary['total_counts'] = total_counts
+
+        alerts = self._check_alerts(counting_summary, frame_number, config)
+        predictions = self._extract_predictions(processed_data)
+
+        incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
+        tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
+        business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, stream_info, is_empty=True)
+        summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)
+
+        incidents = incidents_list[0] if incidents_list else {}
+        tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
+        business_analytics = business_analytics_list[0] if business_analytics_list else {}
+        summary = summary_list[0] if summary_list else {}
+        agg_summary = {str(frame_number): {
+            "incidents": incidents,
+            "tracking_stats": tracking_stats,
+            "business_analytics": business_analytics,
+            "alerts": alerts,
+            "human_text": summary}
+        }
+
+        context.mark_completed()
+        result = self.create_result(
+            data={"agg_summary": agg_summary},
+            usecase=self.name,
+            category=self.category,
+            context=context
+        )
+        proc_time = time.time() - processing_start
+        processing_latency_ms = proc_time * 1000.0
+        processing_fps = (1.0 / proc_time) if proc_time > 0 else None
+        # Log the per-frame performance metrics
+        self.logger.info(f"latency in ms: {processing_latency_ms} | Throughput fps: {processing_fps} | Frame_Number: {self._total_frame_counter}")
+        return result
+
+    def _check_alerts(self, summary: dict, frame_number: Any, config: PeopleCountingConfig) -> List[Dict]:
+        def get_trend(data, lookback=900, threshold=0.6):
+            window = data[-lookback:] if len(data) >= lookback else data
+            if len(window) < 2:
+                return True
+            increasing = 0
+            total = 0
+            for i in range(1, len(window)):
+                if window[i] >= window[i - 1]:
+                    increasing += 1
+                total += 1
+            ratio = increasing / total
+            return ratio >= threshold
+
+        frame_key = str(frame_number) if frame_number is not None else "current_frame"
+        alerts = []
+        total_detections = summary.get("total_count", 0)
+        total_counts_dict = summary.get("total_counts", {})
+        per_category_count = summary.get("per_category_count", {})
+
+        if not config.alert_config:
+            return alerts
+
+        if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
+            for category, threshold in config.alert_config.count_thresholds.items():
+                if category == "all" and total_detections > threshold:
+                    alerts.append({
+                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                        "alert_id": f"alert_{category}_{frame_key}",
+                        "incident_category": self.CASE_TYPE,
+                        "threshold_level": threshold,
+                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                          getattr(config.alert_config, 'alert_value', ['JSON']))}
+                    })
+                elif category in per_category_count and per_category_count[category] > threshold:
+                    alerts.append({
+                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                        "alert_id": f"alert_{category}_{frame_key}",
+                        "incident_category": self.CASE_TYPE,
+                        "threshold_level": threshold,
+                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                          getattr(config.alert_config, 'alert_value', ['JSON']))}
+                    })
+        return alerts
+
+    def _generate_incidents(self, counting_summary: Dict, alerts: List, config: PeopleCountingConfig,
+                            frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+        incidents = []
+        total_detections = counting_summary.get("total_count", 0)
+        current_timestamp = self._get_current_timestamp_str(stream_info)
+        camera_info = self.get_camera_info_from_stream(stream_info)
+
+        self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list
+
+        if total_detections > 0:
+            level = "low"
+            intensity = 5.0
+            start_timestamp = self._get_start_timestamp_str(stream_info)
+            if start_timestamp and self.current_incident_end_timestamp == 'N/A':
+                self.current_incident_end_timestamp = 'Incident still active'
+            elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
+                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
+                    self.current_incident_end_timestamp = current_timestamp
+            elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
+                self.current_incident_end_timestamp = 'N/A'
+
+            if config.alert_config and config.alert_config.count_thresholds:
+                threshold = config.alert_config.count_thresholds.get("all", 15)
+                intensity = min(10.0, (total_detections / threshold) * 10)
+                if intensity >= 9:
+                    level = "critical"
+                    self._ascending_alert_list.append(3)
+                elif intensity >= 7:
+                    level = "significant"
+                    self._ascending_alert_list.append(2)
+                elif intensity >= 5:
+                    level = "medium"
+                    self._ascending_alert_list.append(1)
+                else:
+                    level = "low"
+                    self._ascending_alert_list.append(0)
+            else:
+                if total_detections > 30:
+                    level = "critical"
+                    intensity = 10.0
+                    self._ascending_alert_list.append(3)
+                elif total_detections > 25:
+                    level = "significant"
+                    intensity = 9.0
+                    self._ascending_alert_list.append(2)
+                elif total_detections > 15:
+                    level = "medium"
+                    intensity = 7.0
+                    self._ascending_alert_list.append(1)
+                else:
+                    level = "low"
+                    intensity = min(10.0, total_detections / 3.0)
+                    self._ascending_alert_list.append(0)
+
+            human_text_lines = [f"COUNTING INCIDENTS DETECTED @ {current_timestamp}:"]
+            human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
+            human_text = "\n".join(human_text_lines)
+
+            alert_settings = []
+            if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+                alert_settings.append({
+                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                    "incident_category": self.CASE_TYPE,
+                    "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                    "ascending": True,
+                    "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                      getattr(config.alert_config, 'alert_value', ['JSON']))}
+                })
+
+            event = self.create_incident(
+                incident_id=f"{self.CASE_TYPE}_{frame_number}",
+                incident_type=self.CASE_TYPE,
+                severity_level=level,
+                human_text=human_text,
+                camera_info=camera_info,
+                alerts=alerts,
+                alert_settings=alert_settings,
+                start_time=start_timestamp,
+                end_time=self.current_incident_end_timestamp,
+                level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7}
+            )
+            incidents.append(event)
+        else:
+            self._ascending_alert_list.append(0)
+            incidents.append({})
+        return incidents
+
+    def _generate_tracking_stats(self, counting_summary: Dict, alerts: List, config: PeopleCountingConfig,
+                                 frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+        camera_info = self.get_camera_info_from_stream(stream_info)
+        tracking_stats = []
+        total_detections = counting_summary.get("total_count", 0)
+        total_counts_dict = counting_summary.get("total_counts", {})
+        per_category_count = counting_summary.get("per_category_count", {})
+        current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
+        start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
+        high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
+        high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
+
+        total_counts = [{"category": cat, "count": count} for cat, count in total_counts_dict.items() if count > 0]
+        current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items() if count > 0 or total_detections > 0]
+
+        detections = []
+        for detection in counting_summary.get("detections", []):
+            bbox = detection.get("bounding_box", {})
+            category = detection.get("category", "person")
+            if detection.get("masks"):
+                segmentation = detection.get("masks", [])
+                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+            elif detection.get("segmentation"):
+                segmentation = detection.get("segmentation")
+                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+            elif detection.get("mask"):
+                segmentation = detection.get("mask")
+                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+            else:
+                detection_obj = self.create_detection_object(category, bbox)
+            detections.append(detection_obj)
+
+        alert_settings = []
+        if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+            alert_settings.append({
+                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                "incident_category": self.CASE_TYPE,
+                "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                "ascending": True,
+                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                  getattr(config.alert_config, 'alert_value', ['JSON']))}
+            })
+
+        human_text_lines = []
+        human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")
+        for cat, count in per_category_count.items():
+            human_text_lines.append(f"\t- People Detected: {count}")
+        human_text_lines.append("")
+        human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
+        for cat, count in total_counts_dict.items():
+            if count > 0:
+                human_text_lines.append("")
+                human_text_lines.append(f"\t- Total unique people count: {count}")
+        if alerts:
+            for alert in alerts:
+                human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
+        else:
+            human_text_lines.append("Alerts: None")
+        human_text = "\n".join(human_text_lines)
+
+        reset_settings = [{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]
+        tracking_stat = self.create_tracking_stats(
+            total_counts=total_counts,
+            current_counts=current_counts,
+            detections=detections,
+            human_text=human_text,
+            camera_info=camera_info,
+            alerts=alerts,
+            alert_settings=alert_settings,
+            reset_settings=reset_settings,
+            start_time=high_precision_start_timestamp,
+            reset_time=high_precision_reset_timestamp
+        )
+        tracking_stats.append(tracking_stat)
+        return tracking_stats
+
+    def _generate_business_analytics(self, counting_summary: Dict, alerts: Any, config: PeopleCountingConfig,
+                                     stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
+        if is_empty:
+            return []
+
+    def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
+        """
+        Generate a human_text string for the tracking_stat, incident, business analytics and alerts.
+        """
+        lines = []
+        lines.append("Application Name: " + self.CASE_TYPE)
+        lines.append("Application Version: " + self.CASE_VERSION)
+        # if len(incidents) > 0:
+        #     lines.append("Incidents: " + f"\n\t{incidents[0].get('human_text', 'No incidents detected')}")
+        if len(tracking_stats) > 0:
+            lines.append("Tracking Statistics: " + f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}")
+        if len(business_analytics) > 0:
+            lines.append("Business Analytics: " + f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}")
+
+        if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
+            lines.append("Summary: " + "No Summary Data")
+
+        return ["\n".join(lines)]
+
+    def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
+        frame_track_ids = set()
+        for det in detections:
+            tid = det.get('track_id')
+            if tid is not None:
+                frame_track_ids.add(tid)
+        total_track_ids = set()
+        for s in getattr(self, '_per_category_total_track_ids', {}).values():
+            total_track_ids.update(s)
+        return {
+            "total_count": len(total_track_ids),
+            "current_frame_count": len(frame_track_ids),
+            "total_unique_track_ids": len(total_track_ids),
+            "current_frame_track_ids": list(frame_track_ids),
+            "last_update_time": time.time(),
+            "total_frames_processed": getattr(self, '_total_frame_counter', 0)
+        }
+
+    def _update_tracking_state(self, detections: list):
+        if not hasattr(self, "_per_category_total_track_ids"):
+            self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
+            self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
+
+        for det in detections:
+            cat = det.get("category")
+            raw_track_id = det.get("track_id")
+            if cat not in self.target_categories or raw_track_id is None:
+                continue
+            bbox = det.get("bounding_box", det.get("bbox"))
+            canonical_id = self._merge_or_register_track(raw_track_id, bbox)
+            det["track_id"] = canonical_id
+            self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
+            self._current_frame_track_ids[cat].add(canonical_id)
+
+    def get_total_counts(self):
+        return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
+
+    def _format_timestamp_for_stream(self, timestamp: float) -> str:
+        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
+        return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+    def _format_timestamp_for_video(self, timestamp: float) -> str:
+        hours = int(timestamp // 3600)
+        minutes = int((timestamp % 3600) // 60)
+        seconds = round(float(timestamp % 60), 2)
+        return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
+
+    def _format_timestamp(self, timestamp: Any) -> str:
+        """Format a timestamp to match the current timestamp format: YYYY:MM:DD HH:MM:SS.
+
+        The input can be either:
+        1. A numeric Unix timestamp (``float`` / ``int``) – it will be converted to datetime.
+        2. A string in the format ``YYYY-MM-DD-HH:MM:SS.ffffff UTC``.
+
+        The returned value will be in the format: YYYY:MM:DD HH:MM:SS (no milliseconds, no UTC suffix).
+
+        Example
+        -------
+        >>> self._format_timestamp("2025-10-27-19:31:20.187574 UTC")
+        '2025:10:27 19:31:20'
+        """
+
+        # Convert numeric timestamps to datetime first
+        if isinstance(timestamp, (int, float)):
+            dt = datetime.fromtimestamp(timestamp, timezone.utc)
+            return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+        # Ensure we are working with a string from here on
+        if not isinstance(timestamp, str):
+            return str(timestamp)
+
+        # Remove ' UTC' suffix if present
+        timestamp_clean = timestamp.replace(' UTC', '').strip()
+
+        # Remove milliseconds if present (everything after the last dot)
+        if '.' in timestamp_clean:
+            timestamp_clean = timestamp_clean.split('.')[0]
+
+        # Parse the timestamp string and convert to desired format
+        try:
+            # Handle format: YYYY-MM-DD-HH:MM:SS
+            if timestamp_clean.count('-') >= 2:
+                # Replace first two dashes with colons for date part, third with space
+                parts = timestamp_clean.split('-')
+                if len(parts) >= 4:
+                    # parts = ['2025', '10', '27', '19:31:20']
+                    formatted = f"{parts[0]}:{parts[1]}:{parts[2]} {'-'.join(parts[3:])}"
+                    return formatted
+        except Exception:
+            pass
+
+        # If parsing fails, return the cleaned string as-is
+        return timestamp_clean
+
+    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str] = None) -> str:
+        """Get formatted current timestamp based on stream type."""
+
+        if not stream_info:
+            return "00:00:00.00"
+        if precision:
+            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+                if frame_id:
+                    start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
+                else:
+                    start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
+                stream_time_str = self._format_timestamp_for_video(start_time)
+
+                return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
+            else:
+                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+
+        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+            if frame_id:
+                start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
+            else:
+                start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
+
+            stream_time_str = self._format_timestamp_for_video(start_time)
+
+
+            return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
+        else:
+            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+            if stream_time_str:
+                try:
+                    timestamp_str = stream_time_str.replace(" UTC", "")
+                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
+                    return self._format_timestamp_for_stream(timestamp)
+                except Exception:
+                    return self._format_timestamp_for_stream(time.time())
+            else:
+                return self._format_timestamp_for_stream(time.time())
+
+    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
+        """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
+        if not stream_info:
+            return "00:00:00"
+
+        if precision:
+            if self.start_timer is None:
+                candidate = stream_info.get("input_settings", {}).get("stream_time")
+                if not candidate or candidate == "NA":
+                    candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+                self.start_timer = candidate
+                return self._format_timestamp(self.start_timer)
+            elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
+                candidate = stream_info.get("input_settings", {}).get("stream_time")
+                if not candidate or candidate == "NA":
+                    candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+                self.start_timer = candidate
+                return self._format_timestamp(self.start_timer)
+            else:
+                return self._format_timestamp(self.start_timer)
+
+        if self.start_timer is None:
+            # Prefer direct input_settings.stream_time if available and not NA
+            candidate = stream_info.get("input_settings", {}).get("stream_time")
+            if not candidate or candidate == "NA":
+                # Fallback to nested stream_info.stream_time used by current timestamp path
+                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+                if stream_time_str:
+                    try:
+                        timestamp_str = stream_time_str.replace(" UTC", "")
+                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                        candidate = datetime.fromtimestamp(self._tracking_start_time, timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+                    except Exception:
+                        candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+                else:
+                    candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+            self.start_timer = candidate
+            return self._format_timestamp(self.start_timer)
+        elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
+            candidate = stream_info.get("input_settings", {}).get("stream_time")
+            if not candidate or candidate == "NA":
+                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+                if stream_time_str:
+                    try:
+                        timestamp_str = stream_time_str.replace(" UTC", "")
+                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                        ts = dt.replace(tzinfo=timezone.utc).timestamp()
+                        candidate = datetime.fromtimestamp(ts, timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+                    except Exception:
+                        candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+                else:
+                    candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+            self.start_timer = candidate
+            return self._format_timestamp(self.start_timer)
+
+        else:
+            if self.start_timer is not None and self.start_timer != "NA":
+                return self._format_timestamp(self.start_timer)
+
+            if self._tracking_start_time is None:
+                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+                if stream_time_str:
+                    try:
+                        timestamp_str = stream_time_str.replace(" UTC", "")
+                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                    except Exception:
+                        self._tracking_start_time = time.time()
+                else:
+                    self._tracking_start_time = time.time()
+
+            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+            dt = dt.replace(minute=0, second=0, microsecond=0)
+            return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+    def _count_categories(self, detections: list, config: PeopleCountingConfig) -> dict:
+        counts = {}
+        for det in detections:
+            cat = det.get('category', 'unknown')
+            counts[cat] = counts.get(cat, 0) + 1
+        return {
+            "total_count": sum(counts.values()),
+            "per_category_count": counts,
+            "detections": [
+                {
+                    "bounding_box": det.get("bounding_box"),
+                    "category": det.get("category"),
+                    "confidence": det.get("confidence"),
+                    "track_id": det.get("track_id"),
+                    "frame_id": det.get("frame_id")
+                }
+                for det in detections
+            ]
+        }
+
+    def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
+        return [
+            {
+                "category": det.get("category", "unknown"),
+                "confidence": det.get("confidence", 0.0),
+                "bounding_box": det.get("bounding_box", {})
+            }
+            for det in detections
+        ]
+
+    def _compute_iou(self, box1: Any, box2: Any) -> float:
+        def _bbox_to_list(bbox):
+            if bbox is None:
+                return []
+            if isinstance(bbox, list):
+                return bbox[:4] if len(bbox) >= 4 else []
+            if isinstance(bbox, dict):
+                if "xmin" in bbox:
+                    return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
+                if "x1" in bbox:
+                    return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
+                values = [v for v in bbox.values() if isinstance(v, (int, float))]
+                return values[:4] if len(values) >= 4 else []
+            return []
+
+        l1 = _bbox_to_list(box1)
+        l2 = _bbox_to_list(box2)
+        if len(l1) < 4 or len(l2) < 4:
+            return 0.0
+        x1_min, y1_min, x1_max, y1_max = l1
+        x2_min, y2_min, x2_max, y2_max = l2
+        x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
+        y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
+        x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
+        y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
+        inter_x_min = max(x1_min, x2_min)
+        inter_y_min = max(y1_min, y2_min)
+        inter_x_max = min(x1_max, x2_max)
+        inter_y_max = min(y1_max, y2_max)
+        inter_w = max(0.0, inter_x_max - inter_x_min)
+        inter_h = max(0.0, inter_y_max - inter_y_min)
+        inter_area = inter_w * inter_h
+        area1 = (x1_max - x1_min) * (y1_max - y1_min)
+        area2 = (x2_max - x2_min) * (y2_max - y2_min)
+        union_area = area1 + area2 - inter_area
+        return (inter_area / union_area) if union_area > 0 else 0.0
+
+    def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
+        if raw_id is None or bbox is None:
+            return raw_id
+        now = time.time()
+        if raw_id in self._track_aliases:
+            canonical_id = self._track_aliases[raw_id]
+            track_info = self._canonical_tracks.get(canonical_id)
+            if track_info is not None:
+                track_info["last_bbox"] = bbox
+                track_info["last_update"] = now
+                track_info["raw_ids"].add(raw_id)
+            return canonical_id
+        for canonical_id, info in self._canonical_tracks.items():
+            if now - info["last_update"] > self._track_merge_time_window:
+                continue
+            iou = self._compute_iou(bbox, info["last_bbox"])
+            if iou >= self._track_merge_iou_threshold:
+                self._track_aliases[raw_id] = canonical_id
+                info["last_bbox"] = bbox
+                info["last_update"] = now
+                info["raw_ids"].add(raw_id)
+                return canonical_id
+        canonical_id = raw_id
+        self._track_aliases[raw_id] = canonical_id
+        self._canonical_tracks[canonical_id] = {
+            "last_bbox": bbox,
+            "last_update": now,
+            "raw_ids": {raw_id},
+        }
+        return canonical_id
+
+    def _get_tracking_start_time(self) -> str:
+        if self._tracking_start_time is None:
+            return "N/A"
+        return self._format_timestamp(self._tracking_start_time)
+
+    def _set_tracking_start_time(self) -> None:
+        self._tracking_start_time = time.time()
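For orientation, the following is a minimal, hypothetical usage sketch of the PeopleCountingUseCase shown above, based only on the signatures visible in this diff. It assumes PeopleCountingConfig can be constructed with keyword arguments such as confidence_threshold (its constructor lives in core/config.py and is not shown here), that detections are plain dicts with category, confidence, and bounding_box keys, and that the returned ProcessingResult exposes its payload as .data.

    # Hypothetical usage sketch; constructor arguments and result attributes are assumptions.
    from matrice_analytics.post_processing.usecases.people_counting import PeopleCountingUseCase
    from matrice_analytics.post_processing.core.config import PeopleCountingConfig

    # Detections as plain dicts, matching the keys the use case reads.
    detections = [
        {"category": "person", "confidence": 0.92,
         "bounding_box": {"xmin": 10, "ymin": 20, "xmax": 110, "ymax": 220}},
    ]

    usecase = PeopleCountingUseCase()
    config = PeopleCountingConfig(confidence_threshold=0.5)  # assumed keyword argument
    result = usecase.process(detections, config)

    # Assumes ProcessingResult exposes the dict passed to create_result as .data.
    print(result.data["agg_summary"])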