matrice_analytics-0.1.60-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196)
  1. matrice_analytics/__init__.py +28 -0
  2. matrice_analytics/boundary_drawing_internal/README.md +305 -0
  3. matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
  4. matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
  5. matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
  6. matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
  7. matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
  8. matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
  9. matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
  10. matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
  11. matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
  12. matrice_analytics/post_processing/README.md +455 -0
  13. matrice_analytics/post_processing/__init__.py +732 -0
  14. matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
  15. matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
  16. matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
  17. matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
  18. matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
  19. matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
  20. matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
  21. matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
  22. matrice_analytics/post_processing/config.py +146 -0
  23. matrice_analytics/post_processing/core/__init__.py +63 -0
  24. matrice_analytics/post_processing/core/base.py +704 -0
  25. matrice_analytics/post_processing/core/config.py +3291 -0
  26. matrice_analytics/post_processing/core/config_utils.py +925 -0
  27. matrice_analytics/post_processing/face_reg/__init__.py +43 -0
  28. matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
  29. matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
  30. matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
  31. matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
  32. matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
  33. matrice_analytics/post_processing/ocr/__init__.py +0 -0
  34. matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
  35. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
  36. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
  37. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
  38. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
  39. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
  40. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
  41. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
  42. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
  43. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
  44. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
  45. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
  46. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
  47. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
  48. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
  49. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
  50. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
  51. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
  52. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
  53. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
  54. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
  55. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
  56. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
  57. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
  58. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
  59. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
  60. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
  61. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
  62. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
  63. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
  64. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
  65. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
  66. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
  67. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
  68. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
  69. matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
  70. matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
  71. matrice_analytics/post_processing/post_processor.py +1175 -0
  72. matrice_analytics/post_processing/test_cases/__init__.py +1 -0
  73. matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
  74. matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
  75. matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
  76. matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
  77. matrice_analytics/post_processing/test_cases/test_config.py +852 -0
  78. matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
  79. matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
  80. matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
  81. matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
  82. matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
  83. matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
  84. matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
  85. matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
  86. matrice_analytics/post_processing/usecases/__init__.py +267 -0
  87. matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
  88. matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
  89. matrice_analytics/post_processing/usecases/age_detection.py +842 -0
  90. matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
  91. matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
  92. matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
  93. matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
  94. matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
  95. matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
  96. matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
  97. matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
  98. matrice_analytics/post_processing/usecases/car_service.py +1601 -0
  99. matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
  100. matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
  101. matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
  102. matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
  103. matrice_analytics/post_processing/usecases/color/clip.py +660 -0
  104. matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
  105. matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
  106. matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
  107. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
  108. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
  109. matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
  110. matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
  111. matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
  112. matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
  113. matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
  114. matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
  115. matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
  116. matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
  117. matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
  118. matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
  119. matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
  120. matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
  121. matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
  122. matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
  123. matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
  124. matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
  125. matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
  126. matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
  127. matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
  128. matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
  129. matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
  130. matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
  131. matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
  132. matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
  133. matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
  134. matrice_analytics/post_processing/usecases/leaf.py +821 -0
  135. matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
  136. matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
  137. matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
  138. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
  139. matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
  140. matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
  141. matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
  142. matrice_analytics/post_processing/usecases/parking.py +787 -0
  143. matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
  144. matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
  145. matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
  146. matrice_analytics/post_processing/usecases/people_counting.py +706 -0
  147. matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
  148. matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
  149. matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
  150. matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
  151. matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
  152. matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
  153. matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
  154. matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
  155. matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
  156. matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
  157. matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
  158. matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
  159. matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
  160. matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
  161. matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
  162. matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
  163. matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
  164. matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
  165. matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
  166. matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
  167. matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
  168. matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
  169. matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
  170. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
  171. matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
  172. matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
  173. matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
  174. matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
  175. matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
  176. matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
  177. matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
  178. matrice_analytics/post_processing/utils/__init__.py +150 -0
  179. matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
  180. matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
  181. matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
  182. matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
  183. matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
  184. matrice_analytics/post_processing/utils/color_utils.py +592 -0
  185. matrice_analytics/post_processing/utils/counting_utils.py +182 -0
  186. matrice_analytics/post_processing/utils/filter_utils.py +261 -0
  187. matrice_analytics/post_processing/utils/format_utils.py +293 -0
  188. matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
  189. matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
  190. matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
  191. matrice_analytics/py.typed +0 -0
  192. matrice_analytics-0.1.60.dist-info/METADATA +481 -0
  193. matrice_analytics-0.1.60.dist-info/RECORD +196 -0
  194. matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
  195. matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
  196. matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/usecases/dwell_detection.py
@@ -0,0 +1,829 @@
+from typing import Any, Dict, List, Optional, Tuple
+from dataclasses import asdict
+import time
+from datetime import datetime, timezone
+
+from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol
+from ..utils import (
+    filter_by_confidence,
+    apply_category_mapping,
+    count_objects_by_category,
+    count_objects_in_zones,
+    calculate_counting_summary,
+    match_results_structure,
+    bbox_smoothing,
+    BBoxSmoothingConfig,
+    BBoxSmoothingTracker,
+    get_bbox_center,
+    point_in_polygon
+)
+from dataclasses import dataclass, field
+from ..core.config import BaseConfig, AlertConfig
+
+@dataclass
+class DwellConfig(BaseConfig):
+    """Configuration for dwell detection use case."""
+    enable_smoothing: bool = True
+    centroid_threshold: float = 10.0
+    smoothing_algorithm: str = "observability"
+    smoothing_window_size: int = 20
+    smoothing_cooldown_frames: int = 5
+    smoothing_confidence_range_factor: float = 0.5
+    confidence_threshold: float = 0.6
+    dwell_threshold: int = 300  # Frames to consider a person as dwelling
+    usecase_categories: List[str] = field(default_factory=lambda: ['person'])
+    target_categories: List[str] = field(default_factory=lambda: ['person'])
+    alert_config: Optional[AlertConfig] = None
+    zone_config: Optional[Dict[str, Dict[str, List[List[float]]]]] = None
+    index_to_category: Optional[Dict[int, str]] = field(
+        default_factory=lambda: {0: "person"}
+    )
+    person_index: int = 0
+
+class DwellUseCase(BaseProcessor):
+    CATEGORY_DISPLAY = {"person": "Person"}
+
+    def __init__(self):
+        super().__init__("dwell")
+        self.category = "general"
+        self.CASE_TYPE: Optional[str] = 'dwell'
+        self.CASE_VERSION: Optional[str] = '1.0'
+        self.target_categories = ['person']
+        self.smoothing_tracker = None
+        self.tracker = None
+        self._total_frame_counter = 0
+        self._global_frame_offset = 0
+        self._tracking_start_time = None
+        self._track_aliases: Dict[Any, Any] = {}
+        self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
+        self._track_merge_iou_threshold: float = 0.05
+        self._track_merge_time_window: float = 7.0
+        self._ascending_alert_list: List[int] = []
+        self.current_incident_end_timestamp: str = "N/A"
+        self._stationary_tracks: Dict[Any, Dict[str, Any]] = {}
+        self._zone_current_track_ids: Dict[str, set] = {}
+        self._zone_total_track_ids: Dict[str, set] = {}
+        self._zone_current_counts: Dict[str, int] = {}
+        self._zone_total_counts: Dict[str, int] = {}
+        self.start_timer = None
+
+    def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
+                stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
+        start_time = time.time()
+        if not isinstance(config, DwellConfig):
+            return self.create_error_result("Invalid config type", usecase=self.name, category=self.category, context=context)
+        if context is None:
+            context = ProcessingContext()
+
+        input_format = match_results_structure(data)
+        context.input_format = input_format
+        context.confidence_threshold = config.confidence_threshold
+
+        processed_data = filter_by_confidence(data, config.confidence_threshold)
+        if config.index_to_category:
+            processed_data = apply_category_mapping(processed_data, config.index_to_category)
+        processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
+
+        if config.enable_smoothing:
+            if self.smoothing_tracker is None:
+                smoothing_config = BBoxSmoothingConfig(
+                    smoothing_algorithm=config.smoothing_algorithm,
+                    window_size=config.smoothing_window_size,
+                    cooldown_frames=config.smoothing_cooldown_frames,
+                    confidence_threshold=config.confidence_threshold,
+                    confidence_range_factor=config.smoothing_confidence_range_factor,
+                    enable_smoothing=True
+                )
+                self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
+            processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
+
+        try:
+            from ..advanced_tracker import AdvancedTracker
+            from ..advanced_tracker.config import TrackerConfig
+            if self.tracker is None:
+                tracker_config = TrackerConfig()
+                self.tracker = AdvancedTracker(tracker_config)
+            processed_data = self.tracker.update(processed_data)
+        except Exception as e:
+            self.logger.warning(f"AdvancedTracker failed: {e}")
+
+        dwell_data = self._check_dwell_objects(processed_data, config)
+        self._update_tracking_state(dwell_data)
+        self._total_frame_counter += 1
+
+        frame_number = None
+        if stream_info:
+            input_settings = stream_info.get("input_settings", {})
+            start_frame = input_settings.get("start_frame")
+            end_frame = input_settings.get("end_frame")
+            if start_frame is not None and end_frame is not None and start_frame == end_frame:
+                frame_number = start_frame
+
+        counting_summary = self._count_categories(dwell_data, config)
+        total_counts = self.get_total_counts()
+        counting_summary['total_counts'] = total_counts
+
+        zone_analysis = {}
+        if config.zone_config and config.zone_config['zones']:
+            frame_data = processed_data
+            zone_analysis = count_objects_in_zones(frame_data, config.zone_config['zones'])
+            if zone_analysis:
+                zone_analysis = self._update_zone_tracking(zone_analysis, dwell_data, config)
+
+        alerts = self._check_alerts(counting_summary, zone_analysis, frame_number, config)
+        predictions = self._extract_predictions(dwell_data)
+
+        incidents_list = self._generate_incidents(counting_summary, zone_analysis, alerts, config, frame_number, stream_info)
+        tracking_stats_list = self._generate_tracking_stats(counting_summary, zone_analysis, alerts, config, frame_number, stream_info)
+        business_analytics_list = self._generate_business_analytics(counting_summary, zone_analysis, alerts, config, stream_info, is_empty=True)
+        summary_list = self._generate_summary(counting_summary, zone_analysis, incidents_list, tracking_stats_list, business_analytics_list, alerts)
+
+        incidents = incidents_list[0] if incidents_list else {}
+        tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
+        business_analytics = business_analytics_list[0] if business_analytics_list else {}
+        summary = summary_list[0] if summary_list else {}
+        agg_summary = {str(frame_number): {
+            "incidents": incidents,
+            "tracking_stats": tracking_stats,
+            "business_analytics": business_analytics,
+            "alerts": alerts,
+            "zone_analysis": zone_analysis,
+            "human_text": summary}
+        }
+
+        context.mark_completed()
+        result = self.create_result(
+            data={"agg_summary": agg_summary},
+            usecase=self.name,
+            category=self.category,
+            context=context
+        )
+        return result
+
+    def _check_dwell_objects(self, data: List[Dict], config: DwellConfig) -> List[Dict]:
+        dwell_data = []
+        current_time = time.time()
+
+        for det in data:
+            if det.get('category') not in self.target_categories:
+                continue
+            track_id = det.get('track_id')
+            bbox = det.get('bounding_box')
+            if track_id is None or not bbox:
+                continue
+
+            centroid = self._calculate_centroid(bbox)
+            in_zone = self._is_in_zone(bbox, config.zone_config['zones'] if config.zone_config else None)
+
+            if track_id not in self._stationary_tracks:
+                self._stationary_tracks[track_id] = {
+                    'centroid': centroid,
+                    'frame_count': 1,
+                    'start_time': current_time,
+                    'bbox': bbox,
+                    'in_zone': in_zone
+                }
+                track_info = self._stationary_tracks[track_id]
+                if track_info['frame_count'] >= config.dwell_threshold and in_zone:
+                    det['category'] = 'dwell_person'
+                    dwell_data.append(det)
+            else:
+                track_info = self._stationary_tracks[track_id]
+                prev_centroid = track_info['centroid']
+                track_info['frame_count'] += 1
+                track_info['bbox'] = bbox
+                track_info['in_zone'] = in_zone
+
+                if self._is_centroid_stationary(centroid, prev_centroid, config.centroid_threshold):
+                    if track_info['frame_count'] >= config.dwell_threshold and in_zone:
+                        det['category'] = 'dwell_person'
+                        dwell_data.append(det)
+                else:
+                    track_info['frame_count'] = max(1, track_info['frame_count'] - 5)
+                    track_info['centroid'] = centroid
+                    track_info['start_time'] = current_time
+
+        return dwell_data
+
+    def _calculate_centroid(self, bbox: Dict) -> tuple:
+        if 'xmin' in bbox:
+            x = (bbox['xmin'] + bbox['xmax']) / 2
+            y = (bbox['ymin'] + bbox['ymax']) / 2
+        elif 'x1' in bbox:
+            x = (bbox['x1'] + bbox['x2']) / 2
+            y = (bbox['y1'] + bbox['y2']) / 2
+        else:
+            return (0, 0)
+        return (x, y)
+
+    def _is_centroid_stationary(self, centroid: tuple, prev_centroid: tuple, threshold: float) -> bool:
+        x1, y1 = centroid
+        x2, y2 = prev_centroid
+        distance = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
+        return distance < threshold
+
+    def _is_in_zone(self, bbox: Dict, zones: Optional[Dict[str, List[List[float]]]]) -> bool:
+        if not zones:
+            return True
+        center = get_bbox_center(bbox)
+        for zone_name, zone_polygon in zones.items():
+            polygon_points = [(point[0], point[1]) for point in zone_polygon]
+            if point_in_polygon(center, polygon_points):
+                return True
+        return False
+
+    def _update_zone_tracking(self, zone_analysis: Dict[str, Dict[str, int]], detections: List[Dict], config: DwellConfig) -> Dict[str, Dict[str, Any]]:
+        if not zone_analysis or not config.zone_config or not config.zone_config['zones']:
+            return {}
+
+        enhanced_zone_analysis = {}
+        zones = config.zone_config['zones']
+        current_frame_zone_tracks = {}
+
+        for zone_name in zones.keys():
+            current_frame_zone_tracks[zone_name] = set()
+            if zone_name not in self._zone_current_track_ids:
+                self._zone_current_track_ids[zone_name] = set()
+            if zone_name not in self._zone_total_track_ids:
+                self._zone_total_track_ids[zone_name] = set()
+
+        for detection in detections:
+            track_id = detection.get("track_id")
+            if track_id is None:
+                continue
+            bbox = detection.get("bounding_box")
+            if not bbox:
+                continue
+            center_point = get_bbox_center(bbox)
+            for zone_name, zone_polygon in zones.items():
+                polygon_points = [(point[0], point[1]) for point in zone_polygon]
+                if point_in_polygon(center_point, polygon_points):
+                    current_frame_zone_tracks[zone_name].add(track_id)
+
+        for zone_name, zone_counts in zone_analysis.items():
+            current_tracks = current_frame_zone_tracks.get(zone_name, set())
+            self._zone_current_track_ids[zone_name] = current_tracks
+            self._zone_total_track_ids[zone_name].update(current_tracks)
+            self._zone_current_counts[zone_name] = len(current_tracks)
+            self._zone_total_counts[zone_name] = len(self._zone_total_track_ids[zone_name])
+            enhanced_zone_analysis[zone_name] = {
+                "current_count": self._zone_current_counts[zone_name],
+                "total_count": self._zone_total_counts[zone_name],
+                "current_track_ids": list(current_tracks),
+                "total_track_ids": list(self._zone_total_track_ids[zone_name]),
+                "original_counts": zone_counts
+            }
+
+        return enhanced_zone_analysis
+
+    def _check_alerts(self, summary: dict, zone_analysis: Dict, frame_number: Any, config: DwellConfig) -> List[Dict]:
+        def get_trend(data, lookback=900, threshold=0.6):
+            window = data[-lookback:] if len(data) >= lookback else data
+            if len(window) < 2:
+                return True
+            increasing = sum(1 for i in range(1, len(window)) if window[i] >= window[i - 1])
+            ratio = increasing / (len(window) - 1)
+            return ratio >= threshold
+
+        frame_key = str(frame_number) if frame_number is not None else "current_frame"
+        alerts = []
+        total_detections = summary.get("total_count", 0)
+        per_category_count = summary.get("per_category_count", {})
+
+        if not config.alert_config:
+            return alerts
+
+        if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
+            for category, threshold in config.alert_config.count_thresholds.items():
+                if category == "all" and total_detections > threshold:
+                    alerts.append({
+                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                        "alert_id": f"alert_{category}_{frame_key}",
+                        "incident_category": self.CASE_TYPE,
+                        "threshold_level": threshold,
+                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                          getattr(config.alert_config, 'alert_value', ['JSON']))}
+                    })
+                elif category == "dwell_person" and per_category_count.get(category, 0) > threshold:
+                    alerts.append({
+                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                        "alert_id": f"alert_{category}_{frame_key}",
+                        "incident_category": self.CASE_TYPE,
+                        "threshold_level": threshold,
+                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                          getattr(config.alert_config, 'alert_value', ['JSON']))}
+                    })
+        return alerts
+
+    def _generate_incidents(self, counting_summary: Dict, zone_analysis: Dict, alerts: List, config: DwellConfig,
+                            frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+        incidents = []
+        total_detections = counting_summary.get("total_count", 0)
+        current_timestamp = self._get_current_timestamp_str(stream_info)
+        camera_info = self.get_camera_info_from_stream(stream_info)
+
+        self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list
+
+        if total_detections > 0:
+            level = "low"
+            intensity = 5.0
+            start_timestamp = self._get_start_timestamp_str(stream_info)
+            if start_timestamp and self.current_incident_end_timestamp == 'N/A':
+                self.current_incident_end_timestamp = 'Incident still active'
+            elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
+                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
+                    self.current_incident_end_timestamp = current_timestamp
+            elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
+                self.current_incident_end_timestamp = 'N/A'
+
+            if config.alert_config and config.alert_config.count_thresholds:
+                threshold = config.alert_config.count_thresholds.get("all", 15)
+                intensity = min(10.0, (total_detections / threshold) * 10)
+                if intensity >= 9:
+                    level = "critical"
+                    self._ascending_alert_list.append(3)
+                elif intensity >= 7:
+                    level = "significant"
+                    self._ascending_alert_list.append(2)
+                elif intensity >= 5:
+                    level = "medium"
+                    self._ascending_alert_list.append(1)
+                else:
+                    level = "low"
+                    self._ascending_alert_list.append(0)
+            else:
+                if total_detections > 30:
+                    level = "critical"
+                    intensity = 10.0
+                    self._ascending_alert_list.append(3)
+                elif total_detections > 25:
+                    level = "significant"
+                    intensity = 9.0
+                    self._ascending_alert_list.append(2)
+                elif total_detections > 15:
+                    level = "medium"
+                    intensity = 7.0
+                    self._ascending_alert_list.append(1)
+                else:
+                    level = "low"
+                    intensity = min(10.0, total_detections / 3.0)
+                    self._ascending_alert_list.append(0)
+
+            human_text_lines = [f"DWELL DETECTED @ {current_timestamp}:"]
+            human_text_lines.append(f"\tSeverity Level: {level}")
+            human_text = "\n".join(human_text_lines)
+
+            alert_settings = []
+            if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+                alert_settings.append({
+                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                    "incident_category": self.CASE_TYPE,
+                    "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                    "ascending": True,
+                    "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                      getattr(config.alert_config, 'alert_value', ['JSON']))}
+                })
+
+            event = self.create_incident(
+                incident_id=self.CASE_TYPE + '_' + str(frame_number),
+                incident_type=self.CASE_TYPE,
+                severity_level=level,
+                human_text=human_text,
+                camera_info=camera_info,
+                alerts=alerts,
+                alert_settings=alert_settings,
+                start_time=start_timestamp,
+                end_time=self.current_incident_end_timestamp,
+                level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7}
+            )
+            incidents.append(event)
+        else:
+            self._ascending_alert_list.append(0)
+            incidents.append({})
+
+        return incidents
+
+    def _generate_tracking_stats(self, counting_summary: Dict, zone_analysis: Dict, alerts: List, config: DwellConfig,
+                                 frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+        camera_info = self.get_camera_info_from_stream(stream_info)
+        tracking_stats = []
+        total_detections = counting_summary.get("total_count", 0)
+        total_counts_dict = counting_summary.get("total_counts", {})
+        per_category_count = counting_summary.get("per_category_count", {})
+        current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
+        start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
+        high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
+        high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
+
+        total_counts = [{"category": cat, "count": count} for cat, count in total_counts_dict.items() if count > 0]
+        current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items() if count > 0 or total_detections > 0]
+
+        detections = []
+        for detection in counting_summary.get("detections", []):
+            bbox = detection.get("bounding_box", {})
+            category = detection.get("category", "dwell_person")
+            detection_obj = self.create_detection_object(category, bbox)
+            detections.append(detection_obj)
+
+        alert_settings = []
+        if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+            alert_settings.append({
+                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                "incident_category": self.CASE_TYPE,
+                "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                "ascending": True,
+                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                  getattr(config.alert_config, 'alert_value', ['JSON']))}
+            })
+
+        human_text_lines = [f"Tracking Statistics:"]
+        human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")
+        # Append zone-wise current counts if available
+        if zone_analysis:
+            human_text_lines.append("\tZones (current):")
+            for zone_name, zone_data in zone_analysis.items():
+                current_count = 0
+                if isinstance(zone_data, dict):
+                    if "current_count" in zone_data:
+                        current_count = zone_data.get("current_count", 0)
+                    else:
+                        counts_dict = zone_data.get("original_counts") if isinstance(zone_data.get("original_counts"), dict) else zone_data
+                        current_count = counts_dict.get(
+                            "total",
+                            sum(v for v in counts_dict.values() if isinstance(v, (int, float)))
+                        )
+                human_text_lines.append(f"\t{zone_name}: {int(current_count)}")
+        else:
+            if any(count > 0 for count in per_category_count.values()):
+                for cat, count in per_category_count.items():
+                    if count > 0:
+                        human_text_lines.append(f"\t- {count} {cat.capitalize()} detected")
+            else:
+                human_text_lines.append(f"\t- No detections")
+        human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
+        # Append zone-wise total counts if available
+        if zone_analysis:
+            human_text_lines.append("\tZones (total):")
+            for zone_name, zone_data in zone_analysis.items():
+                total_count = 0
+                if isinstance(zone_data, dict):
+                    # Prefer the numeric cumulative total if available
+                    if "total_count" in zone_data and isinstance(zone_data.get("total_count"), (int, float)):
+                        total_count = zone_data.get("total_count", 0)
+                    # Fallback: compute from list of total_track_ids if present
+                    elif "total_track_ids" in zone_data and isinstance(zone_data.get("total_track_ids"), list):
+                        total_count = len(zone_data.get("total_track_ids", []))
+                    else:
+                        # Last resort: try to sum numeric values present
+                        counts_dict = zone_data if isinstance(zone_data, dict) else {}
+                        total_count = sum(v for v in counts_dict.values() if isinstance(v, (int, float)))
+                human_text_lines.append(f"\t{zone_name}: {int(total_count)}")
+        else:
+            for cat, count in total_counts_dict.items():
+                if count > 0:
+                    human_text_lines.append(f"\t{cat}: {count}")
+        if alerts:
+            for alert in alerts:
+                human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
+        else:
+            human_text_lines.append("Alerts: None")
+        human_text = "\n".join(human_text_lines)
+
+        reset_settings = [{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]
+        tracking_stat = self.create_tracking_stats(
+            total_counts=total_counts,
+            current_counts=current_counts,
+            detections=detections,
+            human_text=human_text,
+            camera_info=camera_info,
+            alerts=alerts,
+            alert_settings=alert_settings,
+            reset_settings=reset_settings,
+            start_time=high_precision_start_timestamp,
+            reset_time=high_precision_reset_timestamp
+        )
+        tracking_stats.append(tracking_stat)
+        return tracking_stats
+
+    def _generate_business_analytics(self, counting_summary: Dict, zone_analysis: Dict, alerts: Any, config: DwellConfig,
+                                     stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
+        if is_empty:
+            return []
+        return []
+
+    def _generate_summary(self, summary: dict, zone_analysis: Dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
+        lines = {}
+        lines["Application Name"] = self.CASE_TYPE
+        lines["Application Version"] = self.CASE_VERSION
+        if len(incidents) > 0:
+            lines["Incidents"] = f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
+        if len(tracking_stats) > 0:
+            lines["Tracking Statistics"] = f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
+        if len(business_analytics) > 0:
+            lines["Business Analytics"] = f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
+        if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
+            lines["Summary"] = "No Summary Data"
+        return ["\n".join(f"{k}: {v}" for k, v in lines.items())]
+
+    def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
+        frame_track_ids = {det.get('track_id') for det in detections if det.get('track_id') is not None}
+        total_track_ids = set()
+        for s in getattr(self, '_per_category_total_track_ids', {}).values():
+            total_track_ids.update(s)
+        return {
+            "total_count": len(total_track_ids),
+            "current_frame_count": len(frame_track_ids),
+            "total_unique_track_ids": len(total_track_ids),
+            "current_frame_track_ids": list(frame_track_ids),
+            "last_update_time": time.time(),
+            "total_frames_processed": getattr(self, '_total_frame_counter', 0)
+        }
+
+    def _update_tracking_state(self, detections: list):
+        if not hasattr(self, "_per_category_total_track_ids"):
+            self._per_category_total_track_ids = {cat: set() for cat in self.target_categories + ['dwell_person']}
+            self._current_frame_track_ids = {cat: set() for cat in self.target_categories + ['dwell_person']}
+
+        for det in detections:
+            cat = det.get("category")
+            raw_track_id = det.get("track_id")
+            if cat not in self.target_categories + ['dwell_person'] or raw_track_id is None:
+                continue
+            bbox = det.get("bounding_box", det.get("bbox"))
+            canonical_id = self._merge_or_register_track(raw_track_id, bbox)
+            det["track_id"] = canonical_id
+            self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
+            self._current_frame_track_ids[cat].add(canonical_id)
+
+    def get_total_counts(self):
+        return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
+
+    def _format_timestamp_for_stream(self, timestamp: float) -> str:
+        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
+        return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+    def _format_timestamp_for_video(self, timestamp: float) -> str:
+        hours = int(timestamp // 3600)
+        minutes = int((timestamp % 3600) // 60)
+        seconds = round(float(timestamp % 60), 2)
+        return f"{hours:02d}:{minutes:02d}:{seconds:05.2f}"
+
+    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
+        """Get formatted current timestamp based on stream type."""
+
+        if not stream_info:
+            return "00:00:00.00"
+        if precision:
+            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+                if frame_id:
+                    start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
+                else:
+                    start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
+                stream_time_str = self._format_timestamp_for_video(start_time)
+
+                return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
+            else:
+                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+
+        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+            if frame_id:
+                start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
+            else:
+                start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
+
+            stream_time_str = self._format_timestamp_for_video(start_time)
+
+            return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
+        else:
+            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+            if stream_time_str:
+                try:
+                    timestamp_str = stream_time_str.replace(" UTC", "")
+                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
+                    return self._format_timestamp_for_stream(timestamp)
+                except:
+                    return self._format_timestamp_for_stream(time.time())
+            else:
+                return self._format_timestamp_for_stream(time.time())
+
+    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
+        """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
+        if not stream_info:
+            return "00:00:00"
+
+        if precision:
+            if self.start_timer is None:
+                self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
+                return self._format_timestamp(self.start_timer)
+            elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
+                self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
+                return self._format_timestamp(self.start_timer)
+            else:
+                return self._format_timestamp(self.start_timer)
+
+        if self.start_timer is None:
+            self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
+            return self._format_timestamp(self.start_timer)
+        elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
+            self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
+            return self._format_timestamp(self.start_timer)
+
+        else:
+            if self.start_timer is not None:
+                return self._format_timestamp(self.start_timer)
+
+            if self._tracking_start_time is None:
+                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+                if stream_time_str:
+                    try:
+                        timestamp_str = stream_time_str.replace(" UTC", "")
+                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                    except:
+                        self._tracking_start_time = time.time()
+                else:
+                    self._tracking_start_time = time.time()
+
+            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+            dt = dt.replace(minute=0, second=0, microsecond=0)
+            return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+    def _count_categories(self, detections: list, config: DwellConfig) -> dict:
+        counts = {}
+        for det in detections:
+            cat = det.get('category', 'unknown')
+            counts[cat] = counts.get(cat, 0) + 1
+        return {
+            "total_count": sum(counts.values()),
+            "per_category_count": counts,
+            "detections": [
+                {
+                    "bounding_box": det.get("bounding_box"),
+                    "category": det.get("category"),
+                    "confidence": det.get("confidence"),
+                    "track_id": det.get("track_id"),
+                    "frame_id": det.get("frame_id")
+                }
+                for det in detections
+            ]
+        }
+
+    def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
+        return [
+            {
+                "category": det.get("category", "unknown"),
+                "confidence": det.get("confidence", 0.0),
+                "bounding_box": det.get("bounding_box", {})
+            }
+            for det in detections
+        ]
+
+    def _compute_iou(self, box1: Any, box2: Any) -> float:
+        def _bbox_to_list(bbox):
+            if bbox is None:
+                return []
+            if isinstance(bbox, list):
+                return bbox[:4] if len(bbox) >= 4 else []
+            if isinstance(bbox, dict):
+                if "xmin" in bbox:
+                    return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
+                if "x1" in bbox:
+                    return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
+                values = [v for v in bbox.values() if isinstance(v, (int, float))]
+                return values[:4] if len(values) >= 4 else []
+            return []
+
+        l1 = _bbox_to_list(box1)
+        l2 = _bbox_to_list(box2)
+        if len(l1) < 4 or len(l2) < 4:
+            return 0.0
+        x1_min, y1_min, x1_max, y1_max = l1
+        x2_min, y2_min, x2_max, y2_max = l2
+        x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
+        y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
+        x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
+        y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
+        inter_x_min = max(x1_min, x2_min)
+        inter_y_min = max(y1_min, y2_min)
+        inter_x_max = min(x1_max, x2_max)
+        inter_y_max = min(y1_max, y2_max)
+        inter_w = max(0.0, inter_x_max - inter_x_min)
+        inter_h = max(0.0, inter_y_max - inter_y_min)
+        inter_area = inter_w * inter_h
+        area1 = (x1_max - x1_min) * (y1_max - y1_min)
+        area2 = (x2_max - x2_min) * (y2_max - y2_min)
+        union_area = area1 + area2 - inter_area
+        return (inter_area / union_area) if union_area > 0 else 0.0
+
+    def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
+        if raw_id is None or bbox is None:
+            return raw_id
+        now = time.time()
+        if raw_id in self._track_aliases:
+            canonical_id = self._track_aliases[raw_id]
+            track_info = self._canonical_tracks.get(canonical_id)
+            if track_info is not None:
+                track_info["last_bbox"] = bbox
+                track_info["last_update"] = now
+                track_info["raw_ids"].add(raw_id)
+            return canonical_id
+        for canonical_id, info in self._canonical_tracks.items():
+            if now - info["last_update"] > self._track_merge_time_window:
+                continue
+            iou = self._compute_iou(bbox, info["last_bbox"])
+            if iou >= self._track_merge_iou_threshold:
+                self._track_aliases[raw_id] = canonical_id
+                info["last_bbox"] = bbox
+                info["last_update"] = now
+                info["raw_ids"].add(raw_id)
+                return canonical_id
+        canonical_id = raw_id
+        self._track_aliases[raw_id] = canonical_id
+        self._canonical_tracks[canonical_id] = {
+            "last_bbox": bbox,
+            "last_update": now,
+            "raw_ids": {raw_id},
+        }
+        return canonical_id
+
+    def _format_timestamp(self, timestamp: Any) -> str:
+        """Format a timestamp so that exactly two digits follow the decimal point (milliseconds).
+
+        The input can be either:
+        1. A numeric Unix timestamp (``float`` / ``int``) – it will first be converted to a
+           string in the format ``YYYY-MM-DD-HH:MM:SS.ffffff UTC``.
+        2. A string already following the same layout.
+
+        The returned value preserves the overall format of the input but truncates or pads
+        the fractional seconds portion to **exactly two digits**.
+
+        Example
+        -------
+        >>> self._format_timestamp("2025-08-19-04:22:47.187574 UTC")
+        '2025-08-19-04:22:47.18 UTC'
+        """
+
+        # Convert numeric timestamps to the expected string representation first
+        if isinstance(timestamp, (int, float)):
+            timestamp = datetime.fromtimestamp(timestamp, timezone.utc).strftime(
+                '%Y-%m-%d-%H:%M:%S.%f UTC'
+            )
+
+        # Ensure we are working with a string from here on
+        if not isinstance(timestamp, str):
+            return str(timestamp)
+
+        # If there is no fractional component, simply return the original string
+        if '.' not in timestamp:
+            return timestamp
+
+        # Split out the main portion (up to the decimal point)
+        main_part, fractional_and_suffix = timestamp.split('.', 1)
+
+        # Separate fractional digits from the suffix (typically ' UTC')
+        if ' ' in fractional_and_suffix:
+            fractional_part, suffix = fractional_and_suffix.split(' ', 1)
+            suffix = ' ' + suffix  # Re-attach the space removed by split
+        else:
+            fractional_part, suffix = fractional_and_suffix, ''
+
+        # Guarantee exactly two digits for the fractional part
+        fractional_part = (fractional_part + '00')[:2]
+
+        return f"{main_part}.{fractional_part}{suffix}"
+
+    def _get_tracking_start_time(self) -> str:
+        if self._tracking_start_time is None:
+            return "N/A"
+        return self._format_timestamp(self._tracking_start_time)
+
+    def _set_tracking_start_time(self) -> None:
+        self._tracking_start_time = time.time()
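For orientation, below is a minimal usage sketch of the dwell detection use case added in this release. It assumes that `DwellConfig` and `DwellUseCase` are imported from the module path listed above (`matrice_analytics/post_processing/usecases/dwell_detection.py`), that `BaseConfig` fields all have defaults, that raw detections are dictionaries carrying `category`, `confidence`, and an xmin/ymin/xmax/ymax `bounding_box` as read by the code, and that `stream_info["input_settings"]` uses the `start_frame`/`end_frame`/`original_fps`/`stream_time` keys the timestamp helpers look for. None of this is documented in the diff itself, so treat the snippet as an illustrative sketch rather than the package's official API.

# Illustrative sketch only; module path, detection format, and stream_info layout are
# inferred from the diff above, not from package documentation.
from matrice_analytics.post_processing.usecases.dwell_detection import DwellConfig, DwellUseCase

config = DwellConfig(
    confidence_threshold=0.6,
    dwell_threshold=300,        # frames a stationary person must persist before being flagged
    centroid_threshold=10.0,    # max centroid drift (pixels) still treated as "stationary"
    zone_config={"zones": {"lobby": [[0, 0], [640, 0], [640, 480], [0, 480]]}},
)

usecase = DwellUseCase()

# One frame's raw detections; category index 0 maps to "person" via index_to_category.
detections = [
    {
        "category": 0,
        "confidence": 0.91,
        "bounding_box": {"xmin": 120, "ymin": 80, "xmax": 210, "ymax": 320},
    }
]

# Assumed stream metadata layout, matching the keys the timestamp helpers read.
stream_info = {
    "input_settings": {
        "start_frame": 1,
        "end_frame": 1,
        "original_fps": 30,
        "stream_time": "2025-08-19-04:22:47.187574 UTC",
    }
}

result = usecase.process(detections, config, stream_info=stream_info)
# The returned ProcessingResult wraps {"agg_summary": {frame_number: {...}}} holding
# incidents, tracking_stats, alerts, zone_analysis, and a human-readable summary.

Because the tracker, per-track dwell counters, and zone totals live on the `DwellUseCase` instance, the same instance should be reused across consecutive frames of one stream rather than recreated per call.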