matrice_analytics-0.1.60-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196)
  1. matrice_analytics/__init__.py +28 -0
  2. matrice_analytics/boundary_drawing_internal/README.md +305 -0
  3. matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
  4. matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
  5. matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
  6. matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
  7. matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
  8. matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
  9. matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
  10. matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
  11. matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
  12. matrice_analytics/post_processing/README.md +455 -0
  13. matrice_analytics/post_processing/__init__.py +732 -0
  14. matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
  15. matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
  16. matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
  17. matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
  18. matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
  19. matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
  20. matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
  21. matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
  22. matrice_analytics/post_processing/config.py +146 -0
  23. matrice_analytics/post_processing/core/__init__.py +63 -0
  24. matrice_analytics/post_processing/core/base.py +704 -0
  25. matrice_analytics/post_processing/core/config.py +3291 -0
  26. matrice_analytics/post_processing/core/config_utils.py +925 -0
  27. matrice_analytics/post_processing/face_reg/__init__.py +43 -0
  28. matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
  29. matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
  30. matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
  31. matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
  32. matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
  33. matrice_analytics/post_processing/ocr/__init__.py +0 -0
  34. matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
  35. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
  36. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
  37. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
  38. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
  39. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
  40. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
  41. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
  42. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
  43. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
  44. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
  45. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
  46. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
  47. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
  48. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
  49. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
  50. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
  51. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
  52. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
  53. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
  54. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
  55. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
  56. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
  57. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
  58. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
  59. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
  60. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
  61. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
  62. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
  63. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
  64. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
  65. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
  66. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
  67. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
  68. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
  69. matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
  70. matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
  71. matrice_analytics/post_processing/post_processor.py +1175 -0
  72. matrice_analytics/post_processing/test_cases/__init__.py +1 -0
  73. matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
  74. matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
  75. matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
  76. matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
  77. matrice_analytics/post_processing/test_cases/test_config.py +852 -0
  78. matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
  79. matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
  80. matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
  81. matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
  82. matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
  83. matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
  84. matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
  85. matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
  86. matrice_analytics/post_processing/usecases/__init__.py +267 -0
  87. matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
  88. matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
  89. matrice_analytics/post_processing/usecases/age_detection.py +842 -0
  90. matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
  91. matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
  92. matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
  93. matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
  94. matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
  95. matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
  96. matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
  97. matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
  98. matrice_analytics/post_processing/usecases/car_service.py +1601 -0
  99. matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
  100. matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
  101. matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
  102. matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
  103. matrice_analytics/post_processing/usecases/color/clip.py +660 -0
  104. matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
  105. matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
  106. matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
  107. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
  108. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
  109. matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
  110. matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
  111. matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
  112. matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
  113. matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
  114. matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
  115. matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
  116. matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
  117. matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
  118. matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
  119. matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
  120. matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
  121. matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
  122. matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
  123. matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
  124. matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
  125. matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
  126. matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
  127. matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
  128. matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
  129. matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
  130. matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
  131. matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
  132. matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
  133. matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
  134. matrice_analytics/post_processing/usecases/leaf.py +821 -0
  135. matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
  136. matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
  137. matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
  138. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
  139. matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
  140. matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
  141. matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
  142. matrice_analytics/post_processing/usecases/parking.py +787 -0
  143. matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
  144. matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
  145. matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
  146. matrice_analytics/post_processing/usecases/people_counting.py +706 -0
  147. matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
  148. matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
  149. matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
  150. matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
  151. matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
  152. matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
  153. matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
  154. matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
  155. matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
  156. matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
  157. matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
  158. matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
  159. matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
  160. matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
  161. matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
  162. matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
  163. matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
  164. matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
  165. matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
  166. matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
  167. matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
  168. matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
  169. matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
  170. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
  171. matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
  172. matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
  173. matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
  174. matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
  175. matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
  176. matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
  177. matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
  178. matrice_analytics/post_processing/utils/__init__.py +150 -0
  179. matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
  180. matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
  181. matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
  182. matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
  183. matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
  184. matrice_analytics/post_processing/utils/color_utils.py +592 -0
  185. matrice_analytics/post_processing/utils/counting_utils.py +182 -0
  186. matrice_analytics/post_processing/utils/filter_utils.py +261 -0
  187. matrice_analytics/post_processing/utils/format_utils.py +293 -0
  188. matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
  189. matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
  190. matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
  191. matrice_analytics/py.typed +0 -0
  192. matrice_analytics-0.1.60.dist-info/METADATA +481 -0
  193. matrice_analytics-0.1.60.dist-info/RECORD +196 -0
  194. matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
  195. matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
  196. matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/usecases/cardiomegaly_classification.py
@@ -0,0 +1,864 @@
+ """
+ Cardiomegaly Classification Use Case for Post-Processing
+
+ This module provides Cardiomegaly Classification functionality.
+ """
+
+ from typing import Any, Dict, List, Optional
+ from dataclasses import asdict, dataclass, field
+ import time
+ from datetime import datetime, timezone
+
+ from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
+ from ..core.config import BaseConfig, AlertConfig, ZoneConfig
+ from ..utils import (
+     filter_by_confidence,
+     filter_by_categories,
+     apply_category_mapping,
+     count_objects_by_category,
+     count_objects_in_zones,
+     calculate_counting_summary,
+     match_results_structure,
+     bbox_smoothing,
+     BBoxSmoothingConfig,
+     BBoxSmoothingTracker
+ )
+
+
+ @dataclass
+ class CardiomegalyConfig(BaseConfig):
+     """Configuration for the Cardiomegaly Classification use case."""
+
+     # Smoothing configuration
+     enable_smoothing: bool = True
+     smoothing_algorithm: str = "observability"  # "window" or "observability"
+     smoothing_window_size: int = 20
+     smoothing_cooldown_frames: int = 5
+     smoothing_confidence_range_factor: float = 0.5
+
+     # Confidence threshold
+     confidence_threshold: float = 0.6
+
+     usecase_categories: List[str] = field(
+         default_factory=lambda: ['cardiomegaly', 'normal']
+     )
+
+     target_categories: List[str] = field(
+         default_factory=lambda: ['cardiomegaly', 'normal']
+     )
+
+     alert_config: Optional[AlertConfig] = None
+
+     index_to_category: Optional[Dict[int, str]] = field(
+         default_factory=lambda: {
+             0: "cardiomegaly",
+             1: "normal"
+         }
+     )
+
+
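For reference, a minimal sketch of constructing this config with an alert threshold. This assumes BaseConfig's and AlertConfig's own fields all have defaults; the only alert attributes this file actually reads are count_thresholds, alert_type and alert_value:

    from matrice_analytics.post_processing.usecases.cardiomegaly_classification import CardiomegalyConfig
    from matrice_analytics.post_processing.core.config import AlertConfig

    # Alert when more than 15 detections appear in a single frame (field name
    # taken from the getattr calls below; the constructor signature is assumed).
    config = CardiomegalyConfig(
        confidence_threshold=0.6,
        alert_config=AlertConfig(count_thresholds={"all": 15}),
    )
    print(config.target_categories)  # ['cardiomegaly', 'normal']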
+ class CardiomegalyUseCase(BaseProcessor):
+     # Human-friendly display names for categories (internal name -> display label)
+     CATEGORY_DISPLAY = {
+         "cardiomegaly": "Cardiomegaly",
+         "normal": "Normal"
+     }
+
+     # Case identifiers used in incident and alert payloads. NOTE: the published
+     # file references self.CASE_TYPE and self.CASE_VERSION without defining them,
+     # which would raise AttributeError at runtime; the values below are assumed
+     # from the use-case name.
+     CASE_TYPE = "cardiomegaly_classification"
+     CASE_VERSION = "1.0"
+
+     def __init__(self):
+         super().__init__("cardiomegaly_classification")
+         self.category = "healthcare"
+
+         # List of categories to track
+         self.target_categories = ['cardiomegaly', 'normal']
+
+         # Initialize smoothing tracker
+         self.smoothing_tracker = None
+
+         # Initialize advanced tracker (will be created on first use)
+         self.tracker = None
+
+         # Initialize tracking state variables
+         self._total_frame_counter = 0
+         self._global_frame_offset = 0
+
+         # Track start time for "TOTAL SINCE" calculation
+         self._tracking_start_time = None
+
+         # ------------------------------------------------------------------ #
+         # Canonical tracking aliasing to avoid duplicate counts               #
+         # ------------------------------------------------------------------ #
+         # Maps raw tracker-generated IDs to stable canonical IDs that persist
+         # even if the underlying tracker re-assigns a new ID after a short
+         # interruption. This mirrors the logic used in people_counting to
+         # provide accurate unique counting.
+         self._track_aliases: Dict[Any, Any] = {}
+         self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
+         # Tunable parameters - adjust if necessary for specific scenarios
+         self._track_merge_iou_threshold: float = 0.05  # IoU >= 0.05 -> merge into one canonical track
+         self._track_merge_time_window: float = 7.0     # seconds within which to merge
+
+         self._ascending_alert_list: List[int] = []
+         self.current_incident_end_timestamp: str = "N/A"
+
+     def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
+                 stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
+         """
+         Main entry point for post-processing.
+         Applies category mapping, smoothing, counting, alerting, and summary generation.
+         Returns a ProcessingResult with all relevant outputs.
+         """
+         start_time = time.time()
+         # Ensure config is the correct type
+         if not isinstance(config, CardiomegalyConfig):
+             return self.create_error_result("Invalid config type", usecase=self.name, category=self.category,
+                                             context=context)
+         if context is None:
+             context = ProcessingContext()
+
+         # Detect input format and store it in the context
+         input_format = match_results_structure(data)
+         context.input_format = input_format
+         context.confidence_threshold = config.confidence_threshold
+
+         if config.confidence_threshold is not None:
+             processed_data = filter_by_confidence(data, config.confidence_threshold)
+             self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
+         else:
+             processed_data = data
+             self.logger.debug("Skipped confidence filtering: no threshold was provided")
+
+         # Apply category mapping if provided
+         if config.index_to_category:
+             processed_data = apply_category_mapping(processed_data, config.index_to_category)
+             self.logger.debug("Applied category mapping")
+
+         if config.target_categories:
+             processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
+             self.logger.debug("Applied category filtering")
+
+         # Apply bbox smoothing if enabled
+         if config.enable_smoothing:
+             if self.smoothing_tracker is None:
+                 smoothing_config = BBoxSmoothingConfig(
+                     smoothing_algorithm=config.smoothing_algorithm,
+                     window_size=config.smoothing_window_size,
+                     cooldown_frames=config.smoothing_cooldown_frames,
+                     confidence_threshold=config.confidence_threshold,
+                     confidence_range_factor=config.smoothing_confidence_range_factor,
+                     enable_smoothing=True
+                 )
+                 self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
+             processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
+
+         # Advanced tracking (BYTETracker-like)
+         try:
+             from ..advanced_tracker import AdvancedTracker
+             from ..advanced_tracker.config import TrackerConfig
+
+             # Create the tracker instance if it doesn't exist (preserves state across frames)
+             if self.tracker is None:
+                 tracker_config = TrackerConfig()
+                 self.tracker = AdvancedTracker(tracker_config)
+                 self.logger.info("Initialized AdvancedTracker for monitoring and tracking")
+
+             # The tracker expects the data in the same format as the input
+             # and adds track_id and frame_id to each detection
+             processed_data = self.tracker.update(processed_data)
+
+         except Exception as e:
+             # If the advanced tracker fails, fall back to the untracked detections
+             self.logger.warning(f"AdvancedTracker failed: {e}")
+
+         # Update tracking state for total count per label
+         self._update_tracking_state(processed_data)
+
+         # Update frame counter
+         self._total_frame_counter += 1
+
+         # Extract frame information from stream_info
+         frame_number = None
+         if stream_info:
+             input_settings = stream_info.get("input_settings", {})
+             start_frame = input_settings.get("start_frame")
+             end_frame = input_settings.get("end_frame")
+             # If start and end frame are the same, it's a single frame
+             if start_frame is not None and end_frame is not None and start_frame == end_frame:
+                 frame_number = start_frame
+
+         # Compute summaries and alerts
+         general_counting_summary = calculate_counting_summary(data)
+         counting_summary = self._count_categories(processed_data, config)
+         # Add total unique counts after tracking using only local state
+         total_counts = self.get_total_counts()
+         counting_summary['total_counts'] = total_counts
+
+         alerts = self._check_alerts(counting_summary, frame_number, config)
+         # Generate structured incidents, tracking stats and business analytics with frame-based keys
+         incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
+         tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
+         business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, frame_number, stream_info, is_empty=True)
+         summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)
+
+         # Extract frame-based dictionaries from the lists
+         incidents = incidents_list[0] if incidents_list else {}
+         tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
+         business_analytics = business_analytics_list[0] if business_analytics_list else {}
+         summary = summary_list[0] if summary_list else {}
+         agg_summary = {str(frame_number): {
+             "incidents": incidents,
+             "tracking_stats": tracking_stats,
+             "business_analytics": business_analytics,
+             "alerts": alerts,
+             "human_text": summary}
+         }
+         context.mark_completed()
+
+         # Build the result object following the new pattern
+         result = self.create_result(
+             data={"agg_summary": agg_summary},
+             usecase=self.name,
+             category=self.category,
+             context=context
+         )
+
+         return result
+
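To see the flow end to end, a minimal usage sketch. The detection-dict shape is inferred from the filtering and mapping steps above; the field values and the ProcessingResult.data attribute are assumptions:

    from matrice_analytics.post_processing.usecases.cardiomegaly_classification import (
        CardiomegalyConfig,
        CardiomegalyUseCase,
    )

    # Hypothetical raw model output; integer categories are mapped to names by
    # config.index_to_category inside process().
    raw_detections = [
        {"category": 0, "confidence": 0.91,
         "bounding_box": {"xmin": 40, "ymin": 30, "xmax": 420, "ymax": 400}},
    ]

    usecase = CardiomegalyUseCase()
    config = CardiomegalyConfig(confidence_threshold=0.6)
    result = usecase.process(raw_detections, config)
    # Assuming ProcessingResult exposes the payload as .data; with no stream_info
    # the frame key falls back to str(None).
    print(result.data["agg_summary"].keys())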
+     def _check_alerts(self, summary: dict, frame_number: Any, config: CardiomegalyConfig) -> List[Dict]:
+         """
+         Check if any alert thresholds are exceeded and return alert dicts.
+         """
+         def get_trend(data, lookback=900, threshold=0.6):
+             '''
+             Determine if the trend is ascending or descending based on actual value progression.
+             Works with the severity values 0, 1, 2, 3 (not just binary).
+             '''
+             window = data[-lookback:] if len(data) >= lookback else data
+             if len(window) < 2:
+                 return True  # not enough data to determine a trend
+             increasing = 0
+             total = 0
+             for i in range(1, len(window)):
+                 if window[i] >= window[i - 1]:
+                     increasing += 1
+                 total += 1
+             ratio = increasing / total
+             if ratio >= threshold:
+                 return True
+             elif ratio <= (1 - threshold):
+                 return False
+             return True  # ambiguous trend: treat as ascending rather than returning None
+
+         frame_key = str(frame_number) if frame_number is not None else "current_frame"
+         alerts = []
+         total_detections = summary.get("total_count", 0)        # CURRENT combined total count of all classes
+         total_counts_dict = summary.get("total_counts", {})     # TOTAL cumulative counts per class
+         cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0  # TOTAL combined cumulative count
+         per_category_count = summary.get("per_category_count", {})  # CURRENT count per class
+
+         if not config.alert_config:
+             return alerts
+
+         total = summary.get("total_count", 0)
+         if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
+             for category, threshold in config.alert_config.count_thresholds.items():
+                 if category == "all" and total > threshold:
+                     alerts.append({
+                         "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                         "alert_id": "alert_" + category + '_' + frame_key,
+                         "incident_category": self.CASE_TYPE,
+                         "threshold_level": threshold,
+                         "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                         "settings": dict(zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                              getattr(config.alert_config, 'alert_value', ['JSON'])))
+                     })
+                 elif category in per_category_count:
+                     count = per_category_count[category]
+                     if count > threshold:  # alert only when the count EXCEEDS the threshold
+                         alerts.append({
+                             "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                             "alert_id": "alert_" + category + '_' + frame_key,
+                             "incident_category": self.CASE_TYPE,
+                             "threshold_level": threshold,
+                             "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                             "settings": dict(zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                  getattr(config.alert_config, 'alert_value', ['JSON'])))
+                         })
+         return alerts
+
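To make the trend heuristic concrete, a small worked example of the ratio get_trend computes over the severity history (0 = low through 3 = critical, appended per frame by _generate_incidents):

    history = [0, 0, 1, 1, 2, 2, 3]
    pairs = list(zip(history, history[1:]))
    ratio = sum(b >= a for a, b in pairs) / len(pairs)
    print(ratio)  # 1.0 -> ratio >= 0.8, so the alert is reported as ascending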
+     def _generate_incidents(self, counting_summary: Dict, alerts: List, config: CardiomegalyConfig,
+                             frame_number: Optional[int] = None,
+                             stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+         """Generate structured incidents for the output format with frame-based keys."""
+         incidents = []
+         total_detections = counting_summary.get("total_count", 0)
+         current_timestamp = self._get_current_timestamp_str(stream_info)
+         camera_info = self.get_camera_info_from_stream(stream_info)
+
+         self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list
+
+         if total_detections > 0:
+             # Determine the event level based on thresholds
+             level = "low"
+             intensity = 5.0
+             start_timestamp = self._get_start_timestamp_str(stream_info)
+             if start_timestamp and self.current_incident_end_timestamp == 'N/A':
+                 self.current_incident_end_timestamp = 'Incident still active'
+             elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
+                 if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
+                     self.current_incident_end_timestamp = current_timestamp
+             elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
+                 self.current_incident_end_timestamp = 'N/A'
+
+             if config.alert_config and config.alert_config.count_thresholds:
+                 threshold = config.alert_config.count_thresholds.get("all", 15)
+                 intensity = min(10.0, (total_detections / threshold) * 10)
+
+                 if intensity >= 9:
+                     level = "critical"
+                     self._ascending_alert_list.append(3)
+                 elif intensity >= 7:
+                     level = "significant"
+                     self._ascending_alert_list.append(2)
+                 elif intensity >= 5:
+                     level = "medium"
+                     self._ascending_alert_list.append(1)
+                 else:
+                     level = "low"
+                     self._ascending_alert_list.append(0)
+             else:
+                 if total_detections > 30:
+                     level = "critical"
+                     intensity = 10.0
+                     self._ascending_alert_list.append(3)
+                 elif total_detections > 25:
+                     level = "significant"
+                     intensity = 9.0
+                     self._ascending_alert_list.append(2)
+                 elif total_detections > 15:
+                     level = "medium"
+                     intensity = 7.0
+                     self._ascending_alert_list.append(1)
+                 else:
+                     level = "low"
+                     intensity = min(10.0, total_detections / 3.0)
+                     self._ascending_alert_list.append(0)
+
+             # Generate human text in the new format
+             human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
+             human_text_lines.append(f"\tSeverity Level: {self.CASE_TYPE}: {level}")
+             human_text = "\n".join(human_text_lines)
+
+             alert_settings = []
+             if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+                 alert_settings.append({
+                     "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                     "incident_category": self.CASE_TYPE,
+                     "threshold_level": getattr(config.alert_config, 'count_thresholds', {}),
+                     "ascending": True,
+                     "settings": dict(zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                          getattr(config.alert_config, 'alert_value', ['JSON'])))
+                 })
+
+             event = self.create_incident(incident_id=self.CASE_TYPE + '_' + str(frame_number), incident_type=self.CASE_TYPE,
+                                          severity_level=level, human_text=human_text, camera_info=camera_info,
+                                          alerts=alerts, alert_settings=alert_settings,
+                                          start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
+                                          level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7})
+             incidents.append(event)
+
+         else:
+             self._ascending_alert_list.append(0)
+             incidents.append({})
+
+         return incidents
+
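The first branch above maps detection counts to severity via intensity = min(10, count / threshold * 10). A quick check with the same arithmetic, assuming the default "all" threshold of 15:

    threshold = 15
    for total in (3, 8, 11, 14, 20):
        intensity = min(10.0, total / threshold * 10)
        level = ("critical" if intensity >= 9 else
                 "significant" if intensity >= 7 else
                 "medium" if intensity >= 5 else "low")
        print(total, round(intensity, 1), level)
    # 3 -> 2.0 low, 8 -> 5.3 medium, 11 -> 7.3 significant,
    # 14 -> 9.3 critical, 20 -> 10.0 critical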
+     def _generate_tracking_stats(
+             self,
+             counting_summary: Dict,
+             alerts: List,
+             config: CardiomegalyConfig,
+             frame_number: Optional[int] = None,
+             stream_info: Optional[Dict[str, Any]] = None
+     ) -> List[Dict]:
+         """Generate structured tracking stats matching the eg.json format."""
+         camera_info = self.get_camera_info_from_stream(stream_info)
+
+         tracking_stats = []
+
+         total_detections = counting_summary.get("total_count", 0)      # CURRENT total count of all classes
+         total_counts_dict = counting_summary.get("total_counts", {})   # TOTAL cumulative counts per class
+         cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0  # TOTAL combined cumulative count
+         per_category_count = counting_summary.get("per_category_count", {})  # CURRENT count per class
+
+         current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
+         start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
+
+         # Create high-precision timestamps for input_timestamp and reset_timestamp
+         high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
+         high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
+
+         # Build the total_counts array in the expected format
+         total_counts = []
+         for cat, count in total_counts_dict.items():
+             if count > 0:
+                 total_counts.append({
+                     "category": cat,
+                     "count": count
+                 })
+
+         # Build the current_counts array in the expected format
+         current_counts = []
+         for cat, count in per_category_count.items():
+             if count > 0 or total_detections > 0:  # include zero counts when there are detections
+                 current_counts.append({
+                     "category": cat,
+                     "count": count
+                 })
+
+         # Prepare detections without confidence scores (as per eg.json)
+         detections = []
+         for detection in counting_summary.get("detections", []):
+             category = detection.get("category", "unknown")
+             detection_obj = {"category": category}
+             detections.append(detection_obj)
+
+         # Build the alert_settings array in the expected format
+         alert_settings = []
+         if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+             alert_settings.append({
+                 "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                 "incident_category": self.CASE_TYPE,
+                 "threshold_level": getattr(config.alert_config, 'count_thresholds', {}),
+                 "ascending": True,
+                 "settings": dict(zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                      getattr(config.alert_config, 'alert_value', ['JSON'])))
+             })
+
+         # Generate human_text in the expected format
+         human_text_lines = ["Tracking Statistics:"]
+         human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")
+
+         for cat, count in per_category_count.items():
+             human_text_lines.append(f"\t{cat}: {count}")
+
+         human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
+         for cat, count in total_counts_dict.items():
+             if count > 0:
+                 human_text_lines.append(f"\t{cat}: {count}")
+
+         if alerts:
+             for alert in alerts:
+                 human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
+         else:
+             human_text_lines.append("Alerts: None")
+
+         human_text = "\n".join(human_text_lines)
+         reset_settings = [
+             {
+                 "interval_type": "daily",
+                 "reset_time": {
+                     "value": 9,
+                     "time_unit": "hour"
+                 }
+             }
+         ]
+
+         tracking_stat = self.create_tracking_stats(total_counts=total_counts, current_counts=current_counts,
+                                                    detections=detections, human_text=human_text, camera_info=camera_info,
+                                                    alerts=alerts, alert_settings=alert_settings,
+                                                    reset_settings=reset_settings, start_time=high_precision_start_timestamp,
+                                                    reset_time=high_precision_reset_timestamp)
+
+         tracking_stats.append(tracking_stat)
+         return tracking_stats
+
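For a frame with a single tracked 'cardiomegaly' detection and no alerts, the human_text assembled above comes out roughly as follows (timestamps illustrative, indentation is a literal tab):

    Tracking Statistics:
    CURRENT FRAME @ 00:00:04
        cardiomegaly: 1
    TOTAL SINCE 00:00:00
        cardiomegaly: 1
    Alerts: None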
+     def _generate_business_analytics(self, counting_summary: Dict, alerts: List, config: CardiomegalyConfig,
+                                      frame_number: Optional[int] = None,
+                                      stream_info: Optional[Dict[str, Any]] = None,
+                                      is_empty: bool = False) -> List[Dict]:
+         """Generate standardized business analytics for the agg_summary structure.
+
+         (Signature aligned with the call in process(); the published file declared
+         a zone_analysis parameter and no frame_number, which made the call raise
+         TypeError by passing is_empty twice.)
+         """
+         if is_empty:
+             return []
+
+         # -----IF YOUR USECASE NEEDS BUSINESS ANALYTICS, YOU CAN USE THIS FUNCTION------ #
+         # camera_info = self.get_camera_info_from_stream(stream_info)
+         # business_analytics = self.create_business_analytics(analysis_name, statistics,
+         #     human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
+         #     reset_settings=reset_settings)
+         # return business_analytics
+
+     def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List,
+                           business_analytics: List, alerts: List) -> List[Dict]:
+         """
+         Generate a human_text mapping for the tracking stats, incidents, business analytics and alerts.
+         """
+         lines = {}
+         lines["Application Name"] = self.CASE_TYPE
+         lines["Application Version"] = self.CASE_VERSION
+         if len(incidents) > 0:
+             lines["Incidents:"] = f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
+         if len(tracking_stats) > 0:
+             lines["Tracking Statistics:"] = f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
+         if len(business_analytics) > 0:
+             lines["Business Analytics:"] = f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
+
+         if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
+             lines["Summary"] = "No Summary Data"
+
+         return [lines]
+
+     def _count_categories(self, detections: list, config: CardiomegalyConfig) -> dict:
+         """
+         Count the number of detections per category and return a summary dict.
+         The detections list is expected to carry 'track_id' (from the tracker), 'category', 'bounding_box', etc.
+         The output structure includes 'track_id' for each detection, as per the AdvancedTracker output.
+         """
+         counts = {}
+         for det in detections:
+             cat = det.get('category', 'unknown')
+             counts[cat] = counts.get(cat, 0) + 1
+         # Each detection dict will now include 'track_id' (and possibly 'frame_id')
+         return {
+             "total_count": sum(counts.values()),
+             "per_category_count": counts,
+             "detections": [
+                 {
+                     "bounding_box": det.get("bounding_box"),
+                     "category": det.get("category"),
+                     "confidence": det.get("confidence"),
+                     "track_id": det.get("track_id"),
+                     "frame_id": det.get("frame_id")
+                 }
+                 for det in detections
+             ]
+         }
+
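The summary dict this returns feeds _check_alerts and _generate_tracking_stats; its shape, with illustrative values:

    {
        "total_count": 2,
        "per_category_count": {"cardiomegaly": 1, "normal": 1},
        "detections": [
            {"bounding_box": {"xmin": 40, "ymin": 30, "xmax": 420, "ymax": 400},
             "category": "cardiomegaly", "confidence": 0.91, "track_id": 1, "frame_id": 7},
            {"bounding_box": {"xmin": 500, "ymin": 30, "xmax": 900, "ymax": 400},
             "category": "normal", "confidence": 0.88, "track_id": 2, "frame_id": 7},
        ],
    }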
+     def _generate_insights(self, summary: dict, config: CardiomegalyConfig) -> List[str]:
+         """
+         Generate human-readable insights for each category.
+         """
+         insights = []
+         per_cat = summary.get("per_category_count", {})
+         total_detections = summary.get("total_count", 0)
+
+         if total_detections == 0:
+             insights.append("No detections in the scene")
+             return insights
+         insights.append(f"EVENT: Detected {total_detections} detection(s) in the scene")
+
+         # Intensity calculation based on the threshold percentage
+         intensity_threshold = None
+         if (config.alert_config and
+                 config.alert_config.count_thresholds and
+                 "all" in config.alert_config.count_thresholds):
+             intensity_threshold = config.alert_config.count_thresholds["all"]
+
+         if intensity_threshold is not None:
+             # Calculate the percentage relative to the threshold
+             percentage = (total_detections / intensity_threshold) * 100
+
+             if percentage < 20:
+                 insights.append(f"INTENSITY: Low congestion in the scene ({percentage:.1f}% of capacity)")
+             elif percentage <= 50:
+                 insights.append(f"INTENSITY: Moderate congestion in the scene ({percentage:.1f}% of capacity)")
+             elif percentage <= 70:
+                 insights.append(f"INTENSITY: Heavy congestion in the scene ({percentage:.1f}% of capacity)")
+             else:
+                 insights.append(f"INTENSITY: Severe congestion in the scene ({percentage:.1f}% of capacity)")
+
+         for cat, count in per_cat.items():
+             display = self.CATEGORY_DISPLAY.get(cat, cat)
+             insights.append(f"{display}: {count}")
+         return insights
+
+     def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
+         """
+         Extract prediction details for output (category, confidence, bounding box).
+         """
+         return [
+             {
+                 "category": det.get("category", "unknown"),
+                 "confidence": det.get("confidence", 0.0),
+                 "bounding_box": det.get("bounding_box", {})
+             }
+             for det in detections
+         ]
+
+     def _generate_frame_summary(self, summary: dict, alerts: List) -> str:
+         """
+         Generate a human_text string for the result, including per-category insights if available.
+         Adds a tab before each label for better formatting, and always includes the cumulative count so far.
+
+         (Renamed from _generate_summary: the published file defined _generate_summary twice,
+         and this two-argument version silently shadowed the five-argument one called from process().)
+         """
+         total = summary.get("total_count", 0)
+         per_cat = summary.get("per_category_count", {})
+         cumulative = summary.get("total_counts", {})
+         cumulative_total = sum(cumulative.values()) if cumulative else 0
+         lines = []
+         if total > 0:
+             lines.append(f"{total} detections")
+             if per_cat:
+                 lines.append("detections:")
+                 for cat, count in per_cat.items():
+                     lines.append(f"\t{cat}: {count}")
+         else:
+             lines.append("No detections")
+         lines.append(f"Total detections: {cumulative_total}")
+         if alerts:
+             lines.append(f"{len(alerts)} alert(s)")
+         return "\n".join(lines)
+
+     def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
+         """
+         Get detailed information about track IDs (per frame).
+         """
+         # Collect all track_ids in this frame
+         frame_track_ids = set()
+         for det in detections:
+             tid = det.get('track_id')
+             if tid is not None:
+                 frame_track_ids.add(tid)
+         # Use the persistent total set for unique counting
+         total_track_ids = set()
+         for s in getattr(self, '_per_category_total_track_ids', {}).values():
+             total_track_ids.update(s)
+         return {
+             "total_count": len(total_track_ids),
+             "current_frame_count": len(frame_track_ids),
+             "total_unique_track_ids": len(total_track_ids),
+             "current_frame_track_ids": list(frame_track_ids),
+             "last_update_time": time.time(),
+             "total_frames_processed": getattr(self, '_total_frame_counter', 0)
+         }
+
+     def _update_tracking_state(self, detections: list):
+         """
+         Track unique track_ids per category for the total count after tracking.
+         Applies canonical ID merging to avoid duplicate counting when the underlying
+         tracker loses an object temporarily and assigns a new ID.
+         """
+         # Lazily initialise storage dicts
+         if not hasattr(self, "_per_category_total_track_ids"):
+             self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
+             self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
+
+         for det in detections:
+             cat = det.get("category")
+             raw_track_id = det.get("track_id")
+             if cat not in self.target_categories or raw_track_id is None:
+                 continue
+             bbox = det.get("bounding_box", det.get("bbox"))
+             canonical_id = self._merge_or_register_track(raw_track_id, bbox)
+             # Propagate the canonical ID back to the detection so downstream logic uses it
+             det["track_id"] = canonical_id
+
+             self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
+             self._current_frame_track_ids[cat].add(canonical_id)
+
+     def get_total_counts(self):
+         """
+         Return the total unique track_id count for each category.
+         """
+         return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
+
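A minimal illustration of the canonical-ID bookkeeping, using hand-built detection dicts (in production AdvancedTracker supplies track_id; this assumes the package's BaseProcessor initialises with just the use-case name):

    uc = CardiomegalyUseCase()
    frame1 = [{"category": "cardiomegaly", "track_id": 7,
               "bounding_box": {"xmin": 10, "ymin": 10, "xmax": 50, "ymax": 50}}]
    # Same region, but the tracker re-assigned a new raw ID to a heavily overlapping box
    frame2 = [{"category": "cardiomegaly", "track_id": 9,
               "bounding_box": {"xmin": 11, "ymin": 11, "xmax": 51, "ymax": 51}}]
    uc._update_tracking_state(frame1)
    uc._update_tracking_state(frame2)
    print(uc.get_total_counts())  # {'cardiomegaly': 1, 'normal': 0} - raw IDs 7 and 9 merged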
+     def _format_timestamp_for_video(self, timestamp: float) -> str:
+         """Format a timestamp for video chunks (HH:MM:SS.ms format)."""
+         hours = int(timestamp // 3600)
+         minutes = int((timestamp % 3600) // 60)
+         seconds = timestamp % 60
+         return f"{hours:02d}:{minutes:02d}:{seconds:05.2f}"
+
+     def _format_timestamp_for_stream(self, timestamp: float) -> str:
+         """Format a timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
+         dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
+         return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+     def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
+         """Get the formatted current timestamp based on stream type."""
+         if not stream_info:
+             return "00:00:00.00"
+
+         if precision:
+             if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
+                 stream_time_str = stream_info.get("video_timestamp", "")
+                 return stream_time_str[:8]
+             else:
+                 return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+
+         if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
+             # For video files, return the video timestamp
+             stream_time_str = stream_info.get("video_timestamp", "")
+             return stream_time_str[:8]
+         else:
+             # For streams, use stream_time from stream_info
+             stream_time_str = stream_info.get("stream_time", "")
+             if stream_time_str:
+                 # Parse the high-precision timestamp string to get a POSIX timestamp
+                 try:
+                     # Remove the " UTC" suffix and parse
+                     timestamp_str = stream_time_str.replace(" UTC", "")
+                     dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                     timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
+                     return self._format_timestamp_for_stream(timestamp)
+                 except Exception:
+                     # Fall back to the current time if parsing fails
+                     return self._format_timestamp_for_stream(time.time())
+             else:
+                 return self._format_timestamp_for_stream(time.time())
+
+     def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
+         """Get the formatted start timestamp for 'TOTAL SINCE' based on stream type."""
+         if not stream_info:
+             return "00:00:00"
+
+         if precision:
+             if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
+                 return "00:00:00"
+             else:
+                 return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+
+         if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
+             # For video files, start from 00:00:00
+             return "00:00:00"
+         else:
+             # For streams, use the tracking start time or the current time with minutes/seconds reset
+             if self._tracking_start_time is None:
+                 # Try to extract the timestamp from the stream_time string
+                 stream_time_str = stream_info.get("stream_time", "")
+                 if stream_time_str:
+                     try:
+                         # Remove the " UTC" suffix and parse
+                         timestamp_str = stream_time_str.replace(" UTC", "")
+                         dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                         self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                     except Exception:
+                         # Fall back to the current time if parsing fails
+                         self._tracking_start_time = time.time()
+                 else:
+                     self._tracking_start_time = time.time()
+
+             dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+             # Reset minutes and seconds to 00:00 for the "TOTAL SINCE" format
+             dt = dt.replace(minute=0, second=0, microsecond=0)
+             return dt.strftime('%Y:%m:%d %H:%M:%S')
+
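A quick check of the two pure formatters above (safe to call directly, assuming the class can be instantiated as in the earlier sketches; note the published file used {seconds:06.2f}, which rendered 3-digit seconds such as 005.50, corrected here to {seconds:05.2f}):

    uc = CardiomegalyUseCase()
    print(uc._format_timestamp_for_video(3725.5))  # 01:02:05.50
    print(uc._format_timestamp_for_stream(0))      # 1970:01:01 00:00:00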
+     # ------------------------------------------------------------------ #
+     # Canonical ID helpers                                                #
+     # ------------------------------------------------------------------ #
+     def _compute_iou(self, box1: Any, box2: Any) -> float:
+         """Compute the IoU between two bounding boxes, which may be dicts or lists.
+         Falls back to 0 when insufficient data is available."""
+
+         # Helper to convert a bbox (dict or list) to [x1, y1, x2, y2]
+         def _bbox_to_list(bbox):
+             if bbox is None:
+                 return []
+             if isinstance(bbox, list):
+                 return bbox[:4] if len(bbox) >= 4 else []
+             if isinstance(bbox, dict):
+                 if "xmin" in bbox:
+                     return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
+                 if "x1" in bbox:
+                     return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
+                 # Fallback: first four numeric values
+                 values = [v for v in bbox.values() if isinstance(v, (int, float))]
+                 return values[:4] if len(values) >= 4 else []
+             return []
+
+         l1 = _bbox_to_list(box1)
+         l2 = _bbox_to_list(box2)
+         if len(l1) < 4 or len(l2) < 4:
+             return 0.0
+         x1_min, y1_min, x1_max, y1_max = l1
+         x2_min, y2_min, x2_max, y2_max = l2
+
+         # Ensure correct ordering of the corners
+         x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
+         y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
+         x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
+         y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
+
+         inter_x_min = max(x1_min, x2_min)
+         inter_y_min = max(y1_min, y2_min)
+         inter_x_max = min(x1_max, x2_max)
+         inter_y_max = min(y1_max, y2_max)
+
+         inter_w = max(0.0, inter_x_max - inter_x_min)
+         inter_h = max(0.0, inter_y_max - inter_y_min)
+         inter_area = inter_w * inter_h
+
+         area1 = (x1_max - x1_min) * (y1_max - y1_min)
+         area2 = (x2_max - x2_min) * (y2_max - y2_min)
+         union_area = area1 + area2 - inter_area
+
+         return (inter_area / union_area) if union_area > 0 else 0.0
+
+     def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
+         """Return a stable canonical ID for a raw tracker ID, merging fragmented
+         tracks when IoU and temporal constraints indicate they represent the
+         same physical object."""
+         if raw_id is None or bbox is None:
+             # Nothing to merge
+             return raw_id
+
+         now = time.time()
+
+         # Fast path: raw_id already mapped
+         if raw_id in self._track_aliases:
+             canonical_id = self._track_aliases[raw_id]
+             track_info = self._canonical_tracks.get(canonical_id)
+             if track_info is not None:
+                 track_info["last_bbox"] = bbox
+                 track_info["last_update"] = now
+                 track_info["raw_ids"].add(raw_id)
+             return canonical_id
+
+         # Attempt to merge with an existing canonical track
+         for canonical_id, info in self._canonical_tracks.items():
+             # Only consider recently updated tracks
+             if now - info["last_update"] > self._track_merge_time_window:
+                 continue
+             iou = self._compute_iou(bbox, info["last_bbox"])
+             if iou >= self._track_merge_iou_threshold:
+                 # Merge the fragmented track into the existing canonical track
+                 self._track_aliases[raw_id] = canonical_id
+                 info["last_bbox"] = bbox
+                 info["last_update"] = now
+                 info["raw_ids"].add(raw_id)
+                 return canonical_id
+
+         # No match: register a new canonical track
+         canonical_id = raw_id
+         self._track_aliases[raw_id] = canonical_id
+         self._canonical_tracks[canonical_id] = {
+             "last_bbox": bbox,
+             "last_update": now,
+             "raw_ids": {raw_id},
+         }
+         return canonical_id
+
+     def _format_timestamp(self, timestamp: float) -> str:
+         """Format a timestamp for human-readable output."""
+         return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
+
+     def _get_tracking_start_time(self) -> str:
+         """Get the tracking start time, formatted as a string."""
+         if self._tracking_start_time is None:
+             return "N/A"
+         return self._format_timestamp(self._tracking_start_time)
+
+     def _set_tracking_start_time(self) -> None:
+         """Set the tracking start time to the current time."""
+         self._tracking_start_time = time.time()
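As a worked check of the IoU math above (boxes made up): two 40x40 boxes offset by one pixel intersect in a 39x39 region, so IoU = 1521 / (1600 + 1600 - 1521), far above the 0.05 merge threshold.

    uc = CardiomegalyUseCase()
    a = {"xmin": 0, "ymin": 0, "xmax": 40, "ymax": 40}
    b = [1, 1, 41, 41]                       # the list form is accepted too
    print(round(uc._compute_iou(a, b), 3))   # 0.906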