matrice-analytics 0.1.60__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196)
  1. matrice_analytics/__init__.py +28 -0
  2. matrice_analytics/boundary_drawing_internal/README.md +305 -0
  3. matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
  4. matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
  5. matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
  6. matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
  7. matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
  8. matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
  9. matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
  10. matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
  11. matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
  12. matrice_analytics/post_processing/README.md +455 -0
  13. matrice_analytics/post_processing/__init__.py +732 -0
  14. matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
  15. matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
  16. matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
  17. matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
  18. matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
  19. matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
  20. matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
  21. matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
  22. matrice_analytics/post_processing/config.py +146 -0
  23. matrice_analytics/post_processing/core/__init__.py +63 -0
  24. matrice_analytics/post_processing/core/base.py +704 -0
  25. matrice_analytics/post_processing/core/config.py +3291 -0
  26. matrice_analytics/post_processing/core/config_utils.py +925 -0
  27. matrice_analytics/post_processing/face_reg/__init__.py +43 -0
  28. matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
  29. matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
  30. matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
  31. matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
  32. matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
  33. matrice_analytics/post_processing/ocr/__init__.py +0 -0
  34. matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
  35. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
  36. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
  37. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
  38. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
  39. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
  40. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
  41. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
  42. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
  43. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
  44. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
  45. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
  46. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
  47. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
  48. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
  49. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
  50. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
  51. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
  52. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
  53. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
  54. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
  55. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
  56. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
  57. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
  58. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
  59. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
  60. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
  61. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
  62. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
  63. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
  64. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
  65. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
  66. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
  67. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
  68. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
  69. matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
  70. matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
  71. matrice_analytics/post_processing/post_processor.py +1175 -0
  72. matrice_analytics/post_processing/test_cases/__init__.py +1 -0
  73. matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
  74. matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
  75. matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
  76. matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
  77. matrice_analytics/post_processing/test_cases/test_config.py +852 -0
  78. matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
  79. matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
  80. matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
  81. matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
  82. matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
  83. matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
  84. matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
  85. matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
  86. matrice_analytics/post_processing/usecases/__init__.py +267 -0
  87. matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
  88. matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
  89. matrice_analytics/post_processing/usecases/age_detection.py +842 -0
  90. matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
  91. matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
  92. matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
  93. matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
  94. matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
  95. matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
  96. matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
  97. matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
  98. matrice_analytics/post_processing/usecases/car_service.py +1601 -0
  99. matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
  100. matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
  101. matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
  102. matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
  103. matrice_analytics/post_processing/usecases/color/clip.py +660 -0
  104. matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
  105. matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
  106. matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
  107. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
  108. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
  109. matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
  110. matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
  111. matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
  112. matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
  113. matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
  114. matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
  115. matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
  116. matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
  117. matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
  118. matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
  119. matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
  120. matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
  121. matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
  122. matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
  123. matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
  124. matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
  125. matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
  126. matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
  127. matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
  128. matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
  129. matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
  130. matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
  131. matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
  132. matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
  133. matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
  134. matrice_analytics/post_processing/usecases/leaf.py +821 -0
  135. matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
  136. matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
  137. matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
  138. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
  139. matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
  140. matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
  141. matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
  142. matrice_analytics/post_processing/usecases/parking.py +787 -0
  143. matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
  144. matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
  145. matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
  146. matrice_analytics/post_processing/usecases/people_counting.py +706 -0
  147. matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
  148. matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
  149. matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
  150. matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
  151. matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
  152. matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
  153. matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
  154. matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
  155. matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
  156. matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
  157. matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
  158. matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
  159. matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
  160. matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
  161. matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
  162. matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
  163. matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
  164. matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
  165. matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
  166. matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
  167. matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
  168. matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
  169. matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
  170. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
  171. matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
  172. matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
  173. matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
  174. matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
  175. matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
  176. matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
  177. matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
  178. matrice_analytics/post_processing/utils/__init__.py +150 -0
  179. matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
  180. matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
  181. matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
  182. matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
  183. matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
  184. matrice_analytics/post_processing/utils/color_utils.py +592 -0
  185. matrice_analytics/post_processing/utils/counting_utils.py +182 -0
  186. matrice_analytics/post_processing/utils/filter_utils.py +261 -0
  187. matrice_analytics/post_processing/utils/format_utils.py +293 -0
  188. matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
  189. matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
  190. matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
  191. matrice_analytics/py.typed +0 -0
  192. matrice_analytics-0.1.60.dist-info/METADATA +481 -0
  193. matrice_analytics-0.1.60.dist-info/RECORD +196 -0
  194. matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
  195. matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
  196. matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
@@ -0,0 +1,899 @@
1
+ """
2
+ warehouse object Monitoring Use Case for Post-Processing
3
+
4
+ This module provides warehouse object monitoring functionality ,
5
+ zone analysis, and alert generation.
6
+
7
+ """
8
+
9
+ from typing import Any, Dict, List, Optional
10
+ from dataclasses import asdict
11
+ import time
12
+ from datetime import datetime, timezone
13
+ import copy # Added for deep copying detections to preserve original masks
14
+
15
+ from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
16
+ from ..utils import (
17
+ filter_by_confidence,
18
+ filter_by_categories,
19
+ apply_category_mapping,
20
+ count_objects_by_category,
21
+ count_objects_in_zones,
22
+ calculate_counting_summary,
23
+ match_results_structure,
24
+ bbox_smoothing,
25
+ BBoxSmoothingConfig,
26
+ BBoxSmoothingTracker
27
+ )
28
+ from dataclasses import dataclass, field
29
+ from ..core.config import BaseConfig, AlertConfig, ZoneConfig
30
+
31
+
32
@dataclass
class WarehouseObjectConfig(BaseConfig):
    """Configuration for Warehouse pallet detection use case in warehouse pallet monitoring."""

    # --- Bbox smoothing configuration --------------------------------- #
    # Master switch; when True, process() runs detections through
    # BBoxSmoothingTracker before tracking.
    enable_smoothing: bool = True
    smoothing_algorithm: str = "observability"  # "window" or "observability"
    smoothing_window_size: int = 20
    smoothing_cooldown_frames: int = 5
    smoothing_confidence_range_factor: float = 0.5

    # Minimum detection confidence; detections below this are dropped by
    # filter_by_confidence in process().
    confidence_threshold: float = 0.90

    # All categories this use case understands.
    usecase_categories: List[str] = field(
        default_factory=lambda: ['pallet']
    )

    # Categories actually kept after filtering (subset of usecase_categories).
    target_categories: List[str] = field(
        default_factory=lambda: ['pallet']
    )

    # Optional alert thresholds; when None, _check_alerts emits no alerts.
    alert_config: Optional[AlertConfig] = None

    # Maps raw model class indices to human-readable category names,
    # applied by apply_category_mapping in process().
    index_to_category: Optional[Dict[int, str]] = field(
        default_factory=lambda: {
            0: "pallet"
        }
    )
60
+
61
+
62
class WarehouseObjectUseCase(BaseProcessor):
    """Post-processing use case for warehouse pallet segmentation/monitoring.

    Consumes raw model detections and produces per-frame aggregated output
    (incidents, tracking stats, alerts, human-readable summary) via process().
    """

    # Human-friendly display names for categories
    CATEGORY_DISPLAY = {
        "pallet": "pallet"
    }
68
    def __init__(self):
        """Initialize processor identity, lazy trackers, and per-stream counters."""
        super().__init__("warehouse_object_segmentation")
        self.category = "retail"

        # List of categories to track
        self.target_categories = ["pallet"]

        # Case identity embedded in incident ids and alert payloads.
        self.CASE_TYPE: Optional[str] = 'warehouse_object_segmentation'
        self.CASE_VERSION: Optional[str] = '1.3'

        # Initialize smoothing tracker (created lazily on first smoothed frame in process())
        self.smoothing_tracker = None

        # Initialize advanced tracker (will be created on first use)
        self.tracker = None

        # Initialize tracking state variables
        self._total_frame_counter = 0
        self._global_frame_offset = 0

        # Track start time for "TOTAL SINCE" calculation
        self._tracking_start_time = None

        # ------------------------------------------------------------------ #
        #        Canonical tracking aliasing to avoid duplicate counts        #
        # ------------------------------------------------------------------ #
        # Maps raw tracker-generated IDs to stable canonical IDs that persist
        # even if the underlying tracker re-assigns a new ID after a short
        # interruption. This mirrors the logic used in people_counting to
        # provide accurate unique counting.
        self._track_aliases: Dict[Any, Any] = {}
        self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
        # Tunable parameters - adjust if necessary for specific scenarios
        self._track_merge_iou_threshold: float = 0.05  # IoU >= 0.05 -> candidate tracks may be merged
        self._track_merge_time_window: float = 7.0  # seconds within which to merge

        # Rolling history of per-frame severity codes (0-3) appended by
        # _generate_incidents and consumed by _check_alerts' trend analysis.
        self._ascending_alert_list: List[int] = []
        # "N/A" / "Incident still active" / concrete timestamp — managed by
        # _generate_incidents.
        self.current_incident_end_timestamp: str = "N/A"
106
+
107
    def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
                stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
        """
        Main entry point for post-processing.
        Applies category mapping, smoothing, counting, alerting, and summary generation.
        Returns a ProcessingResult with all relevant outputs.

        Args:
            data: Raw detection results from the model (list of detection dicts
                after mapping; exact input shape is detected via
                match_results_structure — confirm upstream format).
            config: Must be a WarehouseObjectConfig; any other type yields an
                error result.
            context: Optional ProcessingContext; created fresh when omitted.
            stream_info: Optional stream metadata; "input_settings" with
                start_frame/end_frame is used to derive a single frame number.

        Returns:
            ProcessingResult whose data payload is
            {"agg_summary": {frame_key: {incidents, tracking_stats,
            business_analytics, alerts, human_text}}}.
        """
        start_time = time.time()  # NOTE(review): captured but never used in this method
        # Ensure config is correct type
        if not isinstance(config, WarehouseObjectConfig):
            return self.create_error_result("Invalid config type", usecase=self.name, category=self.category,
                                            context=context)
        if context is None:
            context = ProcessingContext()

        # Detect input format and store in context
        input_format = match_results_structure(data)
        context.input_format = input_format
        context.confidence_threshold = config.confidence_threshold

        # Step 1: Confidence filtering
        if config.confidence_threshold is not None:
            processed_data = filter_by_confidence(data, config.confidence_threshold)
        else:
            processed_data = data
            self.logger.debug(f"Did not apply confidence filtering with threshold since nothing was provided")

        # Step 2: Apply category mapping if provided
        if config.index_to_category:
            processed_data = apply_category_mapping(processed_data, config.index_to_category)

        # Step 3: Category filtering
        # NOTE(review): filters against self.target_categories, not
        # config.target_categories — confirm this divergence is intentional.
        if config.target_categories:
            processed_data = [d for d in processed_data if d.get('category') in self.target_categories]

        # Step 4: Apply bbox smoothing if enabled
        # Deep-copy detections so that we preserve the original masks before any
        # smoothing/tracking logic potentially removes them.
        raw_processed_data = [copy.deepcopy(det) for det in processed_data]
        if config.enable_smoothing:
            if self.smoothing_tracker is None:
                # Lazily build the smoothing tracker once; its config is frozen
                # from the first frame's settings.
                smoothing_config = BBoxSmoothingConfig(
                    smoothing_algorithm=config.smoothing_algorithm,
                    window_size=config.smoothing_window_size,
                    cooldown_frames=config.smoothing_cooldown_frames,
                    confidence_threshold=config.confidence_threshold,
                    confidence_range_factor=config.smoothing_confidence_range_factor,
                    enable_smoothing=True
                )
                self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)

            processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
            # Restore masks after smoothing

        # Step 5: Advanced tracking (BYTETracker-like)
        try:
            from ..advanced_tracker import AdvancedTracker
            from ..advanced_tracker.config import TrackerConfig

            # Create tracker instance if it doesn't exist (preserves state across frames)
            if self.tracker is None:
                tracker_config = TrackerConfig()
                self.tracker = AdvancedTracker(tracker_config)
                self.logger.info("Initialized AdvancedTracker for Monitoring and tracking")

            processed_data = self.tracker.update(processed_data)
        except Exception as e:
            # If advanced tracker fails, fallback to unsmoothed detections
            self.logger.warning(f"AdvancedTracker failed: {e}")

        # Update tracking state for total count per label
        self._update_tracking_state(processed_data)

        # ------------------------------------------------------------------ #
        # Re-attach segmentation masks that were present in the original input
        # but may have been stripped during smoothing/tracking. We match each
        # processed detection back to the raw detection with the highest IoU
        # and copy over its "masks" field (if available).
        # ------------------------------------------------------------------ #
        processed_data = self._attach_masks_to_detections(processed_data, raw_processed_data)

        # Update frame counter
        self._total_frame_counter += 1

        # Extract frame information from stream_info
        frame_number = None
        if stream_info:
            input_settings = stream_info.get("input_settings", {})
            start_frame = input_settings.get("start_frame")
            end_frame = input_settings.get("end_frame")
            # If start and end frame are the same, it's a single frame
            if start_frame is not None and end_frame is not None and start_frame == end_frame:
                frame_number = start_frame

        # Compute summaries and alerts
        general_counting_summary = calculate_counting_summary(data)  # NOTE(review): computed but unused
        counting_summary = self._count_categories(processed_data, config)
        # Add total unique counts after tracking using only local state
        total_counts = self.get_total_counts()
        counting_summary['total_counts'] = total_counts

        alerts = self._check_alerts(counting_summary, frame_number, config)
        predictions = self._extract_predictions(processed_data)  # NOTE(review): computed but unused

        # Step: Generate structured events and tracking stats with frame-based keys
        incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
        tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
        # business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, frame_number, stream_info, is_empty=False)
        business_analytics_list = []
        summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)

        # Extract frame-based dictionaries from the lists
        incidents = incidents_list[0] if incidents_list else {}
        tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
        business_analytics = business_analytics_list[0] if business_analytics_list else {}
        summary = summary_list[0] if summary_list else {}
        # Keyed by frame number ("None" when no single frame could be derived).
        agg_summary = {str(frame_number): {
            "incidents": incidents,
            "tracking_stats": tracking_stats,
            "business_analytics": business_analytics,
            "alerts": alerts,
            "human_text": summary}
        }

        context.mark_completed()

        # Build result object following the new pattern

        result = self.create_result(
            data={"agg_summary": agg_summary},
            usecase=self.name,
            category=self.category,
            context=context
        )

        return result
243
+
244
+ def _check_alerts(self, summary: dict, frame_number: Any, config: WarehouseObjectConfig) -> List[Dict]:
245
+ """
246
+ Check if any alert thresholds are exceeded and return alert dicts.
247
+ """
248
+ def get_trend(data, lookback=900, threshold=0.6):
249
+ '''
250
+ Determine if the trend is ascending or descending based on actual value progression.
251
+ Now works with values 0,1,2,3 (not just binary).
252
+ '''
253
+ window = data[-lookback:] if len(data) >= lookback else data
254
+ if len(window) < 2:
255
+ return True # not enough data to determine trend
256
+ increasing = 0
257
+ total = 0
258
+ for i in range(1, len(window)):
259
+ if window[i] >= window[i - 1]:
260
+ increasing += 1
261
+ total += 1
262
+ ratio = increasing / total
263
+ if ratio >= threshold:
264
+ return True
265
+ elif ratio <= (1 - threshold):
266
+ return False
267
+
268
+ frame_key = str(frame_number) if frame_number is not None else "current_frame"
269
+ alerts = []
270
+ total_detections = summary.get("total_count", 0) #CURRENT combined total count of all classes
271
+ total_counts_dict = summary.get("total_counts", {}) #TOTAL cumulative counts per class
272
+ cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0 #TOTAL combined cumulative count
273
+ per_category_count = summary.get("per_category_count", {}) #CURRENT count per class
274
+
275
+ if not config.alert_config:
276
+ return alerts
277
+
278
+ total = summary.get("total_count", 0)
279
+ #self._ascending_alert_list
280
+ if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
281
+
282
+ for category, threshold in config.alert_config.count_thresholds.items():
283
+ if category == "all" and total > threshold:
284
+
285
+ alerts.append({
286
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
287
+ "alert_id": "alert_"+category+'_'+frame_key,
288
+ "incident_category": self.CASE_TYPE,
289
+ "threshold_level": threshold,
290
+ "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
291
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
292
+ getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
293
+ }
294
+ })
295
+ elif category in summary.get("per_category_count", {}):
296
+ count = summary.get("per_category_count", {})[category]
297
+ if count > threshold: # Fixed logic: alert when EXCEEDING threshold
298
+ alerts.append({
299
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
300
+ "alert_id": "alert_"+category+'_'+frame_key,
301
+ "incident_category": self.CASE_TYPE,
302
+ "threshold_level": threshold,
303
+ "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
304
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
305
+ getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
306
+ }
307
+ })
308
+ else:
309
+ pass
310
+ return alerts
311
+
312
    def _generate_incidents(self, counting_summary: Dict, alerts: List, config: WarehouseObjectConfig,
                            frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[
        Dict]:
        """Generate structured events for the output format with frame-based keys.

        Side effects: appends the frame's severity code (0-3) to
        self._ascending_alert_list and advances the incident start/end
        timestamp state machine on self.current_incident_end_timestamp.

        Returns:
            Single-element list: [incident dict] when detections are present,
            otherwise [{}].
        """

        # Use frame number as key, fallback to 'current_frame' if not available
        # NOTE(review): frame_key is currently unused in this method.
        frame_key = str(frame_number) if frame_number is not None else "current_frame"
        incidents = []
        total_detections = counting_summary.get("total_count", 0)
        current_timestamp = self._get_current_timestamp_str(stream_info)
        camera_info = self.get_camera_info_from_stream(stream_info)

        # Cap the severity history at the last 900 entries (trend lookback window).
        self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list

        if total_detections > 0:
            # Determine event level based on thresholds
            level = "low"
            intensity = 5.0
            start_timestamp = self._get_start_timestamp_str(stream_info)
            # Incident end-timestamp state machine:
            # "N/A" -> "Incident still active" -> concrete timestamp -> "N/A".
            if start_timestamp and self.current_incident_end_timestamp == 'N/A':
                self.current_incident_end_timestamp = 'Incident still active'
            elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
                # Close the incident once the mean of the last 15 severity
                # codes drops below 1.5 (i.e. activity has subsided).
                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
                    self.current_incident_end_timestamp = current_timestamp
            elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
                self.current_incident_end_timestamp = 'N/A'

            if config.alert_config and config.alert_config.count_thresholds:
                # Scale intensity 0-10 relative to the "all" threshold (default 15).
                threshold = config.alert_config.count_thresholds.get("all", 15)
                intensity = min(10.0, (total_detections / threshold) * 10)

                if intensity >= 9:
                    level = "critical"
                    self._ascending_alert_list.append(3)
                elif intensity >= 7:
                    level = "significant"
                    self._ascending_alert_list.append(2)
                elif intensity >= 5:
                    level = "medium"
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    self._ascending_alert_list.append(0)
            else:
                # No alert config: fall back to fixed detection-count bands.
                if total_detections > 30:
                    level = "critical"
                    intensity = 10.0
                    self._ascending_alert_list.append(3)
                elif total_detections > 25:
                    level = "significant"
                    intensity = 9.0
                    self._ascending_alert_list.append(2)
                elif total_detections > 15:
                    level = "medium"
                    intensity = 7.0
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    intensity = min(10.0, total_detections / 3.0)
                    self._ascending_alert_list.append(0)

            # Generate human text in new format
            human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
            human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE,level)}")
            human_text = "\n".join(human_text_lines)

            alert_settings = []
            if config.alert_config and hasattr(config.alert_config, 'alert_type'):
                alert_settings.append({
                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                    "incident_category": self.CASE_TYPE,
                    "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                    "ascending": True,
                    "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                                                     getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
                                 }
                })

            event = self.create_incident(incident_id=self.CASE_TYPE+'_'+str(frame_number), incident_type=self.CASE_TYPE,
                                         severity_level=level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
                                         start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
                                         level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7})
            incidents.append(event)

        else:
            # No detections: record zero severity and emit an empty incident.
            self._ascending_alert_list.append(0)
            incidents.append({})

        return incidents
401
+
402
+ def _generate_tracking_stats(
403
+ self,
404
+ counting_summary: Dict,
405
+ alerts: List,
406
+ config: WarehouseObjectConfig,
407
+ frame_number: Optional[int] = None,
408
+ stream_info: Optional[Dict[str, Any]] = None
409
+ ) -> List[Dict]:
410
+ """Generate structured tracking stats matching eg.json format."""
411
+ camera_info = self.get_camera_info_from_stream(stream_info)
412
+
413
+ tracking_stats = []
414
+ # frame_key = str(frame_number) if frame_number is not None else "current_frame"
415
+ # tracking_stats = [{frame_key: []}]
416
+ # frame_tracking_stats = tracking_stats[0][frame_key]
417
+
418
+ total_detections = counting_summary.get("total_count", 0) #CURRENT total count of all classes
419
+ total_counts_dict = counting_summary.get("total_counts", {}) #TOTAL cumulative counts per class
420
+ cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0 #TOTAL combined cumulative count
421
+ per_category_count = counting_summary.get("per_category_count", {}) #CURRENT count per class
422
+
423
+ track_ids_info = self._get_track_ids_info(counting_summary.get("detections", []))
424
+
425
+ current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
426
+ start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
427
+
428
+ # Create high precision timestamps for input_timestamp and reset_timestamp
429
+ high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
430
+ high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
431
+
432
+
433
+ # Build total_counts array in expected format
434
+ total_counts = []
435
+ for cat, count in total_counts_dict.items():
436
+ if count > 0:
437
+ total_counts.append({
438
+ "category": cat,
439
+ "count": count
440
+ })
441
+ print(total_counts)
442
+ # Build current_counts array in expected format
443
+ current_counts = []
444
+ for cat, count in per_category_count.items():
445
+ if count > 0 or total_detections > 0: # Include even if 0 when there are detections
446
+ current_counts.append({
447
+ "category": cat,
448
+ "count": count
449
+ })
450
+ print(current_counts)
451
+ # Prepare detections without confidence scores (as per eg.json)
452
+ detections = []
453
+ for detection in counting_summary.get("detections", []):
454
+ bbox = detection.get("bounding_box", {})
455
+ category = detection.get("category", "person")
456
+ # Include segmentation if available (like in eg.json)
457
+ if detection.get("masks"):
458
+ segmentation= detection.get("masks", [])
459
+ detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
460
+ elif detection.get("segmentation"):
461
+ segmentation= detection.get("segmentation")
462
+ detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
463
+ elif detection.get("mask"):
464
+ segmentation= detection.get("mask")
465
+ detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
466
+ else:
467
+ detection_obj = self.create_detection_object(category, bbox)
468
+ detections.append(detection_obj)
469
+ print(detections)
470
+ # Build alert_settings array in expected format
471
+ alert_settings = []
472
+ if config.alert_config and hasattr(config.alert_config, 'alert_type'):
473
+ alert_settings.append({
474
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
475
+ "incident_category": self.CASE_TYPE,
476
+ "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
477
+ "ascending": True,
478
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
479
+ getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
480
+ }
481
+ })
482
+ print(alert_settings)
483
+ # Generate human_text in expected format
484
+ human_text_lines = [f"Tracking Statistics:"]
485
+ human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")
486
+
487
+ for cat, count in per_category_count.items():
488
+ human_text_lines.append(f"\t{cat}: {count}")
489
+
490
+ human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
491
+ for cat, count in total_counts_dict.items():
492
+ if count > 0:
493
+ human_text_lines.append(f"\t{cat}: {count}")
494
+
495
+ if alerts:
496
+ for alert in alerts:
497
+ human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
498
+ else:
499
+ human_text_lines.append("Alerts: None")
500
+
501
+ human_text = "\n".join(human_text_lines)
502
+ reset_settings = [
503
+ {
504
+ "interval_type": "daily",
505
+ "reset_time": {
506
+ "value": 9,
507
+ "time_unit": "hour"
508
+ }
509
+ }
510
+ ]
511
+ print(human_text)
512
+ tracking_stat=self.create_tracking_stats(total_counts=total_counts, current_counts=current_counts,
513
+ detections=detections, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
514
+ reset_settings=reset_settings, start_time=high_precision_start_timestamp ,
515
+ reset_time=high_precision_reset_timestamp)
516
+
517
+ tracking_stats.append(tracking_stat)
518
+ print(tracking_stats)
519
+ return tracking_stats
520
+
521
+ def _generate_business_analytics(self, counting_summary: Dict, zone_analysis: Dict, config: WarehouseObjectConfig, stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
522
+ """Generate standardized business analytics for the agg_summary structure."""
523
+ if is_empty:
524
+ return []
525
+
526
+ #-----IF YOUR USECASE NEEDS BUSINESS ANALYTICS, YOU CAN USE THIS FUNCTION------#
527
+ #camera_info = self.get_camera_info_from_stream(stream_info)
528
+ # business_analytics = self.create_business_analytics(nalysis_name, statistics,
529
+ # human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
530
+ # reset_settings)
531
+ # return business_analytics
532
+
533
+ def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
534
+ """
535
+ Generate a human_text string for the tracking_stat, incident, business analytics and alerts.
536
+ """
537
+ lines = {}
538
+ lines["Application Name"] = self.CASE_TYPE
539
+ lines["Application Version"] = self.CASE_VERSION
540
+ if len(incidents) > 0:
541
+ lines["Incidents:"]=f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
542
+ if len(tracking_stats) > 0:
543
+ lines["Tracking Statistics:"]=f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
544
+ if len(business_analytics) > 0:
545
+ lines["Business Analytics:"]=f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
546
+
547
+ if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
548
+ lines["Summary"] = "No Summary Data"
549
+
550
+ return [lines]
551
+
552
+
553
+ def _count_categories(self, detections: list, config: WarehouseObjectConfig) -> dict:
554
+ """
555
+ Count the number of detections per category and return a summary dict.
556
+ The detections list is expected to have 'track_id' (from tracker), 'category', 'bounding_box', 'masks', etc.
557
+ Output structure will include 'track_id' and 'masks' for each detection as per AdvancedTracker output.
558
+ """
559
+ counts = {}
560
+ valid_detections = []
561
+ for det in detections:
562
+ cat = det.get('category', 'unknown')
563
+ if not all(k in det for k in ['category', 'confidence', 'bounding_box']): # Validate required fields
564
+ self.logger.warning(f"Skipping invalid detection: {det}")
565
+ continue
566
+ counts[cat] = counts.get(cat, 0) + 1
567
+ valid_detections.append({
568
+ "bounding_box": det.get("bounding_box"),
569
+ "category": det.get("category"),
570
+ "confidence": det.get("confidence"),
571
+ "track_id": det.get("track_id"),
572
+ "frame_id": det.get("frame_id"),
573
+ "masks": det.get("masks", det.get("mask", [])) # Include masks, fallback to empty list
574
+ })
575
+ self.logger.debug(f"Valid detections after filtering: {len(valid_detections)}")
576
+ return {
577
+ "total_count": sum(counts.values()),
578
+ "per_category_count": counts,
579
+ "detections": valid_detections
580
+ }
581
+
582
+ def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
583
+ """
584
+ Get detailed information about track IDs (per frame).
585
+ """
586
+ # Collect all track_ids in this frame
587
+ frame_track_ids = set()
588
+ for det in detections:
589
+ tid = det.get('track_id')
590
+ if tid is not None:
591
+ frame_track_ids.add(tid)
592
+ # Use persistent total set for unique counting
593
+ total_track_ids = set()
594
+ for s in getattr(self, '_per_category_total_track_ids', {}).values():
595
+ total_track_ids.update(s)
596
+ return {
597
+ "total_count": len(total_track_ids),
598
+ "current_frame_count": len(frame_track_ids),
599
+ "total_unique_track_ids": len(total_track_ids),
600
+ "current_frame_track_ids": list(frame_track_ids),
601
+ "last_update_time": time.time(),
602
+ "total_frames_processed": getattr(self, '_total_frame_counter', 0)
603
+ }
604
+
605
+ def _update_tracking_state(self, detections: list):
606
+ """
607
+ Track unique categories track_ids per category for total count after tracking.
608
+ Applies canonical ID merging to avoid duplicate counting when the underlying
609
+ tracker loses an object temporarily and assigns a new ID.
610
+ """
611
+ # Lazily initialise storage dicts
612
+ if not hasattr(self, "_per_category_total_track_ids"):
613
+ self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
614
+ self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
615
+
616
+ for det in detections:
617
+ cat = det.get("category")
618
+ raw_track_id = det.get("track_id")
619
+ if cat not in self.target_categories or raw_track_id is None:
620
+ continue
621
+ bbox = det.get("bounding_box", det.get("bbox"))
622
+ canonical_id = self._merge_or_register_track(raw_track_id, bbox)
623
+ # Propagate canonical ID back to detection so downstream logic uses it
624
+ det["track_id"] = canonical_id
625
+
626
+ self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
627
+ self._current_frame_track_ids[cat].add(canonical_id)
628
+
629
+ def get_total_counts(self):
630
+ """
631
+ Return total unique track_id count for each category.
632
+ """
633
+ return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
634
+
635
+ def _format_timestamp_for_video(self, timestamp: float) -> str:
636
+ """Format timestamp for video chunks (HH:MM:SS.ms format)."""
637
+ hours = int(timestamp // 3600)
638
+ minutes = int((timestamp % 3600) // 60)
639
+ seconds = round(float(timestamp % 60),2)
640
+ return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
641
+
642
+ def _format_timestamp_for_stream(self, timestamp: float) -> str:
643
+ """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
644
+ dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
645
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
646
+
647
+ def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
648
+ """Get formatted current timestamp based on stream type."""
649
+ if not stream_info:
650
+ return "00:00:00.00"
651
+ # is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
652
+ if precision:
653
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
654
+ if frame_id:
655
+ start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
656
+ else:
657
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
658
+ stream_time_str = self._format_timestamp_for_video(start_time)
659
+ return stream_time_str
660
+ else:
661
+ return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
662
+
663
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
664
+ if frame_id:
665
+ start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
666
+ else:
667
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
668
+ stream_time_str = self._format_timestamp_for_video(start_time)
669
+ return stream_time_str
670
+ else:
671
+ # For streams, use stream_time from stream_info
672
+ stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
673
+ if stream_time_str:
674
+ # Parse the high precision timestamp string to get timestamp
675
+ try:
676
+ # Remove " UTC" suffix and parse
677
+ timestamp_str = stream_time_str.replace(" UTC", "")
678
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
679
+ timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
680
+ return self._format_timestamp_for_stream(timestamp)
681
+ except:
682
+ # Fallback to current time if parsing fails
683
+ return self._format_timestamp_for_stream(time.time())
684
+ else:
685
+ return self._format_timestamp_for_stream(time.time())
686
+
687
+ def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
688
+ """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
689
+ if not stream_info:
690
+ return "00:00:00"
691
+ if precision:
692
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
693
+ return "00:00:00"
694
+ else:
695
+ return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
696
+
697
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
698
+ # If video format, start from 00:00:00
699
+ return "00:00:00"
700
+ else:
701
+ # For streams, use tracking start time or current time with minutes/seconds reset
702
+ if self._tracking_start_time is None:
703
+ # Try to extract timestamp from stream_time string
704
+ stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
705
+ if stream_time_str:
706
+ try:
707
+ # Remove " UTC" suffix and parse
708
+ timestamp_str = stream_time_str.replace(" UTC", "")
709
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
710
+ self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
711
+ except:
712
+ # Fallback to current time if parsing fails
713
+ self._tracking_start_time = time.time()
714
+ else:
715
+ self._tracking_start_time = time.time()
716
+
717
+ dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
718
+ # Reset minutes and seconds to 00:00 for "TOTAL SINCE" format
719
+ dt = dt.replace(minute=0, second=0, microsecond=0)
720
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
721
+
722
+ # ------------------------------------------------------------------ #
723
+ # Helper to merge masks back into detections #
724
+ # ------------------------------------------------------------------ #
725
+ def _attach_masks_to_detections(
726
+ self,
727
+ processed_detections: List[Dict[str, Any]],
728
+ raw_detections: List[Dict[str, Any]],
729
+ iou_threshold: float = 0.5,
730
+ ) -> List[Dict[str, Any]]:
731
+ """
732
+ Attach segmentation masks from the original `raw_detections` list to the
733
+ `processed_detections` list returned after smoothing/tracking.
734
+
735
+ Matching between detections is performed using Intersection-over-Union
736
+ (IoU) of the bounding boxes. For each processed detection we select the
737
+ raw detection with the highest IoU above `iou_threshold` and copy its
738
+ `masks` (or `mask`) field. If no suitable match is found, the detection
739
+ keeps an empty list for `masks` to maintain a consistent schema.
740
+ """
741
+
742
+ if not processed_detections or not raw_detections:
743
+ # Nothing to do – ensure masks key exists for downstream logic.
744
+ for det in processed_detections:
745
+ det.setdefault("masks", [])
746
+ return processed_detections
747
+
748
+ # Track which raw detections have already been matched to avoid
749
+ # assigning the same mask to multiple processed detections.
750
+ used_raw_indices = set()
751
+
752
+ for det in processed_detections:
753
+ best_iou = 0.0
754
+ best_idx = None
755
+
756
+ for idx, raw_det in enumerate(raw_detections):
757
+ if idx in used_raw_indices:
758
+ continue
759
+
760
+ iou = self._compute_iou(det.get("bounding_box"), raw_det.get("bounding_box"))
761
+ if iou > best_iou:
762
+ best_iou = iou
763
+ best_idx = idx
764
+
765
+ if best_idx is not None and best_iou >= iou_threshold:
766
+ raw_det = raw_detections[best_idx]
767
+ masks = raw_det.get("masks", raw_det.get("mask"))
768
+ if masks is not None:
769
+ det["masks"] = masks
770
+ used_raw_indices.add(best_idx)
771
+ else:
772
+ # No adequate match – default to empty list to keep schema consistent.
773
+ det.setdefault("masks", ["EMPTY"])
774
+
775
+ return processed_detections
776
+
777
+ def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
778
+ """
779
+ Extract prediction details for output (category, confidence, bounding box).
780
+ """
781
+ return [
782
+ {
783
+ "category": det.get("category", "unknown"),
784
+ "confidence": det.get("confidence", 0.0),
785
+ "bounding_box": det.get("bounding_box", {}),
786
+ "mask": det.get("mask", det.get("masks", None)) # Accept either key
787
+ }
788
+ for det in detections
789
+ ]
790
+
791
+
792
+ # ------------------------------------------------------------------ #
793
+ # Canonical ID helpers #
794
+ # ------------------------------------------------------------------ #
795
+ def _compute_iou(self, box1: Any, box2: Any) -> float:
796
+ """Compute IoU between two bounding boxes which may be dicts or lists.
797
+ Falls back to 0 when insufficient data is available."""
798
+
799
+ # Helper to convert bbox (dict or list) to [x1, y1, x2, y2]
800
+ def _bbox_to_list(bbox):
801
+ if bbox is None:
802
+ return []
803
+ if isinstance(bbox, list):
804
+ return bbox[:4] if len(bbox) >= 4 else []
805
+ if isinstance(bbox, dict):
806
+ if "xmin" in bbox:
807
+ return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
808
+ if "x1" in bbox:
809
+ return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
810
+ # Fallback: first four numeric values
811
+ values = [v for v in bbox.values() if isinstance(v, (int, float))]
812
+ return values[:4] if len(values) >= 4 else []
813
+ return []
814
+
815
+ l1 = _bbox_to_list(box1)
816
+ l2 = _bbox_to_list(box2)
817
+ if len(l1) < 4 or len(l2) < 4:
818
+ return 0.0
819
+ x1_min, y1_min, x1_max, y1_max = l1
820
+ x2_min, y2_min, x2_max, y2_max = l2
821
+
822
+ # Ensure correct order
823
+ x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
824
+ y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
825
+ x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
826
+ y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
827
+
828
+ inter_x_min = max(x1_min, x2_min)
829
+ inter_y_min = max(y1_min, y2_min)
830
+ inter_x_max = min(x1_max, x2_max)
831
+ inter_y_max = min(y1_max, y2_max)
832
+
833
+ inter_w = max(0.0, inter_x_max - inter_x_min)
834
+ inter_h = max(0.0, inter_y_max - inter_y_min)
835
+ inter_area = inter_w * inter_h
836
+
837
+ area1 = (x1_max - x1_min) * (y1_max - y1_min)
838
+ area2 = (x2_max - x2_min) * (y2_max - y2_min)
839
+ union_area = area1 + area2 - inter_area
840
+
841
+ return (inter_area / union_area) if union_area > 0 else 0.0
842
+
843
    def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
        """Return a stable canonical ID for a raw tracker ID, merging fragmented
        tracks when IoU and temporal constraints indicate they represent the
        same physical object.

        State used (all instance attributes, assumed initialised elsewhere —
        TODO confirm in __init__): `_track_aliases` maps raw IDs to canonical
        IDs, `_canonical_tracks` maps canonical IDs to their last bbox /
        update time / raw-ID set, and `_track_merge_time_window` /
        `_track_merge_iou_threshold` bound the merge search.
        """
        if raw_id is None or bbox is None:
            # Nothing to merge
            return raw_id

        now = time.time()

        # Fast path – raw_id already mapped to a canonical track: refresh the
        # track's bbox/timestamp and return the existing canonical ID.
        if raw_id in self._track_aliases:
            canonical_id = self._track_aliases[raw_id]
            track_info = self._canonical_tracks.get(canonical_id)
            if track_info is not None:
                track_info["last_bbox"] = bbox
                track_info["last_update"] = now
                track_info["raw_ids"].add(raw_id)
            return canonical_id

        # Attempt to merge with an existing canonical track. First track that
        # is both recent enough and spatially overlapping wins.
        for canonical_id, info in self._canonical_tracks.items():
            # Only consider recently updated tracks
            if now - info["last_update"] > self._track_merge_time_window:
                continue
            iou = self._compute_iou(bbox, info["last_bbox"])
            if iou >= self._track_merge_iou_threshold:
                # Merge: alias the new raw ID onto the matched canonical track.
                self._track_aliases[raw_id] = canonical_id
                info["last_bbox"] = bbox
                info["last_update"] = now
                info["raw_ids"].add(raw_id)
                return canonical_id

        # No match – register new canonical track keyed by the raw ID itself.
        canonical_id = raw_id
        self._track_aliases[raw_id] = canonical_id
        self._canonical_tracks[canonical_id] = {
            "last_bbox": bbox,
            "last_update": now,
            "raw_ids": {raw_id},
        }
        return canonical_id
886
+
887
+ def _format_timestamp(self, timestamp: float) -> str:
888
+ """Format a timestamp for human-readable output."""
889
+ return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
890
+
891
+ def _get_tracking_start_time(self) -> str:
892
+ """Get the tracking start time, formatted as a string."""
893
+ if self._tracking_start_time is None:
894
+ return "N/A"
895
+ return self._format_timestamp(self._tracking_start_time)
896
+
897
+ def _set_tracking_start_time(self) -> None:
898
+ """Set the tracking start time to the current time."""
899
+ self._tracking_start_time = time.time()