matrice-analytics 0.1.60__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196)
  1. matrice_analytics/__init__.py +28 -0
  2. matrice_analytics/boundary_drawing_internal/README.md +305 -0
  3. matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
  4. matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
  5. matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
  6. matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
  7. matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
  8. matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
  9. matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
  10. matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
  11. matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
  12. matrice_analytics/post_processing/README.md +455 -0
  13. matrice_analytics/post_processing/__init__.py +732 -0
  14. matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
  15. matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
  16. matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
  17. matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
  18. matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
  19. matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
  20. matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
  21. matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
  22. matrice_analytics/post_processing/config.py +146 -0
  23. matrice_analytics/post_processing/core/__init__.py +63 -0
  24. matrice_analytics/post_processing/core/base.py +704 -0
  25. matrice_analytics/post_processing/core/config.py +3291 -0
  26. matrice_analytics/post_processing/core/config_utils.py +925 -0
  27. matrice_analytics/post_processing/face_reg/__init__.py +43 -0
  28. matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
  29. matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
  30. matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
  31. matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
  32. matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
  33. matrice_analytics/post_processing/ocr/__init__.py +0 -0
  34. matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
  35. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
  36. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
  37. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
  38. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
  39. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
  40. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
  41. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
  42. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
  43. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
  44. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
  45. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
  46. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
  47. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
  48. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
  49. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
  50. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
  51. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
  52. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
  53. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
  54. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
  55. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
  56. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
  57. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
  58. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
  59. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
  60. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
  61. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
  62. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
  63. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
  64. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
  65. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
  66. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
  67. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
  68. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
  69. matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
  70. matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
  71. matrice_analytics/post_processing/post_processor.py +1175 -0
  72. matrice_analytics/post_processing/test_cases/__init__.py +1 -0
  73. matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
  74. matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
  75. matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
  76. matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
  77. matrice_analytics/post_processing/test_cases/test_config.py +852 -0
  78. matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
  79. matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
  80. matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
  81. matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
  82. matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
  83. matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
  84. matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
  85. matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
  86. matrice_analytics/post_processing/usecases/__init__.py +267 -0
  87. matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
  88. matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
  89. matrice_analytics/post_processing/usecases/age_detection.py +842 -0
  90. matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
  91. matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
  92. matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
  93. matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
  94. matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
  95. matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
  96. matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
  97. matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
  98. matrice_analytics/post_processing/usecases/car_service.py +1601 -0
  99. matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
  100. matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
  101. matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
  102. matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
  103. matrice_analytics/post_processing/usecases/color/clip.py +660 -0
  104. matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
  105. matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
  106. matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
  107. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
  108. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
  109. matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
  110. matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
  111. matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
  112. matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
  113. matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
  114. matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
  115. matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
  116. matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
  117. matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
  118. matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
  119. matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
  120. matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
  121. matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
  122. matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
  123. matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
  124. matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
  125. matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
  126. matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
  127. matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
  128. matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
  129. matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
  130. matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
  131. matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
  132. matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
  133. matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
  134. matrice_analytics/post_processing/usecases/leaf.py +821 -0
  135. matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
  136. matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
  137. matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
  138. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
  139. matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
  140. matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
  141. matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
  142. matrice_analytics/post_processing/usecases/parking.py +787 -0
  143. matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
  144. matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
  145. matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
  146. matrice_analytics/post_processing/usecases/people_counting.py +706 -0
  147. matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
  148. matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
  149. matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
  150. matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
  151. matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
  152. matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
  153. matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
  154. matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
  155. matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
  156. matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
  157. matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
  158. matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
  159. matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
  160. matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
  161. matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
  162. matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
  163. matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
  164. matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
  165. matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
  166. matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
  167. matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
  168. matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
  169. matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
  170. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
  171. matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
  172. matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
  173. matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
  174. matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
  175. matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
  176. matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
  177. matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
  178. matrice_analytics/post_processing/utils/__init__.py +150 -0
  179. matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
  180. matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
  181. matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
  182. matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
  183. matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
  184. matrice_analytics/post_processing/utils/color_utils.py +592 -0
  185. matrice_analytics/post_processing/utils/counting_utils.py +182 -0
  186. matrice_analytics/post_processing/utils/filter_utils.py +261 -0
  187. matrice_analytics/post_processing/utils/format_utils.py +293 -0
  188. matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
  189. matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
  190. matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
  191. matrice_analytics/py.typed +0 -0
  192. matrice_analytics-0.1.60.dist-info/METADATA +481 -0
  193. matrice_analytics-0.1.60.dist-info/RECORD +196 -0
  194. matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
  195. matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
  196. matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/usecases/license_plate_detection.py
@@ -0,0 +1,1188 @@
+ from dataclasses import asdict
+ import time
+ from datetime import datetime, timezone
+ import copy  # Added for deep copying detections to preserve original masks
+ from typing import Any, Dict, List, Optional
+ from ..core.base import (
+     BaseProcessor,
+     ProcessingContext,
+     ProcessingResult,
+     ConfigProtocol,
+     ResultFormat,
+ )
+ from ..utils import (
+     filter_by_confidence,
+     filter_by_categories,
+     apply_category_mapping,
+     count_objects_by_category,
+     count_objects_in_zones,
+     calculate_counting_summary,
+     match_results_structure,
+     bbox_smoothing,
+     BBoxSmoothingConfig,
+     BBoxSmoothingTracker,
+ )
+ from dataclasses import dataclass, field
+ from ..core.config import BaseConfig, AlertConfig, ZoneConfig
+
+
+ @dataclass
+ class LicensePlateConfig(BaseConfig):
+     """Configuration for the license plate detection use case in license plate monitoring."""
+
+     # Smoothing configuration
+     enable_smoothing: bool = True
+     smoothing_algorithm: str = "observability"  # "window" or "observability"
+     smoothing_window_size: int = 20
+     smoothing_cooldown_frames: int = 5
+     smoothing_confidence_range_factor: float = 0.5
+
+     # Confidence thresholds
+     confidence_threshold: float = 0.6
+
+     usecase_categories: List[str] = field(default_factory=lambda: ["license_plate"])
+
+     target_categories: List[str] = field(default_factory=lambda: ["license_plate"])
+
+     alert_config: Optional[AlertConfig] = None
+
+     index_to_category: Optional[Dict[int, str]] = field(
+         default_factory=lambda: {
+             0: "license_plate",
+         }
+     )
+
+
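For orientation, a minimal sketch of constructing this config. The `AlertConfig` keyword (`count_thresholds`) is inferred from how `_check_alerts` and `_generate_incidents` read it below, not from a documented constructor, and the sketch assumes `BaseConfig` supplies defaults for its own fields:

```python
# Hedged sketch: AlertConfig's constructor arguments are assumed from usage in this file.
from matrice_analytics.post_processing.core.config import AlertConfig
from matrice_analytics.post_processing.usecases.license_plate_detection import (
    LicensePlateConfig,
    LicensePlateUseCase,
)

config = LicensePlateConfig(
    confidence_threshold=0.5,  # also seeds the tracker thresholds in process()
    enable_smoothing=True,
    alert_config=AlertConfig(count_thresholds={"all": 10}),
)
usecase = LicensePlateUseCase()
```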
+ class LicensePlateUseCase(BaseProcessor):
+
+     # Human-friendly display names for categories
+     CATEGORY_DISPLAY = {
+         "license_plate": "license_plate",
+     }
+
+     def __init__(self):
+         super().__init__("license_plate_detection")
+         self.category = "license_plate"
+
+         # List of categories to track
+         self.target_categories = ["license_plate"]
+
+         self.CASE_TYPE: Optional[str] = "license_plate_detection"
+         self.CASE_VERSION: Optional[str] = "1.3"
+
+         # Initialize smoothing tracker
+         self.smoothing_tracker = None
+
+         # Initialize advanced tracker (will be created on first use)
+         self.tracker = None
+
+         # Initialize tracking state variables
+         self._total_frame_counter = 0
+         self._global_frame_offset = 0
+
+         # Track start time for "TOTAL SINCE" calculation
+         self._tracking_start_time = None
+
+         # ------------------------------------------------------------------ #
+         # Canonical tracking aliasing to avoid duplicate counts               #
+         # ------------------------------------------------------------------ #
+         # Maps raw tracker-generated IDs to stable canonical IDs that persist
+         # even if the underlying tracker re-assigns a new ID after a short
+         # interruption. This mirrors the logic used in people_counting to
+         # provide accurate unique counting.
+         self._track_aliases: Dict[Any, Any] = {}
+         self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
+         # Tunable parameters – adjust if necessary for specific scenarios
+         self._track_merge_iou_threshold: float = 0.05  # IoU ≥ 0.05 → merge candidates
+         self._track_merge_time_window: float = 7.0  # seconds within which to merge
+
+         self._ascending_alert_list: List[int] = []
+         self.current_incident_end_timestamp: str = "N/A"
+
+     def process(
+         self,
+         data: Any,
+         config: ConfigProtocol,
+         context: Optional[ProcessingContext] = None,
+         stream_info: Optional[Dict[str, Any]] = None,
+     ) -> ProcessingResult:
+         """
+         Main entry point for post-processing.
+         Applies category mapping, smoothing, counting, alerting, and summary generation.
+         Returns a ProcessingResult with all relevant outputs.
+         """
+         start_time = time.time()
+         # Ensure config is the correct type
+         if not isinstance(config, LicensePlateConfig):
+             return self.create_error_result(
+                 "Invalid config type",
+                 usecase=self.name,
+                 category=self.category,
+                 context=context,
+             )
+         if context is None:
+             context = ProcessingContext()
+
+         # Detect input format and store in context
+         input_format = match_results_structure(data)
+         context.input_format = input_format
+         context.confidence_threshold = config.confidence_threshold
+
+         # Step 1: Confidence filtering
+         if config.confidence_threshold is not None:
+             processed_data = filter_by_confidence(data, config.confidence_threshold)
+         else:
+             processed_data = data
+             self.logger.debug(
+                 "Confidence filtering skipped: no confidence threshold was provided"
+             )
+
+         # Step 2: Apply category mapping if provided
+         if config.index_to_category:
+             processed_data = apply_category_mapping(
+                 processed_data, config.index_to_category
+             )
+
+         # Step 3: Category filtering
+         if config.target_categories:
+             processed_data = [
+                 d for d in processed_data if d.get("category") in config.target_categories
+             ]
+
+         # Step 4: Apply bbox smoothing if enabled
+         # Deep-copy detections so that we preserve the original masks before any
+         # smoothing/tracking logic potentially removes them.
+         raw_processed_data = [copy.deepcopy(det) for det in processed_data]
+         if config.enable_smoothing:
+             if self.smoothing_tracker is None:
+                 smoothing_config = BBoxSmoothingConfig(
+                     smoothing_algorithm=config.smoothing_algorithm,
+                     window_size=config.smoothing_window_size,
+                     cooldown_frames=config.smoothing_cooldown_frames,
+                     confidence_threshold=config.confidence_threshold,
+                     confidence_range_factor=config.smoothing_confidence_range_factor,
+                     enable_smoothing=True,
+                 )
+                 self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
+
+             processed_data = bbox_smoothing(
+                 processed_data, self.smoothing_tracker.config, self.smoothing_tracker
+             )
+             # Masks are re-attached after smoothing/tracking (see below)
+
+         # Step 5: Advanced tracking (BYTETracker-like)
+         try:
+             from ..advanced_tracker import AdvancedTracker
+             from ..advanced_tracker.config import TrackerConfig
+
+             if self.tracker is None:
+                 # Configure tracker thresholds based on the use-case confidence threshold so that
+                 # low-confidence detections (e.g. < 0.7) can still be initialised as tracks when
+                 # the user passes a lower `confidence_threshold` in the post-processing config.
+                 if config.confidence_threshold is not None:
+                     tracker_config = TrackerConfig(
+                         track_high_thresh=float(config.confidence_threshold),
+                         # Allow even lower detections to participate in secondary association
+                         track_low_thresh=max(
+                             0.05, float(config.confidence_threshold) / 2
+                         ),
+                         new_track_thresh=float(config.confidence_threshold),
+                     )
+                 else:
+                     tracker_config = TrackerConfig()
+                 self.tracker = AdvancedTracker(tracker_config)
+                 self.logger.info(
+                     "Initialized AdvancedTracker for license plate monitoring with thresholds: "
+                     f"high={tracker_config.track_high_thresh}, "
+                     f"low={tracker_config.track_low_thresh}, "
+                     f"new={tracker_config.new_track_thresh}"
+                 )
+
+             # The tracker expects the data in the same format as the input;
+             # it will add track_id and frame_id to each detection
+             processed_data = self.tracker.update(processed_data)
+         except Exception as e:
+             # If the advanced tracker fails, fall back to unsmoothed detections
+             self.logger.warning(f"AdvancedTracker failed: {e}")
+
+         # Update tracking state for total count per label
+         self._update_tracking_state(processed_data)
+
+         # ------------------------------------------------------------------ #
+         # Re-attach segmentation masks that were present in the original input
+         # but may have been stripped during smoothing/tracking. We match each
+         # processed detection back to the raw detection with the highest IoU
+         # and copy over its "masks" field (if available).
+         # ------------------------------------------------------------------ #
+         processed_data = self._attach_masks_to_detections(
+             processed_data, raw_processed_data
+         )
+
+         # Update frame counter
+         self._total_frame_counter += 1
+
+         # Extract frame information from stream_info
+         frame_number = None
+         if stream_info:
+             input_settings = stream_info.get("input_settings", {})
+             start_frame = input_settings.get("start_frame")
+             end_frame = input_settings.get("end_frame")
+             # If start and end frame are the same, it's a single frame
+             if (
+                 start_frame is not None
+                 and end_frame is not None
+                 and start_frame == end_frame
+             ):
+                 frame_number = start_frame
+
+         # Compute summaries and alerts
+         general_counting_summary = calculate_counting_summary(data)
+         counting_summary = self._count_categories(processed_data, config)
+         # Add total unique counts after tracking using only local state
+         total_counts = self.get_total_counts()
+         counting_summary["total_counts"] = total_counts
+
+         alerts = self._check_alerts(counting_summary, frame_number, config)
+         predictions = self._extract_predictions(processed_data)
+
+         # Step: Generate structured events and tracking stats with frame-based keys
+         incidents_list = self._generate_incidents(
+             counting_summary, alerts, config, frame_number, stream_info
+         )
+         tracking_stats_list = self._generate_tracking_stats(
+             counting_summary, alerts, config, frame_number, stream_info
+         )
+         # business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, frame_number, stream_info, is_empty=False)
+         business_analytics_list = []
+         summary_list = self._generate_summary(
+             counting_summary,
+             incidents_list,
+             tracking_stats_list,
+             business_analytics_list,
+             alerts,
+         )
+
+         # Extract frame-based dictionaries from the lists
+         incidents = incidents_list[0] if incidents_list else {}
+         tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
+         business_analytics = (
+             business_analytics_list[0] if business_analytics_list else {}
+         )
+         summary = summary_list[0] if summary_list else {}
+         agg_summary = {
+             str(frame_number): {
+                 "incidents": incidents,
+                 "tracking_stats": tracking_stats,
+                 "business_analytics": business_analytics,
+                 "alerts": alerts,
+                 "human_text": summary,
+             }
+         }
+
+         context.mark_completed()
+
+         # Build result object following the new pattern
+
+         result = self.create_result(
+             data={"agg_summary": agg_summary},
+             usecase=self.name,
+             category=self.category,
+             context=context,
+         )
+
+         return result
+
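A hedged usage sketch of the `process` entry point. The detection dict layout (`category`/`confidence`/`bounding_box` with `xmin`..`ymax`) is inferred from `_count_categories` and `_compute_iou` below; `stream_info` marks a single frame so the `agg_summary` key is the frame number, and the sketch assumes the `ProcessingResult` returned by `create_result` exposes the payload as `result.data`:

```python
frame_detections = [
    {
        "category": 0,  # mapped to "license_plate" via index_to_category
        "confidence": 0.82,
        "bounding_box": {"xmin": 100, "ymin": 220, "xmax": 260, "ymax": 270},
    }
]
stream_info = {
    "input_settings": {"start_frame": 42, "end_frame": 42, "original_fps": 30}
}

result = usecase.process(frame_detections, config, stream_info=stream_info)
agg = result.data["agg_summary"]["42"]  # incidents, tracking_stats, alerts, human_text
```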
+     def _check_alerts(
+         self, summary: dict, frame_number: Any, config: LicensePlateConfig
+     ) -> List[Dict]:
+         """
+         Check if any alert thresholds are exceeded and return alert dicts.
+         """
+
+         def get_trend(data, lookback=900, threshold=0.6):
+             """
+             Determine if the trend is ascending or descending based on actual value progression.
+             Works with the severity values 0, 1, 2, 3 (not just binary).
+             """
+             window = data[-lookback:] if len(data) >= lookback else data
+             if len(window) < 2:
+                 return True  # not enough data to determine trend
+             increasing = 0
+             total = 0
+             for i in range(1, len(window)):
+                 if window[i] >= window[i - 1]:
+                     increasing += 1
+                 total += 1
+             ratio = increasing / total
+             if ratio >= threshold:
+                 return True
+             elif ratio <= (1 - threshold):
+                 return False
+             return True  # mixed trend: default to ascending
+
+         frame_key = str(frame_number) if frame_number is not None else "current_frame"
+         alerts = []
+         total_detections = summary.get(
+             "total_count", 0
+         )  # CURRENT combined total count of all classes
+         total_counts_dict = summary.get(
+             "total_counts", {}
+         )  # TOTAL cumulative counts per class
+         cumulative_total = (
+             sum(total_counts_dict.values()) if total_counts_dict else 0
+         )  # TOTAL combined cumulative count
+         per_category_count = summary.get(
+             "per_category_count", {}
+         )  # CURRENT count per class
+
+         if not config.alert_config:
+             return alerts
+
+         total = summary.get("total_count", 0)
+         if (
+             hasattr(config.alert_config, "count_thresholds")
+             and config.alert_config.count_thresholds
+         ):
+
+             for category, threshold in config.alert_config.count_thresholds.items():
+                 if category == "all" and total > threshold:
+
+                     alerts.append(
+                         {
+                             "alert_type": (
+                                 getattr(config.alert_config, "alert_type", ["Default"])
+                                 if hasattr(config.alert_config, "alert_type")
+                                 else ["Default"]
+                             ),
+                             "alert_id": "alert_" + category + "_" + frame_key,
+                             "incident_category": self.CASE_TYPE,
+                             "threshold_level": threshold,
+                             "ascending": get_trend(
+                                 self._ascending_alert_list, lookback=900, threshold=0.8
+                             ),
+                             "settings": {
+                                 t: v
+                                 for t, v in zip(
+                                     (
+                                         getattr(
+                                             config.alert_config,
+                                             "alert_type",
+                                             ["Default"],
+                                         )
+                                         if hasattr(config.alert_config, "alert_type")
+                                         else ["Default"]
+                                     ),
+                                     (
+                                         getattr(
+                                             config.alert_config, "alert_value", ["JSON"]
+                                         )
+                                         if hasattr(config.alert_config, "alert_value")
+                                         else ["JSON"]
+                                     ),
+                                 )
+                             },
+                         }
+                     )
+                 elif category in summary.get("per_category_count", {}):
+                     count = summary.get("per_category_count", {})[category]
+                     if count > threshold:  # Alert only when the count EXCEEDS the threshold
+                         alerts.append(
+                             {
+                                 "alert_type": (
+                                     getattr(
+                                         config.alert_config, "alert_type", ["Default"]
+                                     )
+                                     if hasattr(config.alert_config, "alert_type")
+                                     else ["Default"]
+                                 ),
+                                 "alert_id": "alert_" + category + "_" + frame_key,
+                                 "incident_category": self.CASE_TYPE,
+                                 "threshold_level": threshold,
+                                 "ascending": get_trend(
+                                     self._ascending_alert_list,
+                                     lookback=900,
+                                     threshold=0.8,
+                                 ),
+                                 "settings": {
+                                     t: v
+                                     for t, v in zip(
+                                         (
+                                             getattr(
+                                                 config.alert_config,
+                                                 "alert_type",
+                                                 ["Default"],
+                                             )
+                                             if hasattr(
+                                                 config.alert_config, "alert_type"
+                                             )
+                                             else ["Default"]
+                                         ),
+                                         (
+                                             getattr(
+                                                 config.alert_config,
+                                                 "alert_value",
+                                                 ["JSON"],
+                                             )
+                                             if hasattr(
+                                                 config.alert_config, "alert_value"
+                                             )
+                                             else ["JSON"]
+                                         ),
+                                     )
+                                 },
+                             }
+                         )
+         return alerts
+
+     def _generate_incidents(
+         self,
+         counting_summary: Dict,
+         alerts: List,
+         config: LicensePlateConfig,
+         frame_number: Optional[int] = None,
+         stream_info: Optional[Dict[str, Any]] = None,
+     ) -> List[Dict]:
+         """Generate structured events for the output format with frame-based keys."""
+
+         # Use frame number as key, fallback to 'current_frame' if not available
+         frame_key = str(frame_number) if frame_number is not None else "current_frame"
+         incidents = []
+         total_detections = counting_summary.get("total_count", 0)
+         current_timestamp = self._get_current_timestamp_str(stream_info)
+         camera_info = self.get_camera_info_from_stream(stream_info)
+
+         self._ascending_alert_list = (
+             self._ascending_alert_list[-900:]
+             if len(self._ascending_alert_list) > 900
+             else self._ascending_alert_list
+         )
+
+         if total_detections > 0:
+             # Determine event level based on thresholds
+             level = "low"
+             intensity = 5.0
+             start_timestamp = self._get_start_timestamp_str(stream_info)
+             if start_timestamp and self.current_incident_end_timestamp == "N/A":
+                 self.current_incident_end_timestamp = "Incident still active"
+             elif (
+                 start_timestamp
+                 and self.current_incident_end_timestamp == "Incident still active"
+             ):
+                 if (
+                     len(self._ascending_alert_list) >= 15
+                     and sum(self._ascending_alert_list[-15:]) / 15 < 1.5
+                 ):
+                     self.current_incident_end_timestamp = current_timestamp
+             elif (
+                 self.current_incident_end_timestamp != "Incident still active"
+                 and self.current_incident_end_timestamp != "N/A"
+             ):
+                 self.current_incident_end_timestamp = "N/A"
+
+             if config.alert_config and config.alert_config.count_thresholds:
+                 threshold = config.alert_config.count_thresholds.get("all", 15)
+                 intensity = min(10.0, (total_detections / threshold) * 10)
+
+                 if intensity >= 9:
+                     level = "critical"
+                     self._ascending_alert_list.append(3)
+                 elif intensity >= 7:
+                     level = "significant"
+                     self._ascending_alert_list.append(2)
+                 elif intensity >= 5:
+                     level = "medium"
+                     self._ascending_alert_list.append(1)
+                 else:
+                     level = "low"
+                     self._ascending_alert_list.append(0)
+             else:
+                 if total_detections > 30:
+                     level = "critical"
+                     intensity = 10.0
+                     self._ascending_alert_list.append(3)
+                 elif total_detections > 25:
+                     level = "significant"
+                     intensity = 9.0
+                     self._ascending_alert_list.append(2)
+                 elif total_detections > 15:
+                     level = "medium"
+                     intensity = 7.0
+                     self._ascending_alert_list.append(1)
+                 else:
+                     level = "low"
+                     intensity = min(10.0, total_detections / 3.0)
+                     self._ascending_alert_list.append(0)
+
+             # Generate human text in the new format
+             human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
+             human_text_lines.append(f"\tSeverity Level: {self.CASE_TYPE}: {level}")
+             human_text = "\n".join(human_text_lines)
+
+             alert_settings = []
+             if config.alert_config and hasattr(config.alert_config, "alert_type"):
+                 alert_settings.append(
+                     {
+                         "alert_type": (
+                             getattr(config.alert_config, "alert_type", ["Default"])
+                             if hasattr(config.alert_config, "alert_type")
+                             else ["Default"]
+                         ),
+                         "incident_category": self.CASE_TYPE,
+                         "threshold_level": (
+                             config.alert_config.count_thresholds
+                             if hasattr(config.alert_config, "count_thresholds")
+                             else {}
+                         ),
+                         "ascending": True,
+                         "settings": {
+                             t: v
+                             for t, v in zip(
+                                 (
+                                     getattr(
+                                         config.alert_config, "alert_type", ["Default"]
+                                     )
+                                     if hasattr(config.alert_config, "alert_type")
+                                     else ["Default"]
+                                 ),
+                                 (
+                                     getattr(
+                                         config.alert_config, "alert_value", ["JSON"]
+                                     )
+                                     if hasattr(config.alert_config, "alert_value")
+                                     else ["JSON"]
+                                 ),
+                             )
+                         },
+                     }
+                 )
+
+             event = self.create_incident(
+                 incident_id=self.CASE_TYPE + "_" + str(frame_number),
+                 incident_type=self.CASE_TYPE,
+                 severity_level=level,
+                 human_text=human_text,
+                 camera_info=camera_info,
+                 alerts=alerts,
+                 alert_settings=alert_settings,
+                 start_time=start_timestamp,
+                 end_time=self.current_incident_end_timestamp,
+                 level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7},
+             )
+             incidents.append(event)
+
+         else:
+             self._ascending_alert_list.append(0)
+             incidents.append({})
+
+         return incidents
+
+     def _generate_tracking_stats(
+         self,
+         counting_summary: Dict,
+         alerts: Any,
+         config: LicensePlateConfig,
+         frame_number: Optional[int] = None,
+         stream_info: Optional[Dict[str, Any]] = None,
+     ) -> List[Dict]:
+         """Generate structured tracking stats for the output format with frame-based keys, including track_ids_info and detections with masks."""
+         tracking_stats = []
+
+         total_detections = counting_summary.get("total_count", 0)
+         total_counts = counting_summary.get("total_counts", {})
+         cumulative_total = sum(total_counts.values()) if total_counts else 0
+         per_category_count = counting_summary.get("per_category_count", {})
+
+         track_ids_info = self._get_track_ids_info(
+             counting_summary.get("detections", [])
+         )
+
+         current_timestamp = self._get_current_timestamp_str(
+             stream_info, precision=False
+         )
+         start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
+
+         # Create high precision timestamps for input_timestamp and reset_timestamp
+         high_precision_start_timestamp = self._get_current_timestamp_str(
+             stream_info, precision=True
+         )
+         high_precision_reset_timestamp = self._get_start_timestamp_str(
+             stream_info, precision=True
+         )
+
+         camera_info = self.get_camera_info_from_stream(stream_info)
+         human_text_lines = []
+
+         # CURRENT FRAME section
+         human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
+         if total_detections > 0:
+             category_counts = [
+                 f"{count} {cat}" for cat, count in per_category_count.items()
+             ]
+             if len(category_counts) == 1:
+                 detection_text = category_counts[0] + " detected"
+             elif len(category_counts) == 2:
+                 detection_text = (
+                     f"{category_counts[0]} and {category_counts[1]} detected"
+                 )
+             else:
+                 detection_text = f"{', '.join(category_counts[:-1])}, and {category_counts[-1]} detected"
+             human_text_lines.append(f"\t- {detection_text}")
+         else:
+             human_text_lines.append("\t- No detections")
+
+         human_text_lines.append("")  # spacing
+
+         # TOTAL SINCE section
+         human_text_lines.append(f"TOTAL SINCE {start_timestamp}:")
+         human_text_lines.append(f"\t- Total Detected: {cumulative_total}")
+         # Add category-wise counts
+         if total_counts:
+             for cat, count in total_counts.items():
+                 if count > 0:  # Only include categories with non-zero counts
+                     human_text_lines.append(f"\t- {cat}: {count}")
+         # Build current_counts array in the expected format
+         current_counts = []
+         for cat, count in per_category_count.items():
+             if (
+                 count > 0 or total_detections > 0
+             ):  # Include even if 0 when there are detections
+                 current_counts.append({"category": cat, "count": count})
+
+         # Include detections with masks from counting_summary
+         # Prepare detections without confidence scores (as per eg.json)
+         detections = []
+         for detection in counting_summary.get("detections", []):
+             bbox = detection.get("bounding_box", {})
+             category = detection.get("category", "person")
+             # Include segmentation if available (like in eg.json)
+             if detection.get("masks"):
+                 segmentation = detection.get("masks", [])
+                 detection_obj = self.create_detection_object(
+                     category, bbox, segmentation=segmentation
+                 )
+             elif detection.get("segmentation"):
+                 segmentation = detection.get("segmentation")
+                 detection_obj = self.create_detection_object(
+                     category, bbox, segmentation=segmentation
+                 )
+             elif detection.get("mask"):
+                 segmentation = detection.get("mask")
+                 detection_obj = self.create_detection_object(
+                     category, bbox, segmentation=segmentation
+                 )
+             else:
+                 detection_obj = self.create_detection_object(category, bbox)
+             detections.append(detection_obj)
+
+         # Build alert_settings array in the expected format
+         alert_settings = []
+         if config.alert_config and hasattr(config.alert_config, "alert_type"):
+             alert_settings.append(
+                 {
+                     "alert_type": (
+                         getattr(config.alert_config, "alert_type", ["Default"])
+                         if hasattr(config.alert_config, "alert_type")
+                         else ["Default"]
+                     ),
+                     "incident_category": self.CASE_TYPE,
+                     "threshold_level": (
+                         config.alert_config.count_thresholds
+                         if hasattr(config.alert_config, "count_thresholds")
+                         else {}
+                     ),
+                     "ascending": True,
+                     "settings": {
+                         t: v
+                         for t, v in zip(
+                             (
+                                 getattr(config.alert_config, "alert_type", ["Default"])
+                                 if hasattr(config.alert_config, "alert_type")
+                                 else ["Default"]
+                             ),
+                             (
+                                 getattr(config.alert_config, "alert_value", ["JSON"])
+                                 if hasattr(config.alert_config, "alert_value")
+                                 else ["JSON"]
+                             ),
+                         )
+                     },
+                 }
+             )
+
+         if alerts:
+             for alert in alerts:
+                 human_text_lines.append(
+                     f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}"
+                 )
+         else:
+             human_text_lines.append("Alerts: None")
+
+         human_text = "\n".join(human_text_lines)
+         reset_settings = [
+             {"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}
+         ]
+
+         tracking_stat = self.create_tracking_stats(
+             total_counts=total_counts,
+             current_counts=current_counts,
+             detections=detections,
+             human_text=human_text,
+             camera_info=camera_info,
+             alerts=alerts,
+             alert_settings=alert_settings,
+             reset_settings=reset_settings,
+             start_time=high_precision_start_timestamp,
+             reset_time=high_precision_reset_timestamp,
+         )
+
+         tracking_stats.append(tracking_stat)
+         return tracking_stats
+
+     def _generate_business_analytics(
+         self,
+         counting_summary: Dict,
+         zone_analysis: Dict,
+         config: LicensePlateConfig,
+         stream_info: Optional[Dict[str, Any]] = None,
+         is_empty=False,
+     ) -> List[Dict]:
+         """Generate standardized business analytics for the agg_summary structure."""
+         if is_empty:
+             return []
+
+         # -----IF YOUR USECASE NEEDS BUSINESS ANALYTICS, YOU CAN USE THIS FUNCTION------#
+         # camera_info = self.get_camera_info_from_stream(stream_info)
+         # business_analytics = self.create_business_analytics(analysis_name, statistics,
+         #     human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
+         #     reset_settings)
+         # return business_analytics
+         return []
+
+     def _generate_summary(
+         self,
+         summary: dict,
+         incidents: List,
+         tracking_stats: List,
+         business_analytics: List,
+         alerts: List,
+     ) -> List[Dict]:
+         """
+         Generate a human_text dict for the tracking stats, incidents, business analytics and alerts.
+         """
+         lines = {}
+         lines["Application Name"] = self.CASE_TYPE
+         lines["Application Version"] = self.CASE_VERSION
+         if len(incidents) > 0:
+             lines["Incidents:"] = (
+                 f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
+             )
+         if len(tracking_stats) > 0:
+             lines["Tracking Statistics:"] = (
+                 f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
+             )
+         if len(business_analytics) > 0:
+             lines["Business Analytics:"] = (
+                 f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
+             )
+
+         if (
+             len(incidents) == 0
+             and len(tracking_stats) == 0
+             and len(business_analytics) == 0
+         ):
+             lines["Summary"] = "No Summary Data"
+
+         return [lines]
+
+     def _count_categories(self, detections: list, config: LicensePlateConfig) -> dict:
+         """
+         Count the number of detections per category and return a summary dict.
+         The detections list is expected to have 'track_id' (from the tracker), 'category', 'bounding_box', 'masks', etc.
+         The output structure includes 'track_id' and 'masks' for each detection, as per the AdvancedTracker output.
+         """
+         counts = {}
+         valid_detections = []
+         for det in detections:
+             cat = det.get("category", "unknown")
+             if not all(
+                 k in det for k in ["category", "confidence", "bounding_box"]
+             ):  # Validate required fields
+                 self.logger.warning(f"Skipping invalid detection: {det}")
+                 continue
+             counts[cat] = counts.get(cat, 0) + 1
+             valid_detections.append(
+                 {
+                     "bounding_box": det.get("bounding_box"),
+                     "category": det.get("category"),
+                     "confidence": det.get("confidence"),
+                     "track_id": det.get("track_id"),
+                     "frame_id": det.get("frame_id"),
+                     "masks": det.get(
+                         "masks", det.get("mask", [])
+                     ),  # Include masks, falling back to an empty list
+                 }
+             )
+         self.logger.debug(f"Valid detections after filtering: {len(valid_detections)}")
+         return {
+             "total_count": sum(counts.values()),
+             "per_category_count": counts,
+             "detections": valid_detections,
+         }
+
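To make the summary contract concrete, a sketch (values illustrative) of what `_count_categories` returns for one tracked detection:

```python
summary = {
    "total_count": 1,
    "per_category_count": {"license_plate": 1},
    "detections": [
        {
            "bounding_box": {"xmin": 100, "ymin": 220, "xmax": 260, "ymax": 270},
            "category": "license_plate",
            "confidence": 0.82,
            "track_id": 7,
            "frame_id": 42,
            "masks": [],  # empty when the detector supplied no mask
        }
    ],
}
```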
+     def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
+         """
+         Get detailed information about track IDs (per frame).
+         """
+         # Collect all track_ids in this frame
+         frame_track_ids = set()
+         for det in detections:
+             tid = det.get("track_id")
+             if tid is not None:
+                 frame_track_ids.add(tid)
+         # Use the persistent total set for unique counting
+         total_track_ids = set()
+         for s in getattr(self, "_per_category_total_track_ids", {}).values():
+             total_track_ids.update(s)
+         return {
+             "total_count": len(total_track_ids),
+             "current_frame_count": len(frame_track_ids),
+             "total_unique_track_ids": len(total_track_ids),
+             "current_frame_track_ids": list(frame_track_ids),
+             "last_update_time": time.time(),
+             "total_frames_processed": getattr(self, "_total_frame_counter", 0),
+         }
+
+     def _update_tracking_state(self, detections: list):
+         """
+         Track unique track_ids per category for the total count after tracking.
+         Applies canonical ID merging to avoid duplicate counting when the underlying
+         tracker loses an object temporarily and assigns a new ID.
+         """
+         # Lazily initialise storage dicts
+         if not hasattr(self, "_per_category_total_track_ids"):
+             self._per_category_total_track_ids = {
+                 cat: set() for cat in self.target_categories
+             }
+         self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
+
+         for det in detections:
+             cat = det.get("category")
+             raw_track_id = det.get("track_id")
+             if cat not in self.target_categories or raw_track_id is None:
+                 continue
+             bbox = det.get("bounding_box", det.get("bbox"))
+             canonical_id = self._merge_or_register_track(raw_track_id, bbox)
+             # Propagate canonical ID back to the detection so downstream logic uses it
+             det["track_id"] = canonical_id
+
+             self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
+             self._current_frame_track_ids[cat].add(canonical_id)
+
+     def get_total_counts(self):
+         """
+         Return total unique track_id count for each category.
+         """
+         return {
+             cat: len(ids)
+             for cat, ids in getattr(self, "_per_category_total_track_ids", {}).items()
+         }
+
+     def _format_timestamp_for_video(self, timestamp: float) -> str:
+         """Format timestamp for video chunks (HH:MM:SS.ms format)."""
+         hours = int(timestamp // 3600)
+         minutes = int((timestamp % 3600) // 60)
+         seconds = round(float(timestamp % 60), 2)
+         return f"{hours:02d}:{minutes:02d}:{seconds:05.2f}"
+
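A standalone sketch of the video-timestamp arithmetic above (mirroring `_format_timestamp_for_video`, not an import from the package):

```python
def format_video_ts(ts: float) -> str:
    hours = int(ts // 3600)           # 3725.5 s -> 1 h
    minutes = int((ts % 3600) // 60)  # -> 2 min
    seconds = ts % 60                 # -> 5.5 s
    return f"{hours:02d}:{minutes:02d}:{seconds:05.2f}"

assert format_video_ts(3725.5) == "01:02:05.50"
```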
+     def _format_timestamp_for_stream(self, timestamp: float) -> str:
+         """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
+         dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
+         return dt.strftime("%Y:%m:%d %H:%M:%S")
+
+     def _get_current_timestamp_str(
+         self,
+         stream_info: Optional[Dict[str, Any]],
+         precision=False,
+         frame_id: Optional[str] = None,
+     ) -> str:
+         """Get formatted current timestamp based on stream type."""
+         if not stream_info:
+             return "00:00:00.00"
+         if precision:
+             if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+                 if frame_id:
+                     start_time = int(frame_id) / stream_info.get(
+                         "input_settings", {}
+                     ).get("original_fps", 30)
+                 else:
+                     start_time = stream_info.get("input_settings", {}).get(
+                         "start_frame", 30
+                     ) / stream_info.get("input_settings", {}).get("original_fps", 30)
+                 stream_time_str = self._format_timestamp_for_video(start_time)
+                 return stream_time_str
+             else:
+                 return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+
+         if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+             if frame_id:
+                 start_time = int(frame_id) / stream_info.get("input_settings", {}).get(
+                     "original_fps", 30
+                 )
+             else:
+                 start_time = stream_info.get("input_settings", {}).get(
+                     "start_frame", 30
+                 ) / stream_info.get("input_settings", {}).get("original_fps", 30)
+             stream_time_str = self._format_timestamp_for_video(start_time)
+             return stream_time_str
+         else:
+             # For streams, use stream_time from stream_info
+             stream_time_str = (
+                 stream_info.get("input_settings", {})
+                 .get("stream_info", {})
+                 .get("stream_time", "")
+             )
+             if stream_time_str:
+                 # Parse the high precision timestamp string to get a timestamp
+                 try:
+                     # Remove " UTC" suffix and parse
+                     timestamp_str = stream_time_str.replace(" UTC", "")
+                     dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                     timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
+                     return self._format_timestamp_for_stream(timestamp)
+                 except Exception:
+                     # Fallback to current time if parsing fails
+                     return self._format_timestamp_for_stream(time.time())
+             else:
+                 return self._format_timestamp_for_stream(time.time())
+
+     def _get_start_timestamp_str(
+         self, stream_info: Optional[Dict[str, Any]], precision=False
+     ) -> str:
+         """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
+         if not stream_info:
+             return "00:00:00"
+         if precision:
+             if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+                 return "00:00:00"
+             else:
+                 return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+
+         if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+             # If video format, start from 00:00:00
+             return "00:00:00"
+         else:
+             # For streams, use tracking start time or current time with minutes/seconds reset
+             if self._tracking_start_time is None:
+                 # Try to extract the timestamp from the stream_time string
+                 stream_time_str = (
+                     stream_info.get("input_settings", {})
+                     .get("stream_info", {})
+                     .get("stream_time", "")
+                 )
+                 if stream_time_str:
+                     try:
+                         # Remove " UTC" suffix and parse
+                         timestamp_str = stream_time_str.replace(" UTC", "")
+                         dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                         self._tracking_start_time = dt.replace(
+                             tzinfo=timezone.utc
+                         ).timestamp()
+                     except Exception:
+                         # Fallback to current time if parsing fails
+                         self._tracking_start_time = time.time()
+                 else:
+                     self._tracking_start_time = time.time()
+
+             dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+             # Reset minutes and seconds to 00:00 for the "TOTAL SINCE" format
+             dt = dt.replace(minute=0, second=0, microsecond=0)
+             return dt.strftime("%Y:%m:%d %H:%M:%S")
+
+     # ------------------------------------------------------------------ #
+     # Helper to merge masks back into detections                          #
+     # ------------------------------------------------------------------ #
+     def _attach_masks_to_detections(
+         self,
+         processed_detections: List[Dict[str, Any]],
+         raw_detections: List[Dict[str, Any]],
+         iou_threshold: float = 0.5,
+     ) -> List[Dict[str, Any]]:
+         """
+         Attach segmentation masks from the original `raw_detections` list to the
+         `processed_detections` list returned after smoothing/tracking.
+
+         Matching between detections is performed using Intersection-over-Union
+         (IoU) of the bounding boxes. For each processed detection we select the
+         raw detection with the highest IoU above `iou_threshold` and copy its
+         `masks` (or `mask`) field. If no suitable match is found, the detection
+         gets a placeholder `masks` entry to maintain a consistent schema.
+         """
+
+         if not processed_detections or not raw_detections:
+             # Nothing to do – ensure the masks key exists for downstream logic.
+             for det in processed_detections:
+                 det.setdefault("masks", [])
+             return processed_detections
+
+         # Track which raw detections have already been matched to avoid
+         # assigning the same mask to multiple processed detections.
+         used_raw_indices = set()
+
+         for det in processed_detections:
+             best_iou = 0.0
+             best_idx = None
+
+             for idx, raw_det in enumerate(raw_detections):
+                 if idx in used_raw_indices:
+                     continue
+
+                 iou = self._compute_iou(
+                     det.get("bounding_box"), raw_det.get("bounding_box")
+                 )
+                 if iou > best_iou:
+                     best_iou = iou
+                     best_idx = idx
+
+             if best_idx is not None and best_iou >= iou_threshold:
+                 raw_det = raw_detections[best_idx]
+                 masks = raw_det.get("masks", raw_det.get("mask"))
+                 if masks is not None:
+                     det["masks"] = masks
+                 used_raw_indices.add(best_idx)
+             else:
+                 # No adequate match – use a placeholder to keep the schema consistent.
+                 det.setdefault("masks", ["EMPTY"])
+
+         return processed_detections
+
+     def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
+         """
+         Extract prediction details for output (category, confidence, bounding box).
+         """
+         return [
+             {
+                 "category": det.get("category", "unknown"),
+                 "confidence": det.get("confidence", 0.0),
+                 "bounding_box": det.get("bounding_box", {}),
+                 "mask": det.get("mask", det.get("masks", None)),  # Accept either key
+             }
+             for det in detections
+         ]
+
+     # ------------------------------------------------------------------ #
+     # Canonical ID helpers                                                #
+     # ------------------------------------------------------------------ #
+     def _compute_iou(self, box1: Any, box2: Any) -> float:
+         """Compute IoU between two bounding boxes which may be dicts or lists.
+         Falls back to 0 when insufficient data is available."""
+
+         # Helper to convert a bbox (dict or list) to [x1, y1, x2, y2]
+         def _bbox_to_list(bbox):
+             if bbox is None:
+                 return []
+             if isinstance(bbox, list):
+                 return bbox[:4] if len(bbox) >= 4 else []
+             if isinstance(bbox, dict):
+                 if "xmin" in bbox:
+                     return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
+                 if "x1" in bbox:
+                     return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
+                 # Fallback: first four numeric values
+                 values = [v for v in bbox.values() if isinstance(v, (int, float))]
+                 return values[:4] if len(values) >= 4 else []
+             return []
+
+         l1 = _bbox_to_list(box1)
+         l2 = _bbox_to_list(box2)
+         if len(l1) < 4 or len(l2) < 4:
+             return 0.0
+         x1_min, y1_min, x1_max, y1_max = l1
+         x2_min, y2_min, x2_max, y2_max = l2
+
+         # Ensure correct order
+         x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
+         y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
+         x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
+         y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
+
+         inter_x_min = max(x1_min, x2_min)
+         inter_y_min = max(y1_min, y2_min)
+         inter_x_max = min(x1_max, x2_max)
+         inter_y_max = min(y1_max, y2_max)
+
+         inter_w = max(0.0, inter_x_max - inter_x_min)
+         inter_h = max(0.0, inter_y_max - inter_y_min)
+         inter_area = inter_w * inter_h
+
+         area1 = (x1_max - x1_min) * (y1_max - y1_min)
+         area2 = (x2_max - x2_min) * (y2_max - y2_min)
+         union_area = area1 + area2 - inter_area
+
+         return (inter_area / union_area) if union_area > 0 else 0.0
+
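A worked example of the IoU computation above, with a standalone re-implementation for axis-aligned boxes in `[x1, y1, x2, y2]` form (not an import from the package):

```python
def iou(a, b):
    ix = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))  # intersection width
    iy = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))  # intersection height
    inter = ix * iy
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0

# Two 10x10 boxes overlapping in a 5x10 strip: inter=50, union=150, IoU=1/3
assert abs(iou([0, 0, 10, 10], [5, 0, 15, 10]) - 1 / 3) < 1e-9
```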
+     def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
+         """Return a stable canonical ID for a raw tracker ID, merging fragmented
+         tracks when IoU and temporal constraints indicate they represent the
+         same physical object."""
+         if raw_id is None or bbox is None:
+             # Nothing to merge
+             return raw_id
+
+         now = time.time()
+
+         # Fast path – raw_id already mapped
+         if raw_id in self._track_aliases:
+             canonical_id = self._track_aliases[raw_id]
+             track_info = self._canonical_tracks.get(canonical_id)
+             if track_info is not None:
+                 track_info["last_bbox"] = bbox
+                 track_info["last_update"] = now
+                 track_info["raw_ids"].add(raw_id)
+             return canonical_id
+
+         # Attempt to merge with an existing canonical track
+         for canonical_id, info in self._canonical_tracks.items():
+             # Only consider recently updated tracks
+             if now - info["last_update"] > self._track_merge_time_window:
+                 continue
+             iou = self._compute_iou(bbox, info["last_bbox"])
+             if iou >= self._track_merge_iou_threshold:
+                 # Merge
+                 self._track_aliases[raw_id] = canonical_id
+                 info["last_bbox"] = bbox
+                 info["last_update"] = now
+                 info["raw_ids"].add(raw_id)
+                 return canonical_id
+
+         # No match – register a new canonical track
+         canonical_id = raw_id
+         self._track_aliases[raw_id] = canonical_id
+         self._canonical_tracks[canonical_id] = {
+             "last_bbox": bbox,
+             "last_update": now,
+             "raw_ids": {raw_id},
+         }
+         return canonical_id
+
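To illustrate the aliasing behaviour, a hedged sketch: when a tracker drops ID 7 and re-issues ID 12 for a nearly identical box within the 7-second window, `_merge_or_register_track` maps both raw IDs to the same canonical ID, so the unique count stays at 1 (assumes `LicensePlateUseCase()` can be instantiated standalone, i.e. `BaseProcessor.__init__` needs no further arguments):

```python
uc = LicensePlateUseCase()
box = {"xmin": 100, "ymin": 220, "xmax": 260, "ymax": 270}

first = uc._merge_or_register_track(7, box)    # registers canonical track 7
second = uc._merge_or_register_track(12, box)  # IoU 1.0 >= 0.05 -> merged into 7
assert first == second == 7
assert uc._canonical_tracks[7]["raw_ids"] == {7, 12}
```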
+     def _format_timestamp(self, timestamp: float) -> str:
+         """Format a timestamp for human-readable output."""
+         return datetime.fromtimestamp(timestamp, timezone.utc).strftime(
+             "%Y-%m-%d %H:%M:%S UTC"
+         )
+
+     def _get_tracking_start_time(self) -> str:
+         """Get the tracking start time, formatted as a string."""
+         if self._tracking_start_time is None:
+             return "N/A"
+         return self._format_timestamp(self._tracking_start_time)
+
+     def _set_tracking_start_time(self) -> None:
+         """Set the tracking start time to the current time."""
+         self._tracking_start_time = time.time()