matrice-analytics 0.1.60__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196)
  1. matrice_analytics/__init__.py +28 -0
  2. matrice_analytics/boundary_drawing_internal/README.md +305 -0
  3. matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
  4. matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
  5. matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
  6. matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
  7. matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
  8. matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
  9. matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
  10. matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
  11. matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
  12. matrice_analytics/post_processing/README.md +455 -0
  13. matrice_analytics/post_processing/__init__.py +732 -0
  14. matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
  15. matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
  16. matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
  17. matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
  18. matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
  19. matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
  20. matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
  21. matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
  22. matrice_analytics/post_processing/config.py +146 -0
  23. matrice_analytics/post_processing/core/__init__.py +63 -0
  24. matrice_analytics/post_processing/core/base.py +704 -0
  25. matrice_analytics/post_processing/core/config.py +3291 -0
  26. matrice_analytics/post_processing/core/config_utils.py +925 -0
  27. matrice_analytics/post_processing/face_reg/__init__.py +43 -0
  28. matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
  29. matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
  30. matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
  31. matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
  32. matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
  33. matrice_analytics/post_processing/ocr/__init__.py +0 -0
  34. matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
  35. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
  36. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
  37. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
  38. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
  39. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
  40. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
  41. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
  42. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
  43. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
  44. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
  45. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
  46. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
  47. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
  48. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
  49. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
  50. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
  51. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
  52. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
  53. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
  54. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
  55. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
  56. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
  57. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
  58. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
  59. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
  60. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
  61. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
  62. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
  63. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
  64. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
  65. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
  66. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
  67. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
  68. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
  69. matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
  70. matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
  71. matrice_analytics/post_processing/post_processor.py +1175 -0
  72. matrice_analytics/post_processing/test_cases/__init__.py +1 -0
  73. matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
  74. matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
  75. matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
  76. matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
  77. matrice_analytics/post_processing/test_cases/test_config.py +852 -0
  78. matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
  79. matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
  80. matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
  81. matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
  82. matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
  83. matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
  84. matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
  85. matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
  86. matrice_analytics/post_processing/usecases/__init__.py +267 -0
  87. matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
  88. matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
  89. matrice_analytics/post_processing/usecases/age_detection.py +842 -0
  90. matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
  91. matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
  92. matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
  93. matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
  94. matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
  95. matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
  96. matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
  97. matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
  98. matrice_analytics/post_processing/usecases/car_service.py +1601 -0
  99. matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
  100. matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
  101. matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
  102. matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
  103. matrice_analytics/post_processing/usecases/color/clip.py +660 -0
  104. matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
  105. matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
  106. matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
  107. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
  108. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
  109. matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
  110. matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
  111. matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
  112. matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
  113. matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
  114. matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
  115. matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
  116. matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
  117. matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
  118. matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
  119. matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
  120. matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
  121. matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
  122. matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
  123. matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
  124. matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
  125. matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
  126. matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
  127. matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
  128. matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
  129. matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
  130. matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
  131. matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
  132. matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
  133. matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
  134. matrice_analytics/post_processing/usecases/leaf.py +821 -0
  135. matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
  136. matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
  137. matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
  138. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
  139. matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
  140. matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
  141. matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
  142. matrice_analytics/post_processing/usecases/parking.py +787 -0
  143. matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
  144. matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
  145. matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
  146. matrice_analytics/post_processing/usecases/people_counting.py +706 -0
  147. matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
  148. matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
  149. matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
  150. matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
  151. matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
  152. matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
  153. matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
  154. matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
  155. matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
  156. matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
  157. matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
  158. matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
  159. matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
  160. matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
  161. matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
  162. matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
  163. matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
  164. matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
  165. matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
  166. matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
  167. matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
  168. matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
  169. matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
  170. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
  171. matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
  172. matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
  173. matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
  174. matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
  175. matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
  176. matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
  177. matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
  178. matrice_analytics/post_processing/utils/__init__.py +150 -0
  179. matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
  180. matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
  181. matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
  182. matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
  183. matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
  184. matrice_analytics/post_processing/utils/color_utils.py +592 -0
  185. matrice_analytics/post_processing/utils/counting_utils.py +182 -0
  186. matrice_analytics/post_processing/utils/filter_utils.py +261 -0
  187. matrice_analytics/post_processing/utils/format_utils.py +293 -0
  188. matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
  189. matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
  190. matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
  191. matrice_analytics/py.typed +0 -0
  192. matrice_analytics-0.1.60.dist-info/METADATA +481 -0
  193. matrice_analytics-0.1.60.dist-info/RECORD +196 -0
  194. matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
  195. matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
  196. matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
@@ -0,0 +1,902 @@
1
+ """
2
+ Field Mapping Monitoring Use Case for Post-Processing
3
+
4
+ This module provides field mapping monitoring functionality,
5
+ zone analysis, and alert generation.
6
+
7
+ """
8
+
9
+ from typing import Any, Dict, List, Optional
10
+ from dataclasses import asdict
11
+ import time
12
+ from datetime import datetime, timezone
13
+ import copy # Added for deep copying detections to preserve original masks
14
+
15
+ from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
16
+ from ..utils import (
17
+ filter_by_confidence,
18
+ filter_by_categories,
19
+ apply_category_mapping,
20
+ count_objects_by_category,
21
+ count_objects_in_zones,
22
+ calculate_counting_summary,
23
+ match_results_structure,
24
+ bbox_smoothing,
25
+ BBoxSmoothingConfig,
26
+ BBoxSmoothingTracker
27
+ )
28
+ from dataclasses import dataclass, field
29
+ from ..core.config import BaseConfig, AlertConfig, ZoneConfig
30
+
31
+
32
+ @dataclass
33
+ class FieldMappingConfig(BaseConfig):
34
+ """Configuration for field mapping detection use case in field mapping monitoring."""
35
+ # Smoothing configuration
36
+ enable_smoothing: bool = True
37
+ smoothing_algorithm: str = "observability" # "window" or "observability"
38
+ smoothing_window_size: int = 20
39
+ smoothing_cooldown_frames: int = 5
40
+ smoothing_confidence_range_factor: float = 0.5
41
+
42
+ # confidence thresholds
43
+ confidence_threshold: float = 0.6
44
+
45
+ usecase_categories: List[str] = field(
46
+ default_factory=lambda: [
47
+ 'Trees',
48
+ 'Agriculture land',
49
+ 'Water Bodies',
50
+ 'Agricultural land with black soil',
51
+ 'other land',
52
+ 'Buildings',
53
+ 'Road',
54
+ 'half seeded land',
55
+ 'grown agricultural land'
56
+ ]
57
+ )
58
+
59
+ target_categories: List[str] = field(
60
+ default_factory=lambda: [
61
+ 'Trees',
62
+ 'Agriculture land',
63
+ 'Water Bodies',
64
+ 'Agricultural land with black soil',
65
+ 'other land',
66
+ 'Buildings',
67
+ 'Road',
68
+ 'half seeded land',
69
+ 'grown agricultural land'
70
+ ]
71
+ )
72
+
73
+ alert_config: Optional[AlertConfig] = None
74
+
75
+ index_to_category: Optional[Dict[int, str]] = field(
76
+ default_factory=lambda: {
77
+ 0: "Trees",
78
+ 1: "Agriculture land",
79
+ 2: "Water Bodies",
80
+ 3: "Agricultural land with black soil",
81
+ 4: "other land",
82
+ 5: "Buildings",
83
+ 6: "Road",
84
+ 7: "half seeded land",
85
+ 8: "grown agricultural land"
86
+ }
87
+
88
+ )
89
+
90
+
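For illustration, a minimal sketch of how this config might be constructed (the import paths follow the file layout in the listing above; passing count_thresholds to the AlertConfig constructor is an assumption about that dataclass, it is the field read by _check_alerts further down):

    from matrice_analytics.post_processing.core.config import AlertConfig
    from matrice_analytics.post_processing.usecases.field_mapping import FieldMappingConfig

    # Raise the confidence threshold and alert once more than 20 regions are seen.
    config = FieldMappingConfig(
        confidence_threshold=0.7,
        smoothing_algorithm="window",  # default is "observability"
        alert_config=AlertConfig(count_thresholds={"all": 20}),
    )
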
91
+ class FieldMappingUseCase(BaseProcessor):
92
+ def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
93
+ """
94
+ Get detailed information about track IDs (per frame).
95
+ """
96
+ # Collect all track_ids in this frame
97
+ frame_track_ids = set()
98
+ for det in detections:
99
+ tid = det.get('track_id')
100
+ if tid is not None:
101
+ frame_track_ids.add(tid)
102
+ # Use persistent total set for unique counting
103
+ total_track_ids = set()
104
+ for s in getattr(self, '_per_category_total_track_ids', {}).values():
105
+ total_track_ids.update(s)
106
+ return {
107
+ "total_count": len(total_track_ids),
108
+ "current_frame_count": len(frame_track_ids),
109
+ "total_unique_track_ids": len(total_track_ids),
110
+ "current_frame_track_ids": list(frame_track_ids),
111
+ "last_update_time": time.time(),
112
+ "total_frames_processed": getattr(self, '_total_frame_counter', 0)
113
+ }
114
+
115
+ def _update_tracking_state(self, detections: list):
116
+ """
117
+ Track unique categories track_ids per category for total count after tracking.
118
+ Applies canonical ID merging to avoid duplicate counting when the underlying
119
+ tracker loses an object temporarily and assigns a new ID.
120
+ """
121
+ # Lazily initialise storage dicts
122
+ if not hasattr(self, "_per_category_total_track_ids"):
123
+ self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
124
+ self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
125
+
126
+ for det in detections:
127
+ cat = det.get("category")
128
+ raw_track_id = det.get("track_id")
129
+ if cat not in self.target_categories or raw_track_id is None:
130
+ continue
131
+ bbox = det.get("bounding_box", det.get("bbox"))
132
+ canonical_id = self._merge_or_register_track(raw_track_id, bbox)
133
+ # Propagate canonical ID back to detection so downstream logic uses it
134
+ det["track_id"] = canonical_id
135
+
136
+ self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
137
+ self._current_frame_track_ids[cat].add(canonical_id)
138
+
139
+ def get_total_counts(self):
140
+ """
141
+ Return total unique track_id count for each category.
142
+ """
143
+ return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
144
+
145
+ def _format_timestamp_for_video(self, timestamp: float) -> str:
146
+ """Format timestamp for video chunks (HH:MM:SS.ms format)."""
147
+ hours = int(timestamp // 3600)
148
+ minutes = int((timestamp % 3600) // 60)
149
+ seconds = timestamp % 60
150
+ return f"{hours:02d}:{minutes:02d}:{seconds:06.2f}"
151
+
152
+ def _format_timestamp_for_stream(self, timestamp: float) -> str:
153
+ """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
154
+ dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
155
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
156
+
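A quick sanity check of the two formats (hedged sketch; assumes FieldMappingUseCase() can be instantiated directly, as its no-argument __init__ below suggests):

    uc = FieldMappingUseCase()
    print(uc._format_timestamp_for_video(3725.5))  # "01:02:05.50"  (1 h, 2 min, 5.50 s)
    print(uc._format_timestamp_for_stream(0.0))    # "1970:01:01 00:00:00"  (UTC epoch)
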
157
+ def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]]) -> str:
158
+ """Get formatted current timestamp based on stream type."""
159
+ if not stream_info:
160
+ return "00:00:00.00"
161
+
162
+ is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
163
+
164
+ # if is_video_chunk:
165
+ # # For video chunks, use video_timestamp from stream_info
166
+ # video_timestamp = stream_info.get("video_timestamp", 0.0)
167
+ # return self._format_timestamp_for_video(video_timestamp)
168
+ if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
169
+ # If video format, return video timestamp
170
+ stream_time_str = stream_info.get("video_timestamp", "")
171
+ return stream_time_str[:8]
172
+ else:
173
+ # For streams, use stream_time from stream_info
174
+ stream_time_str = stream_info.get("stream_time", "")
175
+ if stream_time_str:
176
+ # Parse the high precision timestamp string to get timestamp
177
+ try:
178
+ # Remove " UTC" suffix and parse
179
+ timestamp_str = stream_time_str.replace(" UTC", "")
180
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
181
+ timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
182
+ return self._format_timestamp_for_stream(timestamp)
183
+ except:
184
+ # Fallback to current time if parsing fails
185
+ return self._format_timestamp_for_stream(time.time())
186
+ else:
187
+ return self._format_timestamp_for_stream(time.time())
188
+
189
+ def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]]) -> str:
190
+ """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
191
+ if not stream_info:
192
+ return "00:00:00"
193
+
194
+ is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
195
+
196
+ if is_video_chunk:
197
+ # For video chunks, start from 00:00:00
198
+ return "00:00:00"
199
+ elif stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
200
+ # If video format, start from 00:00:00
201
+ return "00:00:00"
202
+ else:
203
+ # For streams, use tracking start time or current time with minutes/seconds reset
204
+ if self._tracking_start_time is None:
205
+ # Try to extract timestamp from stream_time string
206
+ stream_time_str = stream_info.get("stream_time", "")
207
+ if stream_time_str:
208
+ try:
209
+ # Remove " UTC" suffix and parse
210
+ timestamp_str = stream_time_str.replace(" UTC", "")
211
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
212
+ self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
213
+ except:
214
+ # Fallback to current time if parsing fails
215
+ self._tracking_start_time = time.time()
216
+ else:
217
+ self._tracking_start_time = time.time()
218
+
219
+ dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
220
+ # Reset minutes and seconds to 00:00 for "TOTAL SINCE" format
221
+ dt = dt.replace(minute=0, second=0, microsecond=0)
222
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
223
+
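The two helpers above read only a few stream_info fields; a hedged sketch of the expected shapes (field names taken from the lookups in the code, values illustrative):

    # Live stream: any stream_type other than "video_file" takes the stream branch, and
    # "stream_time" is parsed with "%Y-%m-%d-%H:%M:%S.%f" after stripping the " UTC" suffix.
    live_info = {
        "input_settings": {"stream_type": "rtsp", "is_video_chunk": False},
        "stream_time": "2024-01-01-12:30:05.250000 UTC",
    }
    # -> current timestamp "2024:01:01 12:30:05"

    # Video file: the first 8 characters of "video_timestamp" are returned as-is.
    file_info = {
        "input_settings": {"stream_type": "video_file"},
        "video_timestamp": "00:05:12.480",
    }
    # -> current timestamp "00:05:12", start timestamp "00:00:00"
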
224
+ """ Monitoring use case with smoothing and alerting."""
225
+
226
+ def __init__(self):
227
+ super().__init__("field_mapping")
228
+ self.category = "infrastructure"
229
+
230
+ # List of categories to track
231
+ self.target_categories = [
232
+ "Trees",
233
+ "Agriculture land",
234
+ "Water Bodies",
235
+ "Agricultural land with black soil",
236
+ "other land",
237
+ "Buildings",
238
+ "Road",
239
+ "half seeded land",
240
+ "grown agricultural land"
241
+ ]
242
+
243
+ # Initialize smoothing tracker
244
+ self.smoothing_tracker = None
245
+
246
+ # Initialize advanced tracker (will be created on first use)
247
+ self.tracker = None
248
+
249
+ # Initialize tracking state variables
250
+ self._total_frame_counter = 0
251
+ self._global_frame_offset = 0
252
+
253
+ # Track start time for "TOTAL SINCE" calculation
254
+ self._tracking_start_time = None
255
+
256
+ # ------------------------------------------------------------------ #
257
+ # Canonical tracking aliasing to avoid duplicate counts #
258
+ # ------------------------------------------------------------------ #
259
+ # Maps raw tracker-generated IDs to stable canonical IDs that persist
260
+ # even if the underlying tracker re-assigns a new ID after a short
261
+ # interruption. This mirrors the logic used in people_counting to
262
+ # provide accurate unique counting.
263
+ self._track_aliases: Dict[Any, Any] = {}
264
+ self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
265
+ # Tunable parameters – adjust if necessary for specific scenarios
266
+ self._track_merge_iou_threshold: float = 0.05 # IoU ≥ 0.05 → treat as the same object and merge
267
+ self._track_merge_time_window: float = 7.0 # seconds within which to merge
268
+
269
+ def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
270
+ stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
271
+ """
272
+ Main entry point for post-processing.
273
+ Applies category mapping, smoothing, counting, alerting, and summary generation.
274
+ Returns a ProcessingResult with all relevant outputs.
275
+ """
276
+ start_time = time.time()
277
+ # Ensure config is correct type
278
+ if not isinstance(config, FieldMappingConfig):
279
+ return self.create_error_result("Invalid config type", usecase=self.name, category=self.category,
280
+ context=context)
281
+ if context is None:
282
+ context = ProcessingContext()
283
+
284
+ # Detect input format and store in context
285
+ input_format = match_results_structure(data)
286
+ context.input_format = input_format
287
+ context.confidence_threshold = config.confidence_threshold
288
+
289
+ # Step 1: Confidence filtering
290
+ if config.confidence_threshold is not None:
291
+ processed_data = filter_by_confidence(data, config.confidence_threshold)
292
+ else:
293
+ processed_data = data
294
+ self.logger.debug(f"Did not apply confidence filtering with threshold since nothing was provided")
295
+
296
+ # Step 2: Apply category mapping if provided
297
+ if config.index_to_category:
298
+ processed_data = apply_category_mapping(processed_data, config.index_to_category)
299
+
300
+ # Step 3: Category filtering
301
+ if config.target_categories:
302
+ processed_data = [d for d in processed_data if d.get('category') in config.target_categories]
303
+
304
+ # Step 4: Apply bbox smoothing if enabled
305
+ # Deep-copy detections so that we preserve the original masks before any
306
+ # smoothing/tracking logic potentially removes them.
307
+ raw_processed_data = [copy.deepcopy(det) for det in processed_data]
308
+ if config.enable_smoothing:
309
+ if self.smoothing_tracker is None:
310
+ smoothing_config = BBoxSmoothingConfig(
311
+ smoothing_algorithm=config.smoothing_algorithm,
312
+ window_size=config.smoothing_window_size,
313
+ cooldown_frames=config.smoothing_cooldown_frames,
314
+ confidence_threshold=config.confidence_threshold,
315
+ confidence_range_factor=config.smoothing_confidence_range_factor,
316
+ enable_smoothing=True
317
+ )
318
+ self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
319
+
320
+ processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
321
+ # (Original masks are re-attached after tracking via _attach_masks_to_detections.)
322
+
323
+ # Step 5: Advanced tracking (BYTETracker-like)
324
+ try:
325
+ from ..advanced_tracker import AdvancedTracker
326
+ from ..advanced_tracker.config import TrackerConfig
327
+
328
+ # Create tracker instance if it doesn't exist (preserves state across frames)
329
+ if self.tracker is None:
330
+ tracker_config = TrackerConfig()
331
+ self.tracker = AdvancedTracker(tracker_config)
332
+ self.logger.info("Initialized AdvancedTracker for Monitoring and tracking")
333
+
334
+ processed_data = self.tracker.update(processed_data)
335
+ except Exception as e:
336
+ # If the advanced tracker fails, keep the current (smoothed) detections as-is
337
+ self.logger.warning(f"AdvancedTracker failed: {e}")
338
+
339
+ # Update tracking state for total count per label
340
+ self._update_tracking_state(processed_data)
341
+
342
+ # ------------------------------------------------------------------ #
343
+ # Re-attach segmentation masks that were present in the original input
344
+ # but may have been stripped during smoothing/tracking. We match each
345
+ # processed detection back to the raw detection with the highest IoU
346
+ # and copy over its "masks" field (if available).
347
+ # ------------------------------------------------------------------ #
348
+ processed_data = self._attach_masks_to_detections(processed_data, raw_processed_data)
349
+
350
+ # Update frame counter
351
+ self._total_frame_counter += 1
352
+
353
+ # Extract frame information from stream_info
354
+ frame_number = None
355
+ if stream_info:
356
+ input_settings = stream_info.get("input_settings", {})
357
+ start_frame = input_settings.get("start_frame")
358
+ end_frame = input_settings.get("end_frame")
359
+ # If start and end frame are the same, it's a single frame
360
+ if start_frame is not None and end_frame is not None and start_frame == end_frame:
361
+ frame_number = start_frame
362
+
363
+ # Compute summaries and alerts
364
+ general_counting_summary = calculate_counting_summary(data)
365
+ counting_summary = self._count_categories(processed_data, config)
366
+ # Add total unique counts after tracking using only local state
367
+ total_counts = self.get_total_counts()
368
+ counting_summary['total_counts'] = total_counts
369
+
370
+ insights = self._generate_insights(counting_summary, config)
371
+ alerts = self._check_alerts(counting_summary, config)
372
+ predictions = self._extract_predictions(processed_data)
373
+ summary = self._generate_summary(counting_summary, alerts)
374
+
375
+ # Step: Generate structured events and tracking stats with frame-based keys
376
+ events_list = self._generate_events(counting_summary, alerts, config, frame_number, stream_info)
377
+ tracking_stats_list = self._generate_tracking_stats(counting_summary, insights, summary, config, frame_number,
378
+ stream_info)
379
+
380
+ # Extract frame-based dictionaries from the lists
381
+ events = events_list[0] if events_list else {}
382
+ tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
383
+
384
+ context.mark_completed()
385
+
386
+ # Build result object
387
+ result = self.create_result(
388
+ data={
389
+ "counting_summary": counting_summary,
390
+ "general_counting_summary": general_counting_summary,
391
+ "alerts": alerts,
392
+ "total_detections": counting_summary.get("total_count", 0),
393
+ "events": events,
394
+ "tracking_stats": tracking_stats,
395
+ },
396
+ usecase=self.name,
397
+ category=self.category,
398
+ context=context
399
+ )
400
+ result.summary = summary
401
+ result.insights = insights
402
+ result.predictions = predictions
403
+ return result
404
+
405
+ def _generate_events(self, counting_summary: Dict, alerts: List, config: FieldMappingConfig,
406
+ frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[
407
+ Dict]:
408
+ """Generate structured events for the output format with frame-based keys."""
409
+ from datetime import datetime, timezone
410
+
411
+ # Use frame number as key, fallback to 'current_frame' if not available
412
+ frame_key = str(frame_number) if frame_number is not None else "current_frame"
413
+ events = [{frame_key: []}]
414
+ frame_events = events[0][frame_key]
415
+ total_detections = counting_summary.get("total_count", 0)
416
+
417
+ if total_detections > 0:
418
+ # Determine event level based on thresholds
419
+ level = "info"
420
+ intensity = 5.0
421
+ if config.alert_config and config.alert_config.count_thresholds:
422
+ threshold = config.alert_config.count_thresholds.get("all", 15)
423
+ intensity = min(10.0, (total_detections / threshold) * 10)
424
+
425
+ if intensity >= 7:
426
+ level = "critical"
427
+ elif intensity >= 5:
428
+ level = "warning"
429
+ else:
430
+ level = "info"
431
+ else:
432
+ if total_detections > 25:
433
+ level = "critical"
434
+ intensity = 9.0
435
+ elif total_detections > 15:
436
+ level = "warning"
437
+ intensity = 7.0
438
+ else:
439
+ level = "info"
440
+ intensity = min(10.0, total_detections / 3.0)
441
+
442
+ # Generate human text in new format
443
+ human_text_lines = ["EVENTS DETECTED:"]
444
+ human_text_lines.append(f" - {total_detections} detected [INFO]")
445
+ human_text = "\n".join(human_text_lines)
446
+
447
+ event = {
448
+ "type": "field_mapping",
449
+ "stream_time": datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S UTC"),
450
+ "level": level,
451
+ "intensity": round(intensity, 1),
452
+ "config": {
453
+ "min_value": 0,
454
+ "max_value": 10,
455
+ "level_settings": {"info": 2, "warning": 5, "critical": 7}
456
+ },
457
+ "application_name": "field mapping System",
458
+ "application_version": "1.2",
459
+ "location_info": None,
460
+ "human_text": human_text
461
+ }
462
+ frame_events.append(event)
463
+
464
+ # Add alert events
465
+ for alert in alerts:
466
+ total_detections = counting_summary.get("total_count", 0)
467
+ intensity_message = "ALERT: Low congestion in the scene"
468
+ if config.alert_config and config.alert_config.count_thresholds:
469
+ threshold = config.alert_config.count_thresholds.get("all", 15)
470
+ percentage = (total_detections / threshold) * 100 if threshold > 0 else 0
471
+ if percentage < 20:
472
+ intensity_message = "ALERT: Low congestion in the scene"
473
+ elif percentage <= 50:
474
+ intensity_message = "ALERT: Moderate congestion in the scene"
475
+ elif percentage <= 70:
476
+ intensity_message = "ALERT: Heavy congestion in the scene"
477
+ else:
478
+ intensity_message = "ALERT: Severe congestion in the scene"
479
+ else:
480
+ if total_detections > 15:
481
+ intensity_message = "ALERT: Heavy congestion in the scene"
482
+ elif total_detections == 1:
483
+ intensity_message = "ALERT: Low congestion in the scene"
484
+ else:
485
+ intensity_message = "ALERT: Moderate congestion in the scene"
486
+
487
+ alert_event = {
488
+ "type": alert.get("type", "congestion_alert"),
489
+ "stream_time": datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S UTC"),
490
+ "level": alert.get("severity", "warning"),
491
+ "intensity": 8.0,
492
+ "config": {
493
+ "min_value": 0,
494
+ "max_value": 10,
495
+ "level_settings": {"info": 2, "warning": 5, "critical": 7}
496
+ },
497
+ "application_name": "Congestion Alert System",
498
+ "application_version": "1.2",
499
+ "location_info": alert.get("zone"),
500
+ "human_text": f"{datetime.now(timezone.utc).strftime('%Y-%m-%d-%H:%M:%S UTC')} : {intensity_message}"
501
+ }
502
+ frame_events.append(alert_event)
503
+
504
+ return events
505
+
506
+ def _generate_tracking_stats(
507
+ self,
508
+ counting_summary: Dict,
509
+ insights: List[str],
510
+ summary: str,
511
+ config: FieldMappingConfig,
512
+ frame_number: Optional[int] = None,
513
+ stream_info: Optional[Dict[str, Any]] = None
514
+ ) -> List[Dict]:
515
+ """Generate structured tracking stats for the output format with frame-based keys, including track_ids_info and detections with masks."""
516
+ frame_key = str(frame_number) if frame_number is not None else "current_frame"
517
+ tracking_stats = [{frame_key: []}]
518
+ frame_tracking_stats = tracking_stats[0][frame_key]
519
+
520
+ total_detections = counting_summary.get("total_count", 0)
521
+ total_counts = counting_summary.get("total_counts", {})
522
+ cumulative_total = sum(total_counts.values()) if total_counts else 0
523
+ per_category_count = counting_summary.get("per_category_count", {})
524
+
525
+ track_ids_info = self._get_track_ids_info(counting_summary.get("detections", []))
526
+
527
+ current_timestamp = self._get_current_timestamp_str(stream_info)
528
+ start_timestamp = self._get_start_timestamp_str(stream_info)
529
+
530
+ human_text_lines = []
531
+
532
+ # CURRENT FRAME section
533
+ human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
534
+ if total_detections > 0:
535
+ category_counts = [f"{count} {cat}" for cat, count in per_category_count.items()]
536
+ if len(category_counts) == 1:
537
+ detection_text = category_counts[0] + " detected"
538
+ elif len(category_counts) == 2:
539
+ detection_text = f"{category_counts[0]} and {category_counts[1]} detected"
540
+ else:
541
+ detection_text = f"{', '.join(category_counts[:-1])}, and {category_counts[-1]} detected"
542
+ human_text_lines.append(f"\t- {detection_text}")
543
+ else:
544
+ human_text_lines.append(f"\t- No detections")
545
+
546
+ human_text_lines.append("") # spacing
547
+
548
+ # TOTAL SINCE section
549
+ human_text_lines.append(f"TOTAL SINCE {start_timestamp}:")
550
+ human_text_lines.append(f"\t- Total Detected: {cumulative_total}")
551
+ # Add category-wise counts
552
+ if total_counts:
553
+ for cat, count in total_counts.items():
554
+ if count > 0: # Only include categories with non-zero counts
555
+ human_text_lines.append(f"\t- {cat}: {count}")
556
+
557
+ human_text = "\n".join(human_text_lines)
558
+
559
+ # Include detections with masks from counting_summary
560
+ detections = [
561
+ {
562
+ "category": det.get("category"),
563
+ "confidence": det.get("confidence"),
564
+ "bounding_box": det.get("bounding_box"),
565
+ "track_id": det.get("track_id"),
566
+ "frame_id": det.get("frame_id"),
567
+ "masks": det.get("masks", det.get("mask", [])) # Include masks, fallback to empty list
568
+ }
569
+ for det in counting_summary.get("detections", [])
570
+ ]
571
+
572
+ tracking_stat = {
573
+ "type": "field_mapping",
574
+ "category": "infrastructure",
575
+ "count": total_detections,
576
+ "insights": insights,
577
+ "summary": summary,
578
+ "timestamp": datetime.now(timezone.utc).strftime('%Y-%m-%d-%H:%M:%S UTC'),
579
+ "human_text": human_text,
580
+ "track_ids_info": track_ids_info,
581
+ "global_frame_offset": getattr(self, '_global_frame_offset', 0),
582
+ "local_frame_id": frame_key,
583
+ "detections": detections # Add detections with masks
584
+ }
585
+
586
+ frame_tracking_stats.append(tracking_stat)
587
+ return tracking_stats
588
+
589
+ def _count_categories(self, detections: list, config: FieldMappingConfig) -> dict:
590
+ """
591
+ Count the number of detections per category and return a summary dict.
592
+ The detections list is expected to have 'track_id' (from tracker), 'category', 'bounding_box', 'masks', etc.
593
+ Output structure will include 'track_id' and 'masks' for each detection as per AdvancedTracker output.
594
+ """
595
+ counts = {}
596
+ valid_detections = []
597
+ for det in detections:
598
+ cat = det.get('category', 'unknown')
599
+ if not all(k in det for k in ['category', 'confidence', 'bounding_box']): # Validate required fields
600
+ self.logger.warning(f"Skipping invalid detection: {det}")
601
+ continue
602
+ counts[cat] = counts.get(cat, 0) + 1
603
+ valid_detections.append({
604
+ "bounding_box": det.get("bounding_box"),
605
+ "category": det.get("category"),
606
+ "confidence": det.get("confidence"),
607
+ "track_id": det.get("track_id"),
608
+ "frame_id": det.get("frame_id"),
609
+ "masks": det.get("masks", det.get("mask", [])) # Include masks, fallback to empty list
610
+ })
611
+ self.logger.debug(f"Valid detections after filtering: {len(valid_detections)}")
612
+ return {
613
+ "total_count": sum(counts.values()),
614
+ "per_category_count": counts,
615
+ "detections": valid_detections
616
+ }
617
+
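For instance (hedged sketch; detection dicts follow the schema used throughout this module):

    uc = FieldMappingUseCase()
    dets = [
        {"category": "Road", "confidence": 0.9,
         "bounding_box": {"xmin": 0, "ymin": 0, "xmax": 5, "ymax": 5}, "track_id": 1},
        {"category": "Trees", "confidence": 0.8,
         "bounding_box": {"xmin": 6, "ymin": 0, "xmax": 9, "ymax": 4}},
    ]
    summary = uc._count_categories(dets, FieldMappingConfig())
    # summary["total_count"] == 2
    # summary["per_category_count"] == {"Road": 1, "Trees": 1}
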
618
+ # ------------------------------------------------------------------ #
619
+ # Helper to merge masks back into detections #
620
+ # ------------------------------------------------------------------ #
621
+ def _attach_masks_to_detections(
622
+ self,
623
+ processed_detections: List[Dict[str, Any]],
624
+ raw_detections: List[Dict[str, Any]],
625
+ iou_threshold: float = 0.5,
626
+ ) -> List[Dict[str, Any]]:
627
+ """
628
+ Attach segmentation masks from the original `raw_detections` list to the
629
+ `processed_detections` list returned after smoothing/tracking.
630
+
631
+ Matching between detections is performed using Intersection-over-Union
632
+ (IoU) of the bounding boxes. For each processed detection we select the
633
+ raw detection with the highest IoU above `iou_threshold` and copy its
634
+ `masks` (or `mask`) field. If no suitable match is found, the detection
635
+ keeps an empty list for `masks` to maintain a consistent schema.
636
+ """
637
+
638
+ if not processed_detections or not raw_detections:
639
+ # Nothing to do – ensure masks key exists for downstream logic.
640
+ for det in processed_detections:
641
+ det.setdefault("masks", [])
642
+ return processed_detections
643
+
644
+ # Track which raw detections have already been matched to avoid
645
+ # assigning the same mask to multiple processed detections.
646
+ used_raw_indices = set()
647
+
648
+ for det in processed_detections:
649
+ best_iou = 0.0
650
+ best_idx = None
651
+
652
+ for idx, raw_det in enumerate(raw_detections):
653
+ if idx in used_raw_indices:
654
+ continue
655
+
656
+ iou = self._compute_iou(det.get("bounding_box"), raw_det.get("bounding_box"))
657
+ if iou > best_iou:
658
+ best_iou = iou
659
+ best_idx = idx
660
+
661
+ if best_idx is not None and best_iou >= iou_threshold:
662
+ raw_det = raw_detections[best_idx]
663
+ masks = raw_det.get("masks", raw_det.get("mask"))
664
+ if masks is not None:
665
+ det["masks"] = masks
666
+ used_raw_indices.add(best_idx)
667
+ else:
668
+ # No adequate match – default to empty list to keep schema consistent.
669
+ det.setdefault("masks", ["EMPTY"])
670
+
671
+ return processed_detections
672
+
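A minimal sketch of the re-attachment behaviour (values illustrative; assumes the use case can be instantiated directly):

    raw = [{"bounding_box": {"xmin": 0, "ymin": 0, "xmax": 10, "ymax": 10},
            "category": "Road", "masks": [[0, 0, 10, 0, 10, 10]]}]
    tracked = [{"bounding_box": {"xmin": 1, "ymin": 1, "xmax": 10, "ymax": 10},
                "category": "Road", "track_id": 7}]  # mask stripped by smoothing/tracking

    out = FieldMappingUseCase()._attach_masks_to_detections(tracked, raw)
    # IoU of the two boxes is 81/100 = 0.81 >= 0.5, so the raw mask is copied back.
    assert out[0]["masks"] == [[0, 0, 10, 0, 10, 10]]
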
673
+ # Human-friendly display names for categories
674
+ CATEGORY_DISPLAY = {
675
+ "Trees": "Trees",
676
+ "Agriculture land": "Agriculture land",
677
+ "Water Bodies": "Water Bodies",
678
+ "Agricultural land with black soil": "Agricultural land with black soil",
679
+ "other land": "other land",
680
+ "Buildings": "Buildings",
681
+ "Road": "Road",
682
+ "half seeded land": "half seeded land",
683
+ "grown agricultural land": "grown agricultural land"
684
+ }
685
+
686
+ def _generate_insights(self, summary: dict, config: FieldMappingConfig) -> List[str]:
687
+ """
688
+ Generate human-readable insights for each category.
689
+ """
690
+ insights = []
691
+ per_cat = summary.get("per_category_count", {})
692
+ total_detections = summary.get("total_count", 0)
693
+
694
+ if total_detections == 0:
695
+ insights.append("No detections in the scene")
696
+ return insights
697
+ insights.append(f"EVENT: Detected {total_detections} in the scene")
698
+ # Intensity calculation based on threshold percentage
699
+ intensity_threshold = None
700
+ if (config.alert_config and
701
+ config.alert_config.count_thresholds and
702
+ "all" in config.alert_config.count_thresholds):
703
+ intensity_threshold = config.alert_config.count_thresholds["all"]
704
+
705
+ if intensity_threshold is not None:
706
+ # Calculate percentage relative to threshold
707
+ percentage = (total_detections / intensity_threshold) * 100
708
+
709
+ if percentage < 20:
710
+ insights.append(f"INTENSITY: Low congestion in the scene ({percentage:.1f}% of capacity)")
711
+ elif percentage <= 50:
712
+ insights.append(f"INTENSITY: Moderate congestion in the scene ({percentage:.1f}% of capacity)")
713
+ elif percentage <= 70:
714
+ insights.append(f"INTENSITY: Heavy congestion in the scene ({percentage:.1f}% of capacity)")
715
+ else:
716
+ insights.append(f"INTENSITY: Severe congestion in the scene ({percentage:.1f}% of capacity)")
717
+
718
+ for cat, count in per_cat.items():
719
+ display = self.CATEGORY_DISPLAY.get(cat, cat)
720
+ insights.append(f"{display}:{count}")
721
+ return insights
722
+
723
+ def _check_alerts(self, summary: dict, config: FieldMappingConfig) -> List[Dict]:
724
+ """
725
+ Check if any alert thresholds are exceeded and return alert dicts.
726
+ """
727
+ alerts = []
728
+ if not config.alert_config:
729
+ return alerts
730
+ total = summary.get("total_count", 0)
731
+ if config.alert_config.count_thresholds:
732
+ for category, threshold in config.alert_config.count_thresholds.items():
733
+ if category == "all" and total >= threshold:
734
+ timestamp = datetime.now(timezone.utc).strftime('%Y-%m-%d-%H:%M:%S UTC')
735
+ alert_description = f"detections count ({total}) exceeds threshold ({threshold})"
736
+ alerts.append({
737
+ "type": "count_threshold",
738
+ "severity": "warning",
739
+ "message": f"Total detections count ({total}) exceeds threshold ({threshold})",
740
+ "category": category,
741
+ "current_count": total,
742
+ "threshold": threshold
743
+ })
744
+ elif category in summary.get("per_category_count", {}):
745
+ count = summary.get("per_category_count", {})[category]
746
+ if count >= threshold:
747
+ alerts.append({
748
+ "type": "count_threshold",
749
+ "severity": "warning",
750
+ "message": f"{category} count ({count}) exceeds threshold ({threshold})",
751
+ "category": category,
752
+ "current_count": count,
753
+ "threshold": threshold
754
+ })
755
+ return alerts
756
+
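For example, with count thresholds on "all" and on a single category (hedged sketch; AlertConfig is assumed to accept count_thresholds as a constructor field):

    config = FieldMappingConfig(
        alert_config=AlertConfig(count_thresholds={"all": 5, "Buildings": 3})
    )
    summary = {"total_count": 6, "per_category_count": {"Buildings": 4, "Road": 2}}

    alerts = FieldMappingUseCase()._check_alerts(summary, config)
    # Two alerts: total 6 >= 5, and Buildings 4 >= 3.
    for a in alerts:
        print(a["message"])
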
757
+ def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
758
+ """
759
+ Extract prediction details for output (category, confidence, bounding box).
760
+ """
761
+ return [
762
+ {
763
+ "category": det.get("category", "unknown"),
764
+ "confidence": det.get("confidence", 0.0),
765
+ "bounding_box": det.get("bounding_box", {}),
766
+ "mask": det.get("mask", det.get("masks", None)) # Accept either key
767
+ }
768
+ for det in detections
769
+ ]
770
+
771
+ def _generate_summary(self, summary: dict, alerts: List) -> str:
772
+ """
773
+ Generate a human_text string for the result, including per-category insights if available.
774
+ Adds a tab before each label for better formatting.
775
+ Also always includes the cumulative count so far.
776
+ """
777
+ total = summary.get("total_count", 0)
778
+ per_cat = summary.get("per_category_count", {})
779
+ cumulative = summary.get("total_counts", {})
780
+ cumulative_total = sum(cumulative.values()) if cumulative else 0
781
+ lines = []
782
+ if total > 0:
783
+ lines.append(f"{total} detections")
784
+ if per_cat:
785
+ lines.append("detections:")
786
+ for cat, count in per_cat.items():
787
+ lines.append(f"\t{cat}:{count}")
788
+ else:
789
+ lines.append("No detections")
790
+ lines.append(f"Total detections: {cumulative_total}")
791
+ if alerts:
792
+ lines.append(f"{len(alerts)} alert(s)")
793
+ return "\n".join(lines)
794
+
795
+ # ------------------------------------------------------------------ #
796
+ # Canonical ID helpers #
797
+ # ------------------------------------------------------------------ #
798
+ def _compute_iou(self, box1: Any, box2: Any) -> float:
799
+ """Compute IoU between two bounding boxes which may be dicts or lists.
800
+ Falls back to 0 when insufficient data is available."""
801
+
802
+ # Helper to convert bbox (dict or list) to [x1, y1, x2, y2]
803
+ def _bbox_to_list(bbox):
804
+ if bbox is None:
805
+ return []
806
+ if isinstance(bbox, list):
807
+ return bbox[:4] if len(bbox) >= 4 else []
808
+ if isinstance(bbox, dict):
809
+ if "xmin" in bbox:
810
+ return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
811
+ if "x1" in bbox:
812
+ return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
813
+ # Fallback: first four numeric values
814
+ values = [v for v in bbox.values() if isinstance(v, (int, float))]
815
+ return values[:4] if len(values) >= 4 else []
816
+ return []
817
+
818
+ l1 = _bbox_to_list(box1)
819
+ l2 = _bbox_to_list(box2)
820
+ if len(l1) < 4 or len(l2) < 4:
821
+ return 0.0
822
+ x1_min, y1_min, x1_max, y1_max = l1
823
+ x2_min, y2_min, x2_max, y2_max = l2
824
+
825
+ # Ensure correct order
826
+ x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
827
+ y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
828
+ x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
829
+ y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
830
+
831
+ inter_x_min = max(x1_min, x2_min)
832
+ inter_y_min = max(y1_min, y2_min)
833
+ inter_x_max = min(x1_max, x2_max)
834
+ inter_y_max = min(y1_max, y2_max)
835
+
836
+ inter_w = max(0.0, inter_x_max - inter_x_min)
837
+ inter_h = max(0.0, inter_y_max - inter_y_min)
838
+ inter_area = inter_w * inter_h
839
+
840
+ area1 = (x1_max - x1_min) * (y1_max - y1_min)
841
+ area2 = (x2_max - x2_min) * (y2_max - y2_min)
842
+ union_area = area1 + area2 - inter_area
843
+
844
+ return (inter_area / union_area) if union_area > 0 else 0.0
845
+
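A quick numeric check of the IoU helper, which accepts both list and dict box formats (hedged; assumes direct instantiation):

    uc = FieldMappingUseCase()
    # Overlap is 5 x 5 = 25, union is 100 + 100 - 25 = 175, so IoU = 25/175 ≈ 0.143.
    print(uc._compute_iou([0, 0, 10, 10], {"x1": 5, "y1": 5, "x2": 15, "y2": 15}))
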
846
+ def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
847
+ """Return a stable canonical ID for a raw tracker ID, merging fragmented
848
+ tracks when IoU and temporal constraints indicate they represent the
849
+ same physical object."""
850
+ if raw_id is None or bbox is None:
851
+ # Nothing to merge
852
+ return raw_id
853
+
854
+ now = time.time()
855
+
856
+ # Fast path – raw_id already mapped
857
+ if raw_id in self._track_aliases:
858
+ canonical_id = self._track_aliases[raw_id]
859
+ track_info = self._canonical_tracks.get(canonical_id)
860
+ if track_info is not None:
861
+ track_info["last_bbox"] = bbox
862
+ track_info["last_update"] = now
863
+ track_info["raw_ids"].add(raw_id)
864
+ return canonical_id
865
+
866
+ # Attempt to merge with an existing canonical track
867
+ for canonical_id, info in self._canonical_tracks.items():
868
+ # Only consider recently updated tracks
869
+ if now - info["last_update"] > self._track_merge_time_window:
870
+ continue
871
+ iou = self._compute_iou(bbox, info["last_bbox"])
872
+ if iou >= self._track_merge_iou_threshold:
873
+ # Merge
874
+ self._track_aliases[raw_id] = canonical_id
875
+ info["last_bbox"] = bbox
876
+ info["last_update"] = now
877
+ info["raw_ids"].add(raw_id)
878
+ return canonical_id
879
+
880
+ # No match – register new canonical track
881
+ canonical_id = raw_id
882
+ self._track_aliases[raw_id] = canonical_id
883
+ self._canonical_tracks[canonical_id] = {
884
+ "last_bbox": bbox,
885
+ "last_update": now,
886
+ "raw_ids": {raw_id},
887
+ }
888
+ return canonical_id
889
+
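To illustrate the aliasing (hedged sketch): two raw tracker IDs whose boxes overlap within the 7-second window collapse to one canonical ID.

    uc = FieldMappingUseCase()
    box = {"xmin": 100, "ymin": 100, "xmax": 200, "ymax": 200}

    first = uc._merge_or_register_track(11, box)   # registers canonical ID 11
    second = uc._merge_or_register_track(42, box)  # IoU 1.0 >= 0.05 within 7 s -> merged
    assert first == second == 11
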
890
+ def _format_timestamp(self, timestamp: float) -> str:
891
+ """Format a timestamp for human-readable output."""
892
+ return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
893
+
894
+ def _get_tracking_start_time(self) -> str:
895
+ """Get the tracking start time, formatted as a string."""
896
+ if self._tracking_start_time is None:
897
+ return "N/A"
898
+ return self._format_timestamp(self._tracking_start_time)
899
+
900
+ def _set_tracking_start_time(self) -> None:
901
+ """Set the tracking start time to the current time."""
902
+ self._tracking_start_time = time.time()
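
Putting it together, a hedged end-to-end sketch of driving this use case. It assumes the detector output is a flat list of detection dicts with integer class indices (which apply_category_mapping and the other core helpers are assumed to accept) and that the core base classes construct without extra arguments:

    from matrice_analytics.post_processing.usecases.field_mapping import (
        FieldMappingConfig, FieldMappingUseCase,
    )

    config = FieldMappingConfig(confidence_threshold=0.5)
    usecase = FieldMappingUseCase()

    detections = [
        {"category": 5, "confidence": 0.91,   # index 5 -> "Buildings" via index_to_category
         "bounding_box": {"xmin": 10, "ymin": 10, "xmax": 120, "ymax": 90}},
        {"category": 6, "confidence": 0.78,   # index 6 -> "Road"
         "bounding_box": {"xmin": 200, "ymin": 40, "xmax": 320, "ymax": 80}},
    ]

    result = usecase.process(detections, config)
    # result.summary, result.insights and result.predictions are set explicitly by process();
    # counting summaries, events and tracking stats are packed into the result data.
    print(result.summary)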