matrice_analytics-0.1.60-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196)
  1. matrice_analytics/__init__.py +28 -0
  2. matrice_analytics/boundary_drawing_internal/README.md +305 -0
  3. matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
  4. matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
  5. matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
  6. matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
  7. matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
  8. matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
  9. matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
  10. matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
  11. matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
  12. matrice_analytics/post_processing/README.md +455 -0
  13. matrice_analytics/post_processing/__init__.py +732 -0
  14. matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
  15. matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
  16. matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
  17. matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
  18. matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
  19. matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
  20. matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
  21. matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
  22. matrice_analytics/post_processing/config.py +146 -0
  23. matrice_analytics/post_processing/core/__init__.py +63 -0
  24. matrice_analytics/post_processing/core/base.py +704 -0
  25. matrice_analytics/post_processing/core/config.py +3291 -0
  26. matrice_analytics/post_processing/core/config_utils.py +925 -0
  27. matrice_analytics/post_processing/face_reg/__init__.py +43 -0
  28. matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
  29. matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
  30. matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
  31. matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
  32. matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
  33. matrice_analytics/post_processing/ocr/__init__.py +0 -0
  34. matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
  35. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
  36. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
  37. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
  38. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
  39. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
  40. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
  41. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
  42. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
  43. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
  44. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
  45. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
  46. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
  47. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
  48. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
  49. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
  50. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
  51. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
  52. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
  53. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
  54. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
  55. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
  56. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
  57. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
  58. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
  59. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
  60. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
  61. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
  62. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
  63. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
  64. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
  65. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
  66. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
  67. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
  68. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
  69. matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
  70. matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
  71. matrice_analytics/post_processing/post_processor.py +1175 -0
  72. matrice_analytics/post_processing/test_cases/__init__.py +1 -0
  73. matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
  74. matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
  75. matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
  76. matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
  77. matrice_analytics/post_processing/test_cases/test_config.py +852 -0
  78. matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
  79. matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
  80. matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
  81. matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
  82. matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
  83. matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
  84. matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
  85. matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
  86. matrice_analytics/post_processing/usecases/__init__.py +267 -0
  87. matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
  88. matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
  89. matrice_analytics/post_processing/usecases/age_detection.py +842 -0
  90. matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
  91. matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
  92. matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
  93. matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
  94. matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
  95. matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
  96. matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
  97. matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
  98. matrice_analytics/post_processing/usecases/car_service.py +1601 -0
  99. matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
  100. matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
  101. matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
  102. matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
  103. matrice_analytics/post_processing/usecases/color/clip.py +660 -0
  104. matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
  105. matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
  106. matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
  107. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
  108. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
  109. matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
  110. matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
  111. matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
  112. matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
  113. matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
  114. matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
  115. matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
  116. matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
  117. matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
  118. matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
  119. matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
  120. matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
  121. matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
  122. matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
  123. matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
  124. matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
  125. matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
  126. matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
  127. matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
  128. matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
  129. matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
  130. matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
  131. matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
  132. matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
  133. matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
  134. matrice_analytics/post_processing/usecases/leaf.py +821 -0
  135. matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
  136. matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
  137. matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
  138. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
  139. matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
  140. matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
  141. matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
  142. matrice_analytics/post_processing/usecases/parking.py +787 -0
  143. matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
  144. matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
  145. matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
  146. matrice_analytics/post_processing/usecases/people_counting.py +706 -0
  147. matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
  148. matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
  149. matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
  150. matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
  151. matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
  152. matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
  153. matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
  154. matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
  155. matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
  156. matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
  157. matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
  158. matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
  159. matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
  160. matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
  161. matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
  162. matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
  163. matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
  164. matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
  165. matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
  166. matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
  167. matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
  168. matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
  169. matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
  170. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
  171. matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
  172. matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
  173. matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
  174. matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
  175. matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
  176. matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
  177. matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
  178. matrice_analytics/post_processing/utils/__init__.py +150 -0
  179. matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
  180. matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
  181. matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
  182. matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
  183. matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
  184. matrice_analytics/post_processing/utils/color_utils.py +592 -0
  185. matrice_analytics/post_processing/utils/counting_utils.py +182 -0
  186. matrice_analytics/post_processing/utils/filter_utils.py +261 -0
  187. matrice_analytics/post_processing/utils/format_utils.py +293 -0
  188. matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
  189. matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
  190. matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
  191. matrice_analytics/py.typed +0 -0
  192. matrice_analytics-0.1.60.dist-info/METADATA +481 -0
  193. matrice_analytics-0.1.60.dist-info/RECORD +196 -0
  194. matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
  195. matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
  196. matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/usecases/leaf.py
@@ -0,0 +1,821 @@
+ """
+ Leaf Disease Monitoring Use Case for Post-Processing
+
+ This module provides leaf disease monitoring functionality with detection
+ counting, congestion-level insights, and alert generation.
+ """
+
+ from typing import Any, Dict, List, Optional
+ from dataclasses import asdict, dataclass, field
+ import time
+ from datetime import datetime, timezone
+
+ from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
+ from ..core.config import BaseConfig, AlertConfig, ZoneConfig
+ from ..utils import (
+     filter_by_confidence,
+     filter_by_categories,
+     apply_category_mapping,
+     count_objects_by_category,
+     count_objects_in_zones,
+     calculate_counting_summary,
+     match_results_structure,
+     bbox_smoothing,
+     BBoxSmoothingConfig,
+     BBoxSmoothingTracker,
+ )
+
+
+ @dataclass
+ class LeafConfig(BaseConfig):
+     """Configuration for the leaf disease detection use case."""
+
+     # Smoothing configuration
+     enable_smoothing: bool = True
+     smoothing_algorithm: str = "observability"  # "window" or "observability"
+     smoothing_window_size: int = 20
+     smoothing_cooldown_frames: int = 5
+     smoothing_confidence_range_factor: float = 0.5
+
+     # Confidence threshold
+     confidence_threshold: float = 0.6
+
+     usecase_categories: List[str] = field(
+         default_factory=lambda: ["Apple Black Rod", "Apple Healthy", "Cherry Healthy", "Grape Healthy", "Grape Leaf Blight", "Grape Esca", "Cedar Apple Rust", "Cherry Powdery Mildew", "Grape Black Rot", "Apple Scab"]
+     )
+
+     target_categories: List[str] = field(
+         default_factory=lambda: ["Apple Black Rod", "Apple Healthy", "Cherry Healthy", "Grape Healthy", "Grape Leaf Blight", "Grape Esca", "Cedar Apple Rust", "Cherry Powdery Mildew", "Grape Black Rot", "Apple Scab"]
+     )
+
+     alert_config: Optional[AlertConfig] = None
+
+     index_to_category: Optional[Dict[int, str]] = field(
+         default_factory=lambda: {
+             0: "Apple Black Rod",
+             1: "Apple Healthy",
+             2: "Cherry Healthy",
+             3: "Grape Healthy",
+             4: "Grape Leaf Blight",
+             5: "Grape Esca",
+             6: "Cedar Apple Rust",
+             7: "Cherry Powdery Mildew",
+             8: "Grape Black Rot",
+             9: "Apple Scab",
+         }
+     )
+
+
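# A minimal wiring sketch (not from the package): the "all" key and the
# per-category keys in AlertConfig.count_thresholds are what _check_alerts and
# _generate_insights below actually read; the keyword name is inferred from
# that usage, so treat it as an assumption.
example_config = LeafConfig(
    confidence_threshold=0.6,
    enable_smoothing=True,
    alert_config=AlertConfig(count_thresholds={"all": 15, "Apple Scab": 5}),
)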
+ class LeafUseCase(BaseProcessor):
+     """Leaf disease monitoring use case with smoothing, tracking, and alerting."""
+
+     def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
+         """
+         Get detailed information about track IDs (per frame).
+         """
+         # Collect all track_ids in this frame
+         frame_track_ids = set()
+         for det in detections:
+             tid = det.get('track_id')
+             if tid is not None:
+                 frame_track_ids.add(tid)
+         # Use the persistent total set for unique counting
+         total_track_ids = set()
+         for s in getattr(self, '_per_category_total_track_ids', {}).values():
+             total_track_ids.update(s)
+         return {
+             "total_count": len(total_track_ids),
+             "current_frame_count": len(frame_track_ids),
+             "total_unique_track_ids": len(total_track_ids),
+             "current_frame_track_ids": list(frame_track_ids),
+             "last_update_time": time.time(),
+             "total_frames_processed": getattr(self, '_total_frame_counter', 0),
+         }
+
+     def _update_tracking_state(self, detections: list):
+         """
+         Track unique track_ids per category for the running total after tracking.
+         Applies canonical ID merging to avoid duplicate counting when the underlying
+         tracker loses an object temporarily and assigns a new ID.
+         """
+         # Lazily initialise storage dicts
+         if not hasattr(self, "_per_category_total_track_ids"):
+             self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
+             self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
+
+         for det in detections:
+             cat = det.get("category")
+             raw_track_id = det.get("track_id")
+             if cat not in self.target_categories or raw_track_id is None:
+                 continue
+             bbox = det.get("bounding_box", det.get("bbox"))
+             canonical_id = self._merge_or_register_track(raw_track_id, bbox)
+             # Propagate the canonical ID back to the detection so downstream logic uses it
+             det["track_id"] = canonical_id
+             self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
+             self._current_frame_track_ids[cat].add(canonical_id)
+         self.logger.info(f"[LEAF] Updated tracking state: per_category_total_track_ids={self._per_category_total_track_ids}, current_frame_track_ids={self._current_frame_track_ids}")
+
+     def get_total_counts(self):
+         """
+         Return the total unique track_id count for each category.
+         """
+         return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
+
+     def _format_timestamp_for_video(self, timestamp: float) -> str:
+         """Format a timestamp for video chunks (HH:MM:SS.ms format)."""
+         hours = int(timestamp // 3600)
+         minutes = int((timestamp % 3600) // 60)
+         seconds = timestamp % 60
+         return f"{hours:02d}:{minutes:02d}:{seconds:05.2f}"
+
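# Quick worked check of the video-timestamp helper (with the seconds width
# corrected from 06.2f to 05.2f so the field renders as SS.ff rather than
# a zero-padded three-digit seconds value). Assumes LeafUseCase() is
# constructible on its own, which holds if BaseProcessor only needs the
# use-case name.
assert LeafUseCase()._format_timestamp_for_video(3725.5) == "01:02:05.50"  # 1 h 2 min 5.5 s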
+     def _format_timestamp_for_stream(self, timestamp: float) -> str:
+         """Format a timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
+         dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
+         return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+     def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]]) -> str:
+         """Get the formatted current timestamp based on stream type."""
+         if not stream_info:
+             return "00:00:00.00"
+
+         if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
+             # For video files, return the video timestamp (HH:MM:SS)
+             stream_time_str = stream_info.get("video_timestamp", "")
+             return stream_time_str[:8]
+         else:
+             # For streams, use stream_time from stream_info
+             stream_time_str = stream_info.get("stream_time", "")
+             if stream_time_str:
+                 # Parse the high-precision timestamp string
+                 try:
+                     # Remove the " UTC" suffix and parse
+                     timestamp_str = stream_time_str.replace(" UTC", "")
+                     dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                     timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
+                     return self._format_timestamp_for_stream(timestamp)
+                 except Exception:
+                     # Fall back to the current time if parsing fails
+                     return self._format_timestamp_for_stream(time.time())
+             else:
+                 return self._format_timestamp_for_stream(time.time())
+
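# Shape of stream_info as this helper and _get_start_timestamp_str consume it.
# Keys are taken from the accessors above; the values are illustrative only.
example_stream_info = {
    "input_settings": {
        "stream_type": "rtsp",   # anything other than "video_file" is treated as a live stream
        "is_video_chunk": False,
        "start_frame": 42,
        "end_frame": 42,         # start == end marks a single frame (see process below)
    },
    "stream_time": "2024-01-01-12:00:00.000000 UTC",
    "video_timestamp": "00:01:23.500",  # used when stream_type == "video_file"
}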
+     def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]]) -> str:
+         """Get the formatted start timestamp for 'TOTAL SINCE' based on stream type."""
+         if not stream_info:
+             return "00:00:00"
+
+         is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
+
+         if is_video_chunk:
+             # For video chunks, start from 00:00:00
+             return "00:00:00"
+         elif stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
+             # For video files, start from 00:00:00
+             return "00:00:00"
+         else:
+             # For streams, use the tracking start time (or the current time) with minutes/seconds reset
+             if self._tracking_start_time is None:
+                 # Try to extract a timestamp from the stream_time string
+                 stream_time_str = stream_info.get("stream_time", "")
+                 if stream_time_str:
+                     try:
+                         # Remove the " UTC" suffix and parse
+                         timestamp_str = stream_time_str.replace(" UTC", "")
+                         dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                         self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                     except Exception:
+                         # Fall back to the current time if parsing fails
+                         self._tracking_start_time = time.time()
+                 else:
+                     self._tracking_start_time = time.time()
+
+             dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+             # Reset minutes and seconds to 00:00 for the "TOTAL SINCE" format
+             dt = dt.replace(minute=0, second=0, microsecond=0)
+             return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+     def __init__(self):
+         super().__init__("leaf_det")
+         self.category = "agriculture"
+
+         # List of categories to track
+         self.target_categories = ["Apple Black Rod", "Apple Healthy", "Cherry Healthy", "Grape Healthy", "Grape Leaf Blight", "Grape Esca", "Cedar Apple Rust", "Cherry Powdery Mildew", "Grape Black Rot", "Apple Scab"]
+
+         # Smoothing tracker (created lazily on first use)
+         self.smoothing_tracker = None
+
+         # Advanced tracker (created lazily on first use)
+         self.tracker = None
+
+         # Tracking state variables
+         self._total_frame_counter = 0
+         self._global_frame_offset = 0
+
+         # Track start time for the "TOTAL SINCE" calculation
+         self._tracking_start_time = None
+
+         # ------------------------------------------------------------------ #
+         # Canonical track aliasing to avoid duplicate counts                  #
+         # ------------------------------------------------------------------ #
+         # Maps raw tracker-generated IDs to stable canonical IDs that persist
+         # even if the underlying tracker re-assigns a new ID after a short
+         # interruption. This mirrors the logic used in people_counting to
+         # provide accurate unique counting.
+         self._track_aliases: Dict[Any, Any] = {}
+         self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
+         # Tunable parameters – adjust if necessary for specific scenarios
+         self._track_merge_iou_threshold: float = 0.05  # IoU ≥ 0.05 → merge as the same track
+         self._track_merge_time_window: float = 7.0  # seconds within which to merge
+
+     def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
+                 stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
+         """
+         Main entry point for post-processing.
+         Applies category mapping, smoothing, tracking, counting, alerting, and summary generation.
+         Returns a ProcessingResult with all relevant outputs.
+         """
+         start_time = time.time()
+         # Ensure config is the correct type
+         if not isinstance(config, LeafConfig):
+             return self.create_error_result("Invalid config type", usecase=self.name, category=self.category,
+                                             context=context)
+         if context is None:
+             context = ProcessingContext()
+
+         # Detect the input format and store it in the context
+         input_format = match_results_structure(data)
+         context.input_format = input_format
+         context.confidence_threshold = config.confidence_threshold
+
+         # Step 1: Apply confidence filtering
+         if config.confidence_threshold is not None:
+             processed_data = filter_by_confidence(data, config.confidence_threshold)
+             self.logger.info(f"[LEAF] Applied confidence filtering with threshold {config.confidence_threshold}, input count={len(data)}, after filter={len(processed_data)}")
+         else:
+             processed_data = data
+             self.logger.info(f"[LEAF] Did not apply confidence filtering, input count={len(data)}")
+
+         # Step 2: Apply category mapping if provided
+         if config.index_to_category:
+             processed_data = apply_category_mapping(processed_data, config.index_to_category)
+             self.logger.info(f"[LEAF] Applied category mapping, count after mapping={len(processed_data)}")
+
+         # Step 3: Filter to the target categories
+         if config.target_categories:
+             before = len(processed_data)
+             processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
+             self.logger.info(f"[LEAF] Applied category filtering, before={before}, after={len(processed_data)}")
+
+         # Step 4: Apply bbox smoothing if enabled
+         if config.enable_smoothing:
+             if self.smoothing_tracker is None:
+                 smoothing_config = BBoxSmoothingConfig(
+                     smoothing_algorithm=config.smoothing_algorithm,
+                     window_size=config.smoothing_window_size,
+                     cooldown_frames=config.smoothing_cooldown_frames,
+                     confidence_threshold=config.confidence_threshold,  # Use the main threshold as the default
+                     confidence_range_factor=config.smoothing_confidence_range_factor,
+                     enable_smoothing=True
+                 )
+                 self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
+             before = len(processed_data)
+             processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
+             self.logger.info(f"[LEAF] Applied bbox smoothing, before={before}, after={len(processed_data)}")
+
+         # Step 5: Advanced tracking (BYTETracker-like)
+         try:
+             from ..advanced_tracker import AdvancedTracker
+             from ..advanced_tracker.config import TrackerConfig
+
+             # Create the tracker instance if it doesn't exist (preserves state across frames)
+             if self.tracker is None:
+                 tracker_config = TrackerConfig()
+                 self.tracker = AdvancedTracker(tracker_config)
+                 self.logger.info("[LEAF] Initialized AdvancedTracker for monitoring and tracking")
+
+             before = len(processed_data)
+             processed_data = self.tracker.update(processed_data)
+             self.logger.info(f"[LEAF] Tracker update: before={before}, after={len(processed_data)}")
+
+         except Exception as e:
+             # If the advanced tracker fails, fall back to the untracked detections
+             self.logger.warning(f"[LEAF] AdvancedTracker failed: {e}")
+
+         # Update tracking state for the total count per label
+         self.logger.info(f"[LEAF] Processed data after tracking: {processed_data}")
+         self._update_tracking_state(processed_data)
+
+         # Update the frame counter
+         self._total_frame_counter += 1
+
+         # Extract frame information from stream_info
+         frame_number = None
+         if stream_info:
+             input_settings = stream_info.get("input_settings", {})
+             start_frame = input_settings.get("start_frame")
+             end_frame = input_settings.get("end_frame")
+             # If the start and end frame are the same, it's a single frame
+             if start_frame is not None and end_frame is not None and start_frame == end_frame:
+                 frame_number = start_frame
+
+         # Compute summaries and alerts
+         general_counting_summary = calculate_counting_summary(data)
+         counting_summary = self._count_categories(processed_data, config)
+         self.logger.info(f"[LEAF] Counting summary: {counting_summary}")
+         # Add total unique counts after tracking using only local state
+         total_counts = self.get_total_counts()
+         self.logger.info(f"[LEAF] Total unique counts: {total_counts}")
+         counting_summary['total_counts'] = total_counts
+         insights = self._generate_insights(counting_summary, config)
+         self.logger.info(f"[LEAF] Insights: {insights}")
+         alerts = self._check_alerts(counting_summary, config)
+         self.logger.info(f"[LEAF] Alerts: {alerts}")
+         predictions = self._extract_predictions(processed_data)
+         self.logger.info(f"[LEAF] Predictions: {predictions}")
+         summary = self._generate_summary(counting_summary, alerts)
+         self.logger.info(f"[LEAF] Summary: {summary}")
+
+         # Generate structured events and tracking stats with frame-based keys
+         events_list = self._generate_events(counting_summary, alerts, config, frame_number, stream_info)
+         self.logger.info(f"[LEAF] Events list: {events_list}")
+         tracking_stats_list = self._generate_tracking_stats(counting_summary, insights, summary, config, frame_number, stream_info)
+         self.logger.info(f"[LEAF] Tracking stats list: {tracking_stats_list}")
+
+         # Extract the frame-based dictionaries from the lists
+         events = events_list[0] if events_list else {}
+         tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
+
+         context.mark_completed()
+
+         # Build the result object
+         result = self.create_result(
+             data={
+                 "counting_summary": counting_summary,
+                 "general_counting_summary": general_counting_summary,
+                 "alerts": alerts,
+                 "total_detections": counting_summary.get("total_count", 0),
+                 "events": events,
+                 "tracking_stats": tracking_stats,
+             },
+             usecase=self.name,
+             category=self.category,
+             context=context
+         )
+         self.logger.info(f"[LEAF] Final result: {result.data}")
+         result.summary = summary
+         result.insights = insights
+         result.predictions = predictions
+         return result
+
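# Hypothetical end-to-end sketch (not from the package). The raw detection
# shape — an integer class index plus confidence and bounding_box — is
# inferred from the confidence filter, index_to_category mapping, and bbox
# accessors used above, so treat the exact field names as assumptions.
usecase = LeafUseCase()
config = LeafConfig(confidence_threshold=0.6)
detections = [
    {"category": 9, "confidence": 0.82,  # 9 -> "Apple Scab" via index_to_category
     "bounding_box": {"xmin": 10, "ymin": 20, "xmax": 110, "ymax": 220}},
]
result = usecase.process(
    detections, config,
    stream_info={"input_settings": {"stream_type": "video_file",
                                    "start_frame": 0, "end_frame": 0},
                 "video_timestamp": "00:00:01.000"},
)
print(result.summary)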
+     def _generate_events(self, counting_summary: Dict, alerts: List, config: LeafConfig,
+                          frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+         """Generate structured events for the output format with frame-based keys."""
+         # Use the frame number as the key, falling back to 'current_frame' if not available
+         frame_key = str(frame_number) if frame_number is not None else "current_frame"
+         events = [{frame_key: []}]
+         frame_events = events[0][frame_key]
+         total_detections = counting_summary.get("total_count", 0)
+
+         if total_detections > 0:
+             # Determine the event level based on thresholds
+             level = "info"
+             intensity = 5.0
+             if config.alert_config and config.alert_config.count_thresholds:
+                 threshold = config.alert_config.count_thresholds.get("all", 15)
+                 intensity = min(10.0, (total_detections / threshold) * 10)
+
+                 if intensity >= 7:
+                     level = "critical"
+                 elif intensity >= 5:
+                     level = "warning"
+                 else:
+                     level = "info"
+             else:
+                 if total_detections > 25:
+                     level = "critical"
+                     intensity = 9.0
+                 elif total_detections > 15:
+                     level = "warning"
+                     intensity = 7.0
+                 else:
+                     level = "info"
+                     intensity = min(10.0, total_detections / 3.0)
+
+             # Generate the human text (label the line with the computed level)
+             human_text_lines = ["EVENTS DETECTED:"]
+             human_text_lines.append(f"    - {total_detections} detected [{level.upper()}]")
+             human_text = "\n".join(human_text_lines)
+
+             event = {
+                 "type": "leaf_det",
+                 "stream_time": datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S UTC"),
+                 "level": level,
+                 "intensity": round(intensity, 1),
+                 "config": {
+                     "min_value": 0,
+                     "max_value": 10,
+                     "level_settings": {"info": 2, "warning": 5, "critical": 7}
+                 },
+                 "application_name": "Leaf Detection System",
+                 "application_version": "1.2",
+                 "location_info": None,
+                 "human_text": human_text
+             }
+             frame_events.append(event)
+
+         # Add alert events
+         for alert in alerts:
+             total_detections = counting_summary.get("total_count", 0)
+             intensity_message = "ALERT: Low congestion in the scene"
+             if config.alert_config and config.alert_config.count_thresholds:
+                 threshold = config.alert_config.count_thresholds.get("all", 15)
+                 percentage = (total_detections / threshold) * 100 if threshold > 0 else 0
+                 if percentage < 20:
+                     intensity_message = "ALERT: Low congestion in the scene"
+                 elif percentage <= 50:
+                     intensity_message = "ALERT: Moderate congestion in the scene"
+                 elif percentage <= 70:
+                     intensity_message = "ALERT: Heavy congestion in the scene"
+                 else:
+                     intensity_message = "ALERT: Severe congestion in the scene"
+             else:
+                 if total_detections > 15:
+                     intensity_message = "ALERT: Heavy congestion in the scene"
+                 elif total_detections == 1:
+                     intensity_message = "ALERT: Low congestion in the scene"
+                 else:
+                     intensity_message = "ALERT: Moderate congestion in the scene"
+
+             alert_event = {
+                 "type": alert.get("type", "congestion_alert"),
+                 "stream_time": datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S UTC"),
+                 "level": alert.get("severity", "warning"),
+                 "intensity": 8.0,
+                 "config": {
+                     "min_value": 0,
+                     "max_value": 10,
+                     "level_settings": {"info": 2, "warning": 5, "critical": 7}
+                 },
+                 "application_name": "Congestion Alert System",
+                 "application_version": "1.2",
+                 "location_info": alert.get("zone"),
+                 "human_text": f"{datetime.now(timezone.utc).strftime('%Y-%m-%d-%H:%M:%S UTC')} : {intensity_message}"
+             }
+             frame_events.append(alert_event)
+
+         return events
+
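# Illustrative read of the events payload, reusing the usecase/config from the
# sketch above: a one-element list whose dict is keyed by the frame number
# (or "current_frame" when none is known).
events = usecase._generate_events({"total_count": 3}, alerts=[], config=config, frame_number=42)
assert list(events[0].keys()) == ["42"]
first_event = events[0]["42"][0]  # the "leaf_det" event dict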
+     def _generate_tracking_stats(
+             self,
+             counting_summary: Dict,
+             insights: List[str],
+             summary: str,
+             config: LeafConfig,
+             frame_number: Optional[int] = None,
+             stream_info: Optional[Dict[str, Any]] = None
+     ) -> List[Dict]:
+         """Generate structured tracking stats for the output format with frame-based keys, including track_ids_info."""
+         frame_key = str(frame_number) if frame_number is not None else "current_frame"
+         tracking_stats = [{frame_key: []}]
+         frame_tracking_stats = tracking_stats[0][frame_key]
+
+         total_detections = counting_summary.get("total_count", 0)
+         total_counts = counting_summary.get("total_counts", {})
+         cumulative_total = sum(total_counts.values()) if total_counts else 0
+         per_category_count = counting_summary.get("per_category_count", {})
+
+         track_ids_info = self._get_track_ids_info(counting_summary.get("detections", []))
+
+         current_timestamp = self._get_current_timestamp_str(stream_info)
+         start_timestamp = self._get_start_timestamp_str(stream_info)
+
+         human_text_lines = []
+
+         # CURRENT FRAME section
+         human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
+         if total_detections > 0:
+             category_counts = [f"{count} {cat}" for cat, count in per_category_count.items()]
+             if len(category_counts) == 1:
+                 detection_text = category_counts[0] + " detected"
+             elif len(category_counts) == 2:
+                 detection_text = f"{category_counts[0]} and {category_counts[1]} detected"
+             else:
+                 detection_text = f"{', '.join(category_counts[:-1])}, and {category_counts[-1]} detected"
+             human_text_lines.append(f"\t- {detection_text}")
+         else:
+             human_text_lines.append("\t- No detections")
+
+         human_text_lines.append("")  # spacing
+
+         # TOTAL SINCE section
+         human_text_lines.append(f"TOTAL SINCE {start_timestamp}:")
+         human_text_lines.append(f"\t- Total Detected: {cumulative_total}")
+         # Add category-wise counts (only categories with non-zero counts)
+         if total_counts:
+             for cat, count in total_counts.items():
+                 if count > 0:
+                     human_text_lines.append(f"\t- {cat}: {count}")
+
+         human_text = "\n".join(human_text_lines)
+
+         tracking_stat = {
+             "type": "leaf_det",
+             "category": "agriculture",
+             "count": total_detections,
+             "insights": insights,
+             "summary": summary,
+             "timestamp": datetime.now(timezone.utc).strftime('%Y-%m-%d-%H:%M:%S UTC'),
+             "human_text": human_text,
+             "track_ids_info": track_ids_info,
+             "global_frame_offset": getattr(self, '_global_frame_offset', 0),
+             "local_frame_id": frame_key,
+             "detections": counting_summary.get("detections", [])
+         }
+
+         frame_tracking_stats.append(tracking_stat)
+         return tracking_stats
+
+     def _count_categories(self, detections: list, config: LeafConfig) -> dict:
+         """
+         Count the number of detections per category and return a summary dict.
+         Each detection is expected to carry 'track_id' (from the tracker), 'category',
+         'bounding_box', etc., and the output preserves 'track_id' per detection as
+         produced by AdvancedTracker.
+         """
+         counts = {}
+         for det in detections:
+             cat = det.get('category', 'unknown')
+             counts[cat] = counts.get(cat, 0) + 1
+         return {
+             "total_count": sum(counts.values()),
+             "per_category_count": counts,
+             "detections": [
+                 {
+                     "bounding_box": det.get("bounding_box"),
+                     "category": det.get("category"),
+                     "confidence": det.get("confidence"),
+                     "track_id": det.get("track_id"),
+                     "frame_id": det.get("frame_id")
+                 }
+                 for det in detections
+             ]
+         }
+
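# Illustrative _count_categories return value for two tracked detections.
# The shape mirrors the method above; the concrete values are hypothetical.
example_counting_summary = {
    "total_count": 2,
    "per_category_count": {"Apple Scab": 2},
    "detections": [
        {"bounding_box": {"xmin": 10, "ymin": 20, "xmax": 110, "ymax": 220},
         "category": "Apple Scab", "confidence": 0.82,
         "track_id": "leaf_0", "frame_id": 10},
        {"bounding_box": {"xmin": 200, "ymin": 40, "xmax": 320, "ymax": 260},
         "category": "Apple Scab", "confidence": 0.77,
         "track_id": "leaf_1", "frame_id": 10},
    ],
}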
+     # Human-friendly display names for categories
+     CATEGORY_DISPLAY = {
+         "Apple Black Rod": "Apple Black Rod",
+         "Apple Healthy": "Apple Healthy",
+         "Cherry Healthy": "Cherry Healthy",
+         "Grape Healthy": "Grape Healthy",
+         "Grape Leaf Blight": "Grape Leaf Blight",
+         "Grape Esca": "Grape Esca",
+         "Cedar Apple Rust": "Cedar Apple Rust",
+         "Cherry Powdery Mildew": "Cherry Powdery Mildew",
+         "Grape Black Rot": "Grape Black Rot",
+         "Apple Scab": "Apple Scab"
+     }
+
+     def _generate_insights(self, summary: dict, config: LeafConfig) -> List[str]:
+         """
+         Generate human-readable insights for each category.
+         """
+         insights = []
+         per_cat = summary.get("per_category_count", {})
+         total_detections = summary.get("total_count", 0)
+
+         if total_detections == 0:
+             insights.append("No detections in the scene")
+             return insights
+         insights.append(f"EVENT: Detected {total_detections} detections in the scene")
+
+         # Intensity calculation based on the threshold percentage
+         intensity_threshold = None
+         if (config.alert_config and
+                 config.alert_config.count_thresholds and
+                 "all" in config.alert_config.count_thresholds):
+             intensity_threshold = config.alert_config.count_thresholds["all"]
+
+         if intensity_threshold is not None:
+             # Calculate the percentage relative to the threshold
+             percentage = (total_detections / intensity_threshold) * 100
+
+             if percentage < 20:
+                 insights.append(f"INTENSITY: Low congestion in the scene ({percentage:.1f}% of capacity)")
+             elif percentage <= 50:
+                 insights.append(f"INTENSITY: Moderate congestion in the scene ({percentage:.1f}% of capacity)")
+             elif percentage <= 70:
+                 insights.append(f"INTENSITY: Heavy congestion in the scene ({percentage:.1f}% of capacity)")
+             else:
+                 insights.append(f"INTENSITY: Severe congestion in the scene ({percentage:.1f}% of capacity)")
+
+         for cat, count in per_cat.items():
+             display = self.CATEGORY_DISPLAY.get(cat, cat)
+             insights.append(f"{display}:{count}")
+         return insights
+
+     def _check_alerts(self, summary: dict, config: LeafConfig) -> List[Dict]:
+         """
+         Check if any alert thresholds are exceeded and return alert dicts.
+         """
+         alerts = []
+         if not config.alert_config:
+             return alerts
+         total = summary.get("total_count", 0)
+         if config.alert_config.count_thresholds:
+             for category, threshold in config.alert_config.count_thresholds.items():
+                 if category == "all" and total >= threshold:
+                     alerts.append({
+                         "type": "count_threshold",
+                         "severity": "warning",
+                         "message": f"Total detections count ({total}) exceeds threshold ({threshold})",
+                         "category": category,
+                         "current_count": total,
+                         "threshold": threshold
+                     })
+                 elif category in summary.get("per_category_count", {}):
+                     count = summary.get("per_category_count", {})[category]
+                     if count >= threshold:
+                         alerts.append({
+                             "type": "count_threshold",
+                             "severity": "warning",
+                             "message": f"{category} count ({count}) exceeds threshold ({threshold})",
+                             "category": category,
+                             "current_count": count,
+                             "threshold": threshold
+                         })
+         return alerts
+
+     def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
+         """
+         Extract prediction details for output (category, confidence, bounding box).
+         """
+         return [
+             {
+                 "category": det.get("category", "unknown"),
+                 "confidence": det.get("confidence", 0.0),
+                 "bounding_box": det.get("bounding_box", {})
+             }
+             for det in detections
+         ]
+
+     def _generate_summary(self, summary: dict, alerts: List) -> str:
+         """
+         Generate a human_text string for the result, including per-category counts
+         if available. Adds a tab before each label for better formatting and always
+         includes the cumulative count so far.
+         """
+         total = summary.get("total_count", 0)
+         per_cat = summary.get("per_category_count", {})
+         cumulative = summary.get("total_counts", {})
+         cumulative_total = sum(cumulative.values()) if cumulative else 0
+         lines = []
+         if total > 0:
+             lines.append(f"{total} detections")
+             if per_cat:
+                 lines.append("detections:")
+                 for cat, count in per_cat.items():
+                     lines.append(f"\t{cat}:{count}")
+         else:
+             lines.append("No detections")
+         lines.append(f"Total detections: {cumulative_total}")
+         if alerts:
+             lines.append(f"{len(alerts)} alert(s)")
+         return "\n".join(lines)
+
+     # ------------------------------------------------------------------ #
+     # Canonical ID helpers                                                #
+     # ------------------------------------------------------------------ #
+     def _compute_iou(self, box1: Any, box2: Any) -> float:
+         """Compute the IoU between two bounding boxes, which may be dicts or lists.
+         Falls back to 0 when insufficient data is available."""
+
+         # Helper to convert a bbox (dict or list) to [x1, y1, x2, y2]
+         def _bbox_to_list(bbox):
+             if bbox is None:
+                 return []
+             if isinstance(bbox, list):
+                 return bbox[:4] if len(bbox) >= 4 else []
+             if isinstance(bbox, dict):
+                 if "xmin" in bbox:
+                     return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
+                 if "x1" in bbox:
+                     return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
+                 # Fallback: the first four numeric values
+                 values = [v for v in bbox.values() if isinstance(v, (int, float))]
+                 return values[:4] if len(values) >= 4 else []
+             return []
+
+         l1 = _bbox_to_list(box1)
+         l2 = _bbox_to_list(box2)
+         if len(l1) < 4 or len(l2) < 4:
+             return 0.0
+         x1_min, y1_min, x1_max, y1_max = l1
+         x2_min, y2_min, x2_max, y2_max = l2
+
+         # Ensure correct corner ordering
+         x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
+         y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
+         x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
+         y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
+
+         inter_x_min = max(x1_min, x2_min)
+         inter_y_min = max(y1_min, y2_min)
+         inter_x_max = min(x1_max, x2_max)
+         inter_y_max = min(y1_max, y2_max)
+
+         inter_w = max(0.0, inter_x_max - inter_x_min)
+         inter_h = max(0.0, inter_y_max - inter_y_min)
+         inter_area = inter_w * inter_h
+
+         area1 = (x1_max - x1_min) * (y1_max - y1_min)
+         area2 = (x2_max - x2_min) * (y2_max - y2_min)
+         union_area = area1 + area2 - inter_area
+
+         return (inter_area / union_area) if union_area > 0 else 0.0
+
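# Worked check of the IoU helper (pure arithmetic, reusing the usecase
# instance from the sketch above): boxes [0, 0, 10, 10] and [5, 5, 15, 15]
# overlap in a 5x5 region, so IoU = 25 / (100 + 100 - 25) ≈ 0.143 —
# comfortably above the 0.05 merge threshold set in __init__.
iou = usecase._compute_iou([0, 0, 10, 10], {"xmin": 5, "ymin": 5, "xmax": 15, "ymax": 15})
assert abs(iou - 25 / 175) < 1e-9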
+     def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
+         """Return a stable canonical ID for a raw tracker ID, merging fragmented
+         tracks when IoU and temporal constraints indicate they represent the same
+         physical object."""
+         if not hasattr(self, '_canonical_id_counter'):
+             self._canonical_id_counter = 0
+         if raw_id is None or bbox is None:
+             # Nothing to merge
+             return raw_id
+
+         now = time.time()
+
+         # Fast path – raw_id is already mapped
+         if raw_id in self._track_aliases:
+             canonical_id = self._track_aliases[raw_id]
+             track_info = self._canonical_tracks.get(canonical_id)
+             if track_info is not None:
+                 track_info["last_bbox"] = bbox
+                 track_info["last_update"] = now
+                 track_info["raw_ids"].add(raw_id)
+             return canonical_id
+
+         # Attempt to merge with an existing canonical track
+         for canonical_id, info in self._canonical_tracks.items():
+             # Only consider recently updated tracks
+             if now - info["last_update"] > self._track_merge_time_window:
+                 continue
+             iou = self._compute_iou(bbox, info["last_bbox"])
+             if iou >= self._track_merge_iou_threshold:
+                 # Merge into the existing canonical track
+                 self._track_aliases[raw_id] = canonical_id
+                 info["last_bbox"] = bbox
+                 info["last_update"] = now
+                 info["raw_ids"].add(raw_id)
+                 return canonical_id
+
+         # No match – register a new canonical track with a unique ID
+         canonical_id = f"leaf_{self._canonical_id_counter}"
+         self._canonical_id_counter += 1
+         self._track_aliases[raw_id] = canonical_id
+         self._canonical_tracks[canonical_id] = {
+             "last_bbox": bbox,
+             "last_update": now,
+             "raw_ids": {raw_id},
+         }
+         return canonical_id
+
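# Sketch of the canonical-ID merge behaviour, with hypothetical raw IDs and
# reusing the usecase instance from the sketch above: raw track 7 is
# registered under a new canonical ID (e.g. "leaf_0"); the tracker later
# loses the object and re-issues raw track 12 at almost the same location
# within the 7 s window, so 12 is folded back into the same canonical ID and
# the unique count does not inflate.
cid_a = usecase._merge_or_register_track(7, {"xmin": 0, "ymin": 0, "xmax": 10, "ymax": 10})
cid_b = usecase._merge_or_register_track(12, {"xmin": 1, "ymin": 1, "xmax": 11, "ymax": 11})
assert cid_a == cid_b  # both raw IDs map to the same canonical ID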
+     def _format_timestamp(self, timestamp: float) -> str:
+         """Format a timestamp for human-readable output."""
+         return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
+
+     def _get_tracking_start_time(self) -> str:
+         """Get the tracking start time, formatted as a string."""
+         if self._tracking_start_time is None:
+             return "N/A"
+         return self._format_timestamp(self._tracking_start_time)
+
+     def _set_tracking_start_time(self) -> None:
+         """Set the tracking start time to the current time."""
+         self._tracking_start_time = time.time()