matrice-analytics 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of matrice-analytics might be problematic. Click here for more details.

Files changed (160) hide show
  1. matrice_analytics/__init__.py +28 -0
  2. matrice_analytics/boundary_drawing_internal/README.md +305 -0
  3. matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
  4. matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
  5. matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
  6. matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
  7. matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
  8. matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
  9. matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
  10. matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
  11. matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
  12. matrice_analytics/post_processing/README.md +455 -0
  13. matrice_analytics/post_processing/__init__.py +732 -0
  14. matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
  15. matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
  16. matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
  17. matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
  18. matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
  19. matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
  20. matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
  21. matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
  22. matrice_analytics/post_processing/config.py +142 -0
  23. matrice_analytics/post_processing/core/__init__.py +63 -0
  24. matrice_analytics/post_processing/core/base.py +704 -0
  25. matrice_analytics/post_processing/core/config.py +3188 -0
  26. matrice_analytics/post_processing/core/config_utils.py +925 -0
  27. matrice_analytics/post_processing/face_reg/__init__.py +43 -0
  28. matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
  29. matrice_analytics/post_processing/face_reg/embedding_manager.py +681 -0
  30. matrice_analytics/post_processing/face_reg/face_recognition.py +1870 -0
  31. matrice_analytics/post_processing/face_reg/face_recognition_client.py +339 -0
  32. matrice_analytics/post_processing/face_reg/people_activity_logging.py +283 -0
  33. matrice_analytics/post_processing/ocr/__init__.py +0 -0
  34. matrice_analytics/post_processing/ocr/easyocr_extractor.py +248 -0
  35. matrice_analytics/post_processing/ocr/postprocessing.py +271 -0
  36. matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
  37. matrice_analytics/post_processing/post_processor.py +1153 -0
  38. matrice_analytics/post_processing/test_cases/__init__.py +1 -0
  39. matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
  40. matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
  41. matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
  42. matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
  43. matrice_analytics/post_processing/test_cases/test_config.py +852 -0
  44. matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
  45. matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
  46. matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
  47. matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
  48. matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
  49. matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
  50. matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
  51. matrice_analytics/post_processing/usecases/__init__.py +267 -0
  52. matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
  53. matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
  54. matrice_analytics/post_processing/usecases/age_detection.py +842 -0
  55. matrice_analytics/post_processing/usecases/age_gender_detection.py +1043 -0
  56. matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
  57. matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
  58. matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
  59. matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
  60. matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
  61. matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
  62. matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
  63. matrice_analytics/post_processing/usecases/car_service.py +1601 -0
  64. matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
  65. matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
  66. matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
  67. matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
  68. matrice_analytics/post_processing/usecases/color/clip.py +232 -0
  69. matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
  70. matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
  71. matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
  72. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
  73. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
  74. matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
  75. matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
  76. matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
  77. matrice_analytics/post_processing/usecases/color_detection.py +1835 -0
  78. matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
  79. matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
  80. matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
  81. matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
  82. matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
  83. matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
  84. matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +930 -0
  85. matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
  86. matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
  87. matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
  88. matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
  89. matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
  90. matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
  91. matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
  92. matrice_analytics/post_processing/usecases/fire_detection.py +1112 -0
  93. matrice_analytics/post_processing/usecases/flare_analysis.py +891 -0
  94. matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
  95. matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
  96. matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
  97. matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
  98. matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
  99. matrice_analytics/post_processing/usecases/leaf.py +821 -0
  100. matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
  101. matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
  102. matrice_analytics/post_processing/usecases/license_plate_detection.py +914 -0
  103. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1194 -0
  104. matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
  105. matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
  106. matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
  107. matrice_analytics/post_processing/usecases/parking.py +787 -0
  108. matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
  109. matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
  110. matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
  111. matrice_analytics/post_processing/usecases/people_counting.py +1728 -0
  112. matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
  113. matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
  114. matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
  115. matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
  116. matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
  117. matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
  118. matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
  119. matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
  120. matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
  121. matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
  122. matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
  123. matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
  124. matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
  125. matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
  126. matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
  127. matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
  128. matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
  129. matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
  130. matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
  131. matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
  132. matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
  133. matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
  134. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +950 -0
  135. matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
  136. matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
  137. matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
  138. matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
  139. matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
  140. matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
  141. matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
  142. matrice_analytics/post_processing/utils/__init__.py +150 -0
  143. matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
  144. matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
  145. matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
  146. matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
  147. matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
  148. matrice_analytics/post_processing/utils/color_utils.py +592 -0
  149. matrice_analytics/post_processing/utils/counting_utils.py +182 -0
  150. matrice_analytics/post_processing/utils/filter_utils.py +261 -0
  151. matrice_analytics/post_processing/utils/format_utils.py +293 -0
  152. matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
  153. matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
  154. matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
  155. matrice_analytics/py.typed +0 -0
  156. matrice_analytics-0.1.2.dist-info/METADATA +481 -0
  157. matrice_analytics-0.1.2.dist-info/RECORD +160 -0
  158. matrice_analytics-0.1.2.dist-info/WHEEL +5 -0
  159. matrice_analytics-0.1.2.dist-info/licenses/LICENSE.txt +21 -0
  160. matrice_analytics-0.1.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1194 @@
1
+ from typing import Any, Dict, List, Optional
2
+ from dataclasses import asdict, dataclass, field
3
+ import time
4
+ from datetime import datetime, timezone
5
+ import copy
6
+ import tempfile
7
+ import os
8
+ from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol
9
+ from ..utils import (
10
+ filter_by_confidence,
11
+ filter_by_categories,
12
+ apply_category_mapping,
13
+ count_objects_by_category,
14
+ count_objects_in_zones,
15
+ calculate_counting_summary,
16
+ match_results_structure,
17
+ bbox_smoothing,
18
+ BBoxSmoothingConfig,
19
+ BBoxSmoothingTracker
20
+ )
21
+ # External dependencies
22
+ import cv2
23
+ import numpy as np
24
+ #import torch
25
+ import re
26
+ from collections import Counter, defaultdict
27
+ #from turbojpeg import TurboJPEG, TJPF_RGB
28
+ os.environ["ORT_LOG_SEVERITY_LEVEL"] = "3"
29
# Fast license-plate OCR (replaces EasyOCR)
# Attempt to import fast_plate_ocr; fall back to a stub if unavailable.
try:
    from fast_plate_ocr import LicensePlateRecognizer  # type: ignore
except Exception:  # pragma: no cover – optional dependency may be absent
    # NOTE: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer swallowed during import.
    class LicensePlateRecognizer:  # type: ignore
        """Stub fallback when fast_plate_ocr is not installed.

        Mirrors the minimal interface the use case relies on:
        construction (logs a warning) and ``run`` (returns no text),
        so downstream code degrades to "no OCR result" instead of
        raising AttributeError.
        """

        def __init__(self, *args, **kwargs):
            print("fast_plate_ocr is required for LicensePlateMonitorUseCase but is not installed.")

        def run(self, *args, **kwargs):
            """Return an empty result list, matching fast_plate_ocr's list output shape."""
            return []
38
+
39
+ # Internal utilities that are still required
40
+ from ..ocr.preprocessing import ImagePreprocessor
41
+ from ..core.config import BaseConfig, AlertConfig, ZoneConfig
42
+
43
# (Catch import errors early in the logs)
# NOTE(review): LicensePlateRecognizer is always bound above (either the real
# import or the stub class), so this name reference should never raise; the
# except branch is effectively dead and kept only as defensive logging.
try:
    _ = LicensePlateRecognizer  # noqa: B018 – reference to quiet linters
except Exception as _e:
    print(f"Warning: fast_plate_ocr could not be imported ⇒ {_e}")
48
+
49
+
50
@dataclass
class LicensePlateMonitorConfig(BaseConfig):
    """Tunable settings for the license-plate monitoring use case.

    Covers detection filtering (confidence, categories), optional bbox
    smoothing, frame handling, alerting, and OCR behavior. Mutable
    defaults use ``default_factory``; immutable string defaults are
    plain values.
    """

    # --- bbox smoothing -------------------------------------------------
    enable_smoothing: bool = False
    smoothing_algorithm: str = "observability"  # "window" or "observability"
    smoothing_window_size: int = 20
    smoothing_cooldown_frames: int = 5
    smoothing_confidence_range_factor: float = 0.5
    # --- detection filtering / frames -----------------------------------
    confidence_threshold: float = 0.5
    frame_skip: int = 1
    fps: Optional[float] = None
    bbox_format: str = "auto"
    usecase_categories: List[str] = field(default_factory=lambda: ["license_plate"])
    target_categories: List[str] = field(default_factory=lambda: ["license_plate"])
    alert_config: Optional[AlertConfig] = None
    index_to_category: Optional[Dict[int, str]] = field(default_factory=lambda: {0: "license_plate"})
    # --- OCR ------------------------------------------------------------
    language: List[str] = field(default_factory=lambda: ["en"])
    country: str = "us"
    ocr_mode: str = "numeric"  # "alphanumeric" or "numeric" or "alphabetic"

    def validate(self) -> List[str]:
        """Return accumulated validation errors (base-class errors included)."""
        errors = super().validate()
        if not (0 <= self.confidence_threshold <= 1):
            errors.append("confidence_threshold must be between 0 and 1")
        if self.frame_skip <= 0:
            errors.append("frame_skip must be positive")
        if self.bbox_format not in ("auto", "xmin_ymin_xmax_ymax", "x_y_width_height"):
            errors.append("bbox_format must be one of: auto, xmin_ymin_xmax_ymax, x_y_width_height")
        if self.smoothing_window_size <= 0:
            errors.append("smoothing_window_size must be positive")
        if self.smoothing_cooldown_frames < 0:
            errors.append("smoothing_cooldown_frames cannot be negative")
        if self.smoothing_confidence_range_factor <= 0:
            errors.append("smoothing_confidence_range_factor must be positive")
        return errors
86
+
87
+ class LicensePlateMonitorUseCase(BaseProcessor):
88
+ CATEGORY_DISPLAY = {"license_plate": "license_plate"}
89
+
90
+ # --------------------------------------------------------------
91
+ # Shared resources (initialised once per process)
92
+ # --------------------------------------------------------------
93
+ _ocr_model: Optional[LicensePlateRecognizer] = None # Fast plate OCR
94
+
95
+
96
+
97
    def __init__(self):
        """Initialise per-instance tracking/OCR state and the shared OCR model.

        The fast-plate OCR model is loaded once per process into the class
        attribute ``_ocr_model`` and shared by all instances.
        """
        super().__init__("license_plate_monitor")
        self.category = "license_plate_monitor"
        self.target_categories = ['license_plate']
        self.CASE_TYPE: Optional[str] = 'license_plate_monitor'
        self.CASE_VERSION: Optional[str] = '1.3'
        # Lazily created in process(): bbox smoother and AdvancedTracker.
        self.smoothing_tracker = None
        self.tracker = None
        # Frame bookkeeping across process() calls.
        self._total_frame_counter = 0
        self._global_frame_offset = 0
        self._tracking_start_time = None
        # Track-merging bookkeeping (aliases map raw ids to canonical ids).
        self._track_aliases: Dict[Any, Any] = {}
        self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
        self._track_merge_iou_threshold: float = 0.05
        self._track_merge_time_window: float = 7.0
        self._ascending_alert_list: List[int] = []
        self.current_incident_end_timestamp: str = "N/A"
        self._seen_plate_texts = set()
        # CHANGE: Added _tracked_plate_texts to store the longest plate_text per track_id
        self._tracked_plate_texts: Dict[Any, str] = {}
        # Containers for text stability & uniqueness
        self._unique_plate_texts: Dict[str, str] = {}  # cleaned_text -> original (longest)
        # NEW: track-wise frequency of cleaned texts to pick the dominant variant per track
        self._track_text_counts: Dict[Any, Counter] = defaultdict(Counter)  # track_id -> Counter(cleaned_text -> count)
        # Helper dictionary to keep history of plate texts per track
        self.helper: Dict[Any, List[str]] = {}
        # Map of track_id -> current dominant plate text
        self.unique_plate_track: Dict[Any, str] = {}
        self.image_preprocessor = ImagePreprocessor()
        # Fast OCR model (shared across instances); load failure is logged,
        # leaving _ocr_model as None — process() retries lazily.
        if LicensePlateMonitorUseCase._ocr_model is None:
            try:
                LicensePlateMonitorUseCase._ocr_model = LicensePlateRecognizer('cct-s-v1-global-model')
                self.logger.info("LicensePlateRecognizer loaded successfully")
            except Exception as e:
                self.logger.warning(f"Failed to initialise LicensePlateRecognizer: {e}")
        self.ocr_model = LicensePlateMonitorUseCase._ocr_model
        # OCR text history for stability checks (text → consecutive frame count)
        self._text_history: Dict[str, int] = {}

        self.start_timer = None
        #self.reset_timer = "2025-08-19-04:22:47.187574 UTC"

        # Minimum length for a valid plate (after cleaning)
        self._min_plate_len = 5
        # number of consecutive frames a plate must appear to be considered "stable"
        self._stable_frames_required = 3
        # Strips every non-alphanumeric character from raw OCR output.
        self._non_alnum_regex = re.compile(r"[^A-Za-z0-9]+")
        # Set from config on each process() call.
        self._ocr_mode = None
        #self.jpeg = TurboJPEG()
147
+
148
+
149
+ def reset_tracker(self) -> None:
150
+ """Reset the advanced tracker instance."""
151
+ if self.tracker is not None:
152
+ self.tracker.reset()
153
+ self.logger.info("AdvancedTracker reset for new tracking session")
154
+
155
+ def reset_plate_tracking(self) -> None:
156
+ """Reset plate tracking state."""
157
+ self._seen_plate_texts = set()
158
+ # CHANGE: Reset _tracked_plate_texts
159
+ self._tracked_plate_texts = {}
160
+ self._total_frame_counter = 0
161
+ self._global_frame_offset = 0
162
+ self._text_history = {}
163
+ self._unique_plate_texts = {}
164
+ self.helper = {}
165
+ self.unique_plate_track = {}
166
+ self.logger.info("Plate tracking state reset")
167
+
168
+ def reset_all_tracking(self) -> None:
169
+ """Reset both advanced tracker and plate tracking state."""
170
+ self.reset_tracker()
171
+ self.reset_plate_tracking()
172
+ self.logger.info("All plate tracking state reset")
173
+
174
    def process(self, data: Any, config: ConfigProtocol, input_bytes: Optional[bytes] = None,
                context: Optional[ProcessingContext] = None, stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
        """Run the full license-plate monitoring pipeline on one frame.

        Pipeline: confidence filter → category mapping/filter → optional bbox
        smoothing → tracking → OCR on plate crops → plate-text bookkeeping →
        counting/alerts/incident summaries, aggregated per frame number.

        Args:
            data: Raw detection results (dict or list; shape depends on the
                upstream model — detected via match_results_structure).
            config: Must be a LicensePlateMonitorConfig; otherwise an error
                result is returned.
            input_bytes: Encoded image bytes of the frame; required for OCR.
            context: Optional processing context; created if absent.
            stream_info: Optional stream metadata; "input_settings" with equal
                start_frame/end_frame yields the frame number.

        Returns:
            ProcessingResult whose data holds {"agg_summary": {frame: {...}}},
            or an error result on invalid config/missing input/failure.
        """
        processing_start = time.time()
        try:
            if not isinstance(config, LicensePlateMonitorConfig):
                return self.create_error_result("Invalid configuration type for license plate monitoring",
                                                usecase=self.name, category=self.category, context=context)

            if context is None:
                context = ProcessingContext()

            if not input_bytes:
                return self.create_error_result("input_bytes (video/image) is required for license plate monitoring",
                                                usecase=self.name, category=self.category, context=context)

            # Debug banner (left in place; goes to stdout on every call).
            print("--------------------------------------")
            print("config.alert_config",config.alert_config)
            print(config)
            print("--------------------------------------")

            # Normalize alert_config if provided as a plain dict (JS JSON)
            if isinstance(getattr(config, 'alert_config', None), dict):
                try:
                    config.alert_config = AlertConfig(**config.alert_config)  # type: ignore[arg-type]
                except Exception:
                    pass

            # Initialize OCR extractor if not already done (lazy retry if the
            # class-level load in __init__ failed).
            if self.ocr_model is None:
                self.logger.info("Lazy initialisation fallback (should rarely happen)")
                try:
                    LicensePlateMonitorUseCase._ocr_model = LicensePlateRecognizer('cct-s-v1-global-model')
                    self.ocr_model = LicensePlateMonitorUseCase._ocr_model
                except Exception as e:
                    return self.create_error_result(
                        f"Failed to initialise OCR model: {e}",
                        usecase=self.name,
                        category=self.category,
                        context=context,
                    )

            input_format = match_results_structure(data)
            context.input_format = input_format
            context.confidence_threshold = config.confidence_threshold
            self._ocr_mode = config.ocr_mode
            self.logger.info(f"Processing license plate monitoring with format: {input_format.value}")

            # Step 1: Apply confidence filtering
            processed_data = filter_by_confidence(data, config.confidence_threshold)

            # Step 2: Apply category mapping if provided
            if config.index_to_category:
                processed_data = apply_category_mapping(processed_data, config.index_to_category)
            # Step 3: Filter to target categories (handle dict or list)
            if isinstance(processed_data, dict):
                processed_data = processed_data.get("detections", [])
            # Accept case-insensitive category values and allow overriding via config
            effective_targets = getattr(config, 'target_categories', self.target_categories) or self.target_categories
            targets_lower = {str(cat).lower() for cat in effective_targets}
            processed_data = [d for d in processed_data if str(d.get('category', '')).lower() in targets_lower]

            # Deep copies preserve pre-smoothing detections (incl. masks) so
            # they can be re-attached after tracking mutates the list.
            raw_processed_data = [copy.deepcopy(det) for det in processed_data]
            # Step 4: Apply bounding box smoothing if enabled
            if config.enable_smoothing:
                if self.smoothing_tracker is None:
                    smoothing_config = BBoxSmoothingConfig(
                        smoothing_algorithm=config.smoothing_algorithm,
                        window_size=config.smoothing_window_size,
                        cooldown_frames=config.smoothing_cooldown_frames,
                        confidence_threshold=config.confidence_threshold,
                        confidence_range_factor=config.smoothing_confidence_range_factor,
                        enable_smoothing=True
                    )
                    self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
                processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)

            # Step 5: Apply advanced tracking (best-effort: a tracker failure
            # only logs a warning and leaves detections untracked).
            try:
                from ..advanced_tracker import AdvancedTracker
                from ..advanced_tracker.config import TrackerConfig
                if self.tracker is None:
                    tracker_config = TrackerConfig(
                        track_high_thresh=float(config.confidence_threshold),
                        track_low_thresh=max(0.05, float(config.confidence_threshold) / 2),
                        new_track_thresh=float(config.confidence_threshold)
                    )
                    self.tracker = AdvancedTracker(tracker_config)
                    self.logger.info(f"Initialized AdvancedTracker with thresholds: high={tracker_config.track_high_thresh}, "
                                     f"low={tracker_config.track_low_thresh}, new={tracker_config.new_track_thresh}")
                processed_data = self.tracker.update(processed_data)
            except Exception as e:
                self.logger.warning(f"AdvancedTracker failed: {e}")
            # Step 6: Update tracking state (helper defined elsewhere in class)
            self._update_tracking_state(processed_data)
            # Step 7: Attach masks to detections
            processed_data = self._attach_masks_to_detections(processed_data, raw_processed_data)
            # Step 8: Perform OCR on media
            ocr_analysis = self._analyze_ocr_in_media(processed_data, input_bytes, config)

            # Step 9: Update plate texts
            processed_data = self._update_detections_with_ocr(processed_data, ocr_analysis)
            self._update_plate_texts(processed_data)

            # Step 10: Update frame counter
            self._total_frame_counter += 1

            # Step 11: Extract frame information (only when the stream
            # reports a single-frame window, i.e. start == end).
            frame_number = None
            if stream_info:
                input_settings = stream_info.get("input_settings", {})
                start_frame = input_settings.get("start_frame")
                end_frame = input_settings.get("end_frame")
                if start_frame is not None and end_frame is not None and start_frame == end_frame:
                    frame_number = start_frame

            # Step 12: Calculate summaries
            counting_summary = self._count_categories(processed_data, config)
            counting_summary['total_counts'] = self.get_total_counts()

            # Step 13: Generate alerts and summaries
            alerts = self._check_alerts(counting_summary, frame_number, config)
            incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
            tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
            business_analytics_list = []
            summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)

            # Step 14: Build result (first element of each list, or empty).
            incidents = incidents_list[0] if incidents_list else {}
            tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
            business_analytics = business_analytics_list[0] if business_analytics_list else {}
            summary = summary_list[0] if summary_list else {}
            # Build LPR_dict (per-track history) and counter (dominant in last 50%)
            LPR_dict = {}
            counter = {}
            for tid, history in self.helper.items():
                if not history:
                    continue
                LPR_dict[str(tid)] = list(history)
                # dominant from last 50% of the per-track history
                half = max(1, len(history) // 2)
                window = history[-half:]
                # NOTE(review): Counter is already imported at module level;
                # this local aliased import is redundant but harmless.
                from collections import Counter as _Ctr
                dom, cnt = _Ctr(window).most_common(1)[0]
                counter[str(tid)] = {"plate": dom, "count": cnt}

            # NOTE(review): LPR_dict/counter are computed but not included in
            # agg_summary — confirm whether they should be surfaced.
            agg_summary = {str(frame_number): {
                "incidents": incidents,
                "tracking_stats": tracking_stats,
                "business_analytics": business_analytics,
                "alerts": alerts,
                "human_text": summary
            }}

            context.mark_completed()
            result = self.create_result(
                data={"agg_summary": agg_summary},
                usecase=self.name,
                category=self.category,
                context=context
            )
            proc_time = time.time() - processing_start
            processing_latency_ms = proc_time * 1000.0
            processing_fps = (1.0 / proc_time) if proc_time > 0 else None
            # Log the performance metrics using the module-level logger
            print("latency in ms:",processing_latency_ms,"| Throughput fps:",processing_fps,"| Frame_Number:",self._total_frame_counter)

            return result

        except Exception as e:
            self.logger.error(f"License plate monitoring failed: {str(e)}", exc_info=True)
            if context:
                context.mark_completed()
            return self.create_error_result(str(e), type(e).__name__, usecase=self.name, category=self.category, context=context)
360
+
361
+ def _is_video_bytes(self, media_bytes: bytes) -> bool:
362
+ """Determine if bytes represent a video file."""
363
+ video_signatures = [
364
+ b'\x00\x00\x00\x20ftypmp4', # MP4
365
+ b'\x00\x00\x00\x18ftypmp4', # MP4 variant
366
+ b'RIFF', # AVI
367
+ b'\x1aE\xdf\xa3', # MKV/WebM
368
+ b'ftyp', # General MP4 family
369
+ ]
370
+ for signature in video_signatures:
371
+ if media_bytes.startswith(signature) or signature in media_bytes[:50]:
372
+ return True
373
+ return False
374
+
375
    def _analyze_ocr_in_media(self, data: Any, media_bytes: bytes, config: LicensePlateMonitorConfig) -> List[Dict[str, Any]]:
        """Analyze OCR of license plates in video frames or images.

        NOTE(review): despite the name, this always delegates to the
        single-image path; video bytes would fail to decode there — confirm
        whether per-frame video handling is done upstream.
        """
        return self._analyze_ocr_in_image(data, media_bytes, config)
378
+
379
+
380
    def _analyze_ocr_in_image(self, data: Any, image_bytes: bytes, config: LicensePlateMonitorConfig) -> List[Dict[str, Any]]:
        """Run OCR on every qualifying plate detection within one image.

        Decodes *image_bytes* with OpenCV, converts to RGB, crops each
        detection's bounding box and OCRs it, returning one record per
        detection that passes the confidence threshold and yields a
        non-empty crop.

        Raises:
            RuntimeError: if the bytes cannot be decoded as an image.
        """
        image_array = np.frombuffer(image_bytes, np.uint8)
        image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
        #image = self.jpeg.decode(image_bytes, pixel_format=TJPF_RGB) #cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)

        if image is None:
            raise RuntimeError("Failed to decode image from bytes")

        # OCR model expects RGB; cv2 decodes BGR.
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        ocr_analysis = []
        # Single-image path: frame id is hard-coded to "0".
        detections = self._get_frame_detections(data, "0")

        for detection in detections:
            # Missing confidence defaults to 1.0, i.e. never filtered here.
            if detection.get("confidence", 1.0) < config.confidence_threshold:
                continue

            bbox = detection.get("bounding_box", detection.get("bbox"))
            if not bbox:
                continue

            crop = self._crop_bbox(rgb_image, bbox, config.bbox_format)
            if crop.size == 0:
                continue

            plate_text_raw = self._run_ocr(crop)
            # Empty OCR output becomes None so callers can distinguish
            # "no text read" from a real (possibly short) plate string.
            plate_text = plate_text_raw if plate_text_raw else None

            ocr_record = {
                "frame_id": "0",
                "timestamp": 0.0,
                "category": detection.get("category", ""),
                "confidence": round(detection.get("confidence", 0.0), 3),
                "plate_text": plate_text,
                "bbox": bbox,
                "detection_id": detection.get("id", f"det_{len(ocr_analysis)}"),
                "track_id": detection.get("track_id")
            }
            ocr_analysis.append(ocr_record)

        return ocr_analysis
427
+
428
+ def _crop_bbox(self, image: np.ndarray, bbox: Dict[str, Any], bbox_format: str) -> np.ndarray:
429
+ """Crop bounding box region from image."""
430
+ h, w = image.shape[:2]
431
+
432
+ if bbox_format == "auto":
433
+ if "xmin" in bbox:
434
+ bbox_format = "xmin_ymin_xmax_ymax"
435
+ elif "x" in bbox:
436
+ bbox_format = "x_y_width_height"
437
+ else:
438
+ return np.zeros((0, 0, 3), dtype=np.uint8)
439
+
440
+ if bbox_format == "xmin_ymin_xmax_ymax":
441
+ xmin = max(0, int(bbox["xmin"]))
442
+ ymin = max(0, int(bbox["ymin"]))
443
+ xmax = min(w, int(bbox["xmax"]))
444
+ ymax = min(h, int(bbox["ymax"]))
445
+ elif bbox_format == "x_y_width_height":
446
+ xmin = max(0, int(bbox["x"]))
447
+ ymin = max(0, int(bbox["y"]))
448
+ xmax = min(w, int(bbox["x"] + bbox["width"]))
449
+ ymax = min(h, int(bbox["y"] + bbox["height"]))
450
+ else:
451
+ return np.zeros((0, 0, 3), dtype=np.uint8)
452
+
453
+ return image[ymin:ymax, xmin:xmax]
454
+
455
+ # ------------------------------------------------------------------
456
+ # Fast OCR helpers
457
+ # ------------------------------------------------------------------
458
+ def _clean_text(self, text: str) -> str:
459
+ """Sanitise OCR output to keep only alphanumerics and uppercase."""
460
+ if not text:
461
+ return ""
462
+ return self._non_alnum_regex.sub('', text).upper()
463
+
464
+ def _run_ocr(self, crop: np.ndarray) -> str:
465
+ """Run OCR on a cropped plate image and return cleaned text or empty string."""
466
+ # print("---------OCR CROP22",crop)
467
+ # print("---------OCR CROP SIZE22",crop.size)
468
+
469
+ if crop is None or crop.size == 0 or self.ocr_model is None:
470
+ return ""
471
+ try:
472
+ # fast_plate_ocr expects RGB
473
+ #rgb_crop = cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)
474
+ res = self.ocr_model.run(crop)
475
+ if isinstance(res, list):
476
+ res = res[0] if res else ""
477
+ cleaned_text = self._clean_text(str(res))
478
+ if cleaned_text and len(cleaned_text) >= self._min_plate_len:
479
+ if self._ocr_mode == "numeric":
480
+ response = all(ch.isdigit() for ch in cleaned_text)
481
+ elif self._ocr_mode == "alphabetic":
482
+ response = all(ch.isalpha() for ch in cleaned_text)
483
+ elif self._ocr_mode == "alphanumeric":
484
+ response = True
485
+
486
+ if response:
487
+ return cleaned_text
488
+ else:
489
+ return ""
490
+ except Exception as exc:
491
+ self.logger.warning(f"OCR failed: {exc}")
492
+ return ""
493
+
494
+ def _get_frame_detections(self, data: Any, frame_key: str) -> List[Dict[str, Any]]:
495
+ """Extract detections for a specific frame from data."""
496
+ if isinstance(data, dict):
497
+ return data.get(frame_key, [])
498
+ elif isinstance(data, list):
499
+ return data
500
+ else:
501
+ return []
502
+
503
+ def _update_detections_with_ocr(self, detections: List[Dict[str, Any]], ocr_analysis: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
504
+ """Update detections with OCR results using track_id or bounding box for matching."""
505
+ #print("---------UPDATE DETECTIONS WITH OCR",ocr_analysis)
506
+ ocr_dict = {}
507
+ for rec in ocr_analysis:
508
+ if rec.get("plate_text"):
509
+ # Primary key: track_id
510
+ track_id = rec.get("track_id")
511
+ if track_id is not None:
512
+ ocr_dict[track_id] = rec["plate_text"]
513
+ # Fallback key: bounding box as tuple
514
+ else:
515
+ bbox_key = tuple(sorted(rec["bbox"].items())) if rec.get("bbox") else None
516
+ if bbox_key:
517
+ ocr_dict[bbox_key] = rec["plate_text"]
518
+ #self.logger.info(f"OCR record: track_id={track_id}, plate_text={rec.get('plate_text')}, bbox={rec.get('bbox')}")
519
+
520
+ #print("---------UPDATE DETECTIONS WITH OCR -II",ocr_dict)
521
+ for det in detections:
522
+ track_id = det.get("track_id")
523
+ bbox_key = tuple(sorted(det.get("bounding_box", det.get("bbox", {})).items())) if det.get("bounding_box") or det.get("bbox") else None
524
+ plate_text = None
525
+ if track_id is not None and track_id in ocr_dict:
526
+ plate_text = ocr_dict[track_id]
527
+ elif bbox_key and bbox_key in ocr_dict:
528
+ plate_text = ocr_dict[bbox_key]
529
+ det["plate_text"] = plate_text
530
+ #self.logger.info(f"Detection track_id={track_id}, bbox={det.get('bounding_box')}: Assigned plate_text={plate_text}")
531
+ return detections
532
+
533
+ def _count_categories(self, detections: List[Dict], config: LicensePlateMonitorConfig) -> Dict[str, Any]:
534
+ """Count unique licence-plate texts per frame and attach detections."""
535
+ unique_texts: set = set()
536
+ valid_detections: List[Dict[str, Any]] = []
537
+
538
+ # Group detections by track_id for per-track dominance
539
+ tracks: Dict[Any, List[Dict[str, Any]]] = {}
540
+ for det in detections:
541
+ if not all(k in det for k in ['category', 'confidence', 'bounding_box']):
542
+ continue
543
+ tid = det.get('track_id')
544
+ if tid is None:
545
+ # If no track id, treat as its own pseudo-track keyed by bbox
546
+ tid = (det.get("bounding_box") or det.get("bbox"))
547
+ tracks.setdefault(tid, []).append(det)
548
+
549
+ for tid, dets in tracks.items():
550
+ # Pick a representative bbox (first occurrence)
551
+ rep = dets[0]
552
+ cat = rep.get('category', '')
553
+ bbox = rep.get('bounding_box')
554
+ conf = rep.get('confidence')
555
+ frame_id = rep.get('frame_id')
556
+
557
+ # Compute dominant text for this track from last 50% of history
558
+ dominant_text = None
559
+ history = self.helper.get(tid, [])
560
+ if history:
561
+ half = max(1, len(history) // 2)
562
+ window = history[-half:]
563
+ from collections import Counter as _Ctr
564
+ dominant_text, _ = _Ctr(window).most_common(1)[0]
565
+ elif rep.get('plate_text'):
566
+ candidate = self._clean_text(rep.get('plate_text', ''))
567
+ if self._min_plate_len <= len(candidate) <= 6:
568
+ dominant_text = candidate
569
+
570
+ # Fallback to already computed per-track mapping
571
+ if not dominant_text:
572
+ dominant_text = self.unique_plate_track.get(tid)
573
+
574
+ # Enforce length 5–6 and uniqueness per frame
575
+ if dominant_text and self._min_plate_len <= len(dominant_text) <= 6:
576
+ unique_texts.add(dominant_text)
577
+ valid_detections.append({
578
+ "bounding_box": bbox,
579
+ "category": cat,
580
+ "confidence": conf,
581
+ "track_id": rep.get('track_id'),
582
+ "frame_id": frame_id,
583
+ "masks": rep.get("masks", []),
584
+ "plate_text": dominant_text
585
+ })
586
+
587
+ counts = {"License_Plate": len(unique_texts)} if unique_texts else {}
588
+
589
+ return {
590
+ "total_count": len(unique_texts),
591
+ "per_category_count": counts,
592
+ "detections": valid_detections
593
+ }
594
+
595
def _generate_tracking_stats(self, counting_summary: Dict, alerts: Any, config: LicensePlateMonitorConfig,
                             frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
    """Generate structured tracking stats with frame-based keys.

    Builds the human-readable per-frame and cumulative summary text,
    converts surviving detections into detection objects (carrying the
    dominant plate text), assembles alert settings, and wraps everything
    through self.create_tracking_stats. Returns a one-element list.
    """
    tracking_stats = []
    total_detections = counting_summary.get("total_count", 0)
    total_counts = counting_summary.get("total_counts", {})
    # NOTE(review): set() collapses equal counts across categories before
    # summing — confirm this deduplication is intentional.
    cumulative_total = sum(set(total_counts.values())) if total_counts else 0
    per_category_count = counting_summary.get("per_category_count", {})
    # NOTE(review): computed but never used below.
    track_ids_info = self._get_track_ids_info(counting_summary.get("detections", []))
    current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
    start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
    high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
    high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
    camera_info = self.get_camera_info_from_stream(stream_info)

    human_text_lines = []
    human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
    if total_detections > 0:
        category_counts = [f"{count} {cat}" for cat, count in per_category_count.items()]
        detection_text = category_counts[0] + " detected" if len(category_counts) == 1 else f"{', '.join(category_counts[:-1])}, and {category_counts[-1]} detected"
        human_text_lines.append(f"\t- {detection_text}")
        # Show dominant per-track license plates for the current frame,
        # once per track and only for plates of valid length.
        seen = set()
        display_texts = []
        for det in counting_summary.get("detections", []):
            t = det.get("track_id")
            dom = det.get("plate_text")
            if not dom or not (self._min_plate_len <= len(dom) <= 6):
                continue
            if t in seen:
                continue
            seen.add(t)
            display_texts.append(dom)
        if display_texts:
            human_text_lines.append(f"\t- License Plates: {', '.join(display_texts)}")
    else:
        human_text_lines.append(f"\t- No detections")

    human_text_lines.append("")
    human_text_lines.append(f"TOTAL SINCE {start_timestamp}:")
    human_text_lines.append(f"\t- Total Detected: {cumulative_total}")

    if self._unique_plate_texts:
        human_text_lines.append("\t- Unique License Plates:")
        for text in sorted(self._unique_plate_texts.values()):
            human_text_lines.append(f"\t\t- {text}")

    current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items() if count > 0 or total_detections > 0]
    total_counts_list = [{"category": cat, "count": count} for cat, count in total_counts.items() if count > 0 or cumulative_total > 0]

    # NOTE(review): dead assignment — human_text is rebuilt below after
    # the alert lines are appended.
    human_text = "\n".join(human_text_lines)
    detections = []
    for detection in counting_summary.get("detections", []):
        dom = detection.get("plate_text", "")
        if not dom:
            # Placeholder label when OCR produced nothing for this box.
            dom = "license_plate"
        bbox = detection.get("bounding_box", {})
        category = detection.get("category", "license_plate")
        # NOTE(review): segmentation is extracted but segmentation=None is
        # passed below — confirm masks are intentionally dropped here.
        segmentation = detection.get("masks", detection.get("segmentation", detection.get("mask", [])))
        detection_obj = self.create_detection_object(category, bbox, segmentation=None, plate_text=dom)
        detections.append(detection_obj)

    alert_settings = []
    # Build alert settings tolerating dict or dataclass for alert_config.
    if config.alert_config:
        alert_cfg = config.alert_config
        alert_type = getattr(alert_cfg, 'alert_type', None) if not isinstance(alert_cfg, dict) else alert_cfg.get('alert_type')
        alert_value = getattr(alert_cfg, 'alert_value', None) if not isinstance(alert_cfg, dict) else alert_cfg.get('alert_value')
        count_thresholds = getattr(alert_cfg, 'count_thresholds', None) if not isinstance(alert_cfg, dict) else alert_cfg.get('count_thresholds')
        # Normalise both to lists so they can be zipped into settings.
        alert_type = alert_type if isinstance(alert_type, list) else (list(alert_type) if alert_type is not None else ['Default'])
        alert_value = alert_value if isinstance(alert_value, list) else (list(alert_value) if alert_value is not None else ['JSON'])
        alert_settings.append({
            "alert_type": alert_type,
            "incident_category": self.CASE_TYPE,
            "threshold_level": count_thresholds or {},
            "ascending": True,
            "settings": {t: v for t, v in zip(alert_type, alert_value)}
        })

    if alerts:
        human_text_lines.append(f"Alerts: {alerts[0].get('settings', {})}")
    else:
        human_text_lines.append("Alerts: None")

    human_text = "\n".join(human_text_lines)
    # NOTE(review): reset schedule is hard-coded to daily at 09:00.
    reset_settings = [{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]

    tracking_stat = self.create_tracking_stats(
        total_counts=total_counts_list,
        current_counts=current_counts,
        detections=detections,
        human_text=human_text,
        camera_info=camera_info,
        alerts=alerts,
        alert_settings=alert_settings,
        reset_settings=reset_settings,
        start_time=high_precision_start_timestamp,
        reset_time=high_precision_reset_timestamp
    )
    tracking_stats.append(tracking_stat)
    return tracking_stats
697
+
698
def _check_alerts(self, summary: Dict, frame_number: Any, config: LicensePlateMonitorConfig) -> List[Dict]:
    """Check if any alert thresholds are exceeded.

    Compares the frame's total and per-category counts against the
    configured count_thresholds (key "all" covers the total). Each
    triggered threshold produces one alert dict carrying the trend
    direction of the recent severity samples.
    """
    def get_trend(data, lookback=900, threshold=0.6):
        # True when at least `threshold` of the consecutive steps in the
        # trailing `lookback` window are non-decreasing.
        window = data[-lookback:] if len(data) >= lookback else data
        if len(window) < 2:
            return True
        increasing = sum(1 for i in range(1, len(window)) if window[i] >= window[i - 1])
        return increasing / (len(window) - 1) >= threshold

    frame_key = str(frame_number) if frame_number is not None else "current_frame"
    alerts = []
    total_detections = summary.get("total_count", 0)
    total_counts_dict = summary.get("total_counts", {})
    # NOTE(review): computed but not used below.
    cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0
    per_category_count = summary.get("per_category_count", {})

    if not config.alert_config:
        return alerts

    # Extract thresholds/types/values regardless of dict or dataclass.
    _alert_cfg = config.alert_config
    _thresholds = getattr(_alert_cfg, 'count_thresholds', None) if not isinstance(_alert_cfg, dict) else _alert_cfg.get('count_thresholds')
    _types = getattr(_alert_cfg, 'alert_type', None) if not isinstance(_alert_cfg, dict) else _alert_cfg.get('alert_type')
    _values = getattr(_alert_cfg, 'alert_value', None) if not isinstance(_alert_cfg, dict) else _alert_cfg.get('alert_value')
    _types = _types if isinstance(_types, list) else (list(_types) if _types is not None else ['Default'])
    _values = _values if isinstance(_values, list) else (list(_values) if _values is not None else ['JSON'])
    if _thresholds:
        for category, threshold in _thresholds.items():
            if category == "all" and total_detections > threshold:
                alerts.append({
                    "alert_type": _types,
                    "alert_id": f"alert_{category}_{frame_key}",
                    "incident_category": self.CASE_TYPE,
                    "threshold_level": threshold,
                    "ascending": get_trend(self._ascending_alert_list),
                    "settings": {t: v for t, v in zip(_types, _values)}
                })
            elif category in per_category_count and per_category_count[category] > threshold:
                alerts.append({
                    "alert_type": _types,
                    "alert_id": f"alert_{category}_{frame_key}",
                    "incident_category": self.CASE_TYPE,
                    "threshold_level": threshold,
                    "ascending": get_trend(self._ascending_alert_list),
                    "settings": {t: v for t, v in zip(_types, _values)}
                })
    return alerts
745
+
746
def _generate_incidents(self, counting_summary: Dict, alerts: List, config: LicensePlateMonitorConfig,
                        frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
    """Generate structured incidents.

    Severity is derived either from the configured "all" count threshold
    (scaled to a 0-10 intensity) or from hard-coded fallback detection
    bands. Also maintains the rolling _ascending_alert_list trend buffer
    and the open/closed state of the current incident window.
    Returns a one-element list ({} when there are no detections).
    """
    frame_key = str(frame_number) if frame_number is not None else "current_frame"
    incidents = []
    total_detections = counting_summary.get("total_count", 0)
    current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
    camera_info = self.get_camera_info_from_stream(stream_info)

    # Keep only the most recent 900 trend samples.
    self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list

    if total_detections > 0:
        level = "low"
        intensity = 5.0
        start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
        # Incident window bookkeeping: open the window, close it once the
        # average of the last 15 severity samples drops below 1.5, and
        # reset a closed window back to N/A.
        if start_timestamp and self.current_incident_end_timestamp == 'N/A':
            self.current_incident_end_timestamp = 'Incident still active'
        elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
            if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
                self.current_incident_end_timestamp = current_timestamp
        elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
            self.current_incident_end_timestamp = 'N/A'

        # NOTE(review): accesses alert_config.count_thresholds directly,
        # unlike the dict-tolerant extraction used elsewhere — confirm
        # alert_config is always a dataclass on this path.
        if config.alert_config and config.alert_config.count_thresholds:
            threshold = config.alert_config.count_thresholds.get("all", 15)
            intensity = min(10.0, (total_detections / threshold) * 10)
            if intensity >= 9:
                level = "critical"
                self._ascending_alert_list.append(3)
            elif intensity >= 7:
                level = "significant"
                self._ascending_alert_list.append(2)
            elif intensity >= 5:
                level = "medium"
                self._ascending_alert_list.append(1)
            else:
                level = "low"
                self._ascending_alert_list.append(0)
        else:
            # No thresholds configured: fixed count bands.
            if total_detections > 30:
                level = "critical"
                intensity = 10.0
                self._ascending_alert_list.append(3)
            elif total_detections > 25:
                level = "significant"
                intensity = 9.0
                self._ascending_alert_list.append(2)
            elif total_detections > 15:
                level = "medium"
                intensity = 7.0
                self._ascending_alert_list.append(1)
            else:
                level = "low"
                intensity = min(10.0, total_detections / 3.0)
                self._ascending_alert_list.append(0)

        human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
        human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
        human_text = "\n".join(human_text_lines)

        alert_settings = []
        if config.alert_config:
            # Dict/dataclass tolerant extraction, normalised to lists.
            _alert_cfg = config.alert_config
            _types = getattr(_alert_cfg, 'alert_type', None) if not isinstance(_alert_cfg, dict) else _alert_cfg.get('alert_type')
            _values = getattr(_alert_cfg, 'alert_value', None) if not isinstance(_alert_cfg, dict) else _alert_cfg.get('alert_value')
            _thresholds = getattr(_alert_cfg, 'count_thresholds', None) if not isinstance(_alert_cfg, dict) else _alert_cfg.get('count_thresholds')
            _types = _types if isinstance(_types, list) else (list(_types) if _types is not None else ['Default'])
            _values = _values if isinstance(_values, list) else (list(_values) if _values is not None else ['JSON'])
            alert_settings.append({
                "alert_type": _types,
                "incident_category": self.CASE_TYPE,
                "threshold_level": _thresholds or {},
                "ascending": True,
                "settings": {t: v for t, v in zip(_types, _values)}
            })

        event = self.create_incident(
            incident_id=f"{self.CASE_TYPE}_{frame_key}",
            incident_type=self.CASE_TYPE,
            severity_level=level,
            human_text=human_text,
            camera_info=camera_info,
            alerts=alerts,
            alert_settings=alert_settings,
            start_time=start_timestamp,
            end_time=self.current_incident_end_timestamp,
            level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7}
        )
        incidents.append(event)
    else:
        # No detections this frame: record a zero severity sample.
        self._ascending_alert_list.append(0)
        incidents.append({})

    return incidents
840
+
841
+ def _generate_summary(self, summary: Dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
842
+ """Generate a human-readable summary."""
843
+ """
844
+ Generate a human_text string for the tracking_stat, incident, business analytics and alerts.
845
+ """
846
+ lines = []
847
+ lines.append("Application Name: "+self.CASE_TYPE)
848
+ lines.append("Application Version: "+self.CASE_VERSION)
849
+ if len(incidents) > 0:
850
+ lines.append("Incidents: "+f"\n\t{incidents[0].get('human_text', 'No incidents detected')}")
851
+ if len(tracking_stats) > 0:
852
+ lines.append("Tracking Statistics: "+f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}")
853
+ if len(business_analytics) > 0:
854
+ lines.append("Business Analytics: "+f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}")
855
+
856
+ if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
857
+ lines.append("Summary: "+"No Summary Data")
858
+
859
+ return ["\n".join(lines)]
860
+
861
def _update_tracking_state(self, detections: List[Dict]):
    """Track unique (canonicalised) track_ids per target category.

    Lazily initialises the running per-category accumulator on first
    call, resets the current-frame sets every call, rewrites each
    detection's track_id to its canonical alias (merging fragmented
    tracker ids via IoU), and records the id in both running totals and
    the current-frame sets. Mutates the detections in place.
    """
    if not hasattr(self, "_per_category_total_track_ids"):
        self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
    # Current-frame sets are rebuilt on every invocation.
    self._current_frame_track_ids = {cat: set() for cat in self.target_categories}

    for det in detections:
        cat = det.get("category")
        raw_track_id = det.get("track_id")
        # Only target categories with a tracker id participate.
        if cat not in self.target_categories or raw_track_id is None:
            continue
        bbox = det.get("bounding_box", det.get("bbox"))
        # Merge tracker ids that overlap spatially within the time window.
        canonical_id = self._merge_or_register_track(raw_track_id, bbox)
        det["track_id"] = canonical_id
        self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
        self._current_frame_track_ids[cat].add(canonical_id)
877
+
878
def _update_plate_texts(self, detections: List[Dict]):
    """Update plate-text history and the per-track dominant plate text.

    For every detection carrying a track_id and a plate_text of valid
    length: appends the cleaned text to the track's rolling history
    (capped at 200 entries), bumps per-track and global frequency
    counters, and — once a text has been seen for
    ``_stable_frames_required`` consecutive frames — locks in the
    dominant text (majority vote over the last 50% of the track's
    history) for the track and the global unique-plate mapping.
    Counters for texts absent from this frame are reset to preserve the
    consecutive-frame stability requirement.
    """
    for det in detections:
        raw_text = det.get('plate_text')
        track_id = det.get('track_id')
        if not raw_text or track_id is None:
            continue

        cleaned = self._clean_text(raw_text)

        # Enforce plate length 5 or 6 characters ("greater than 4 and less than 7").
        if not (self._min_plate_len <= len(cleaned) <= 6):
            continue

        # Append to the per-track rolling history (keep reasonable size).
        history = self.helper.get(track_id)
        if history is None:
            history = []
            self.helper[track_id] = history
        history.append(cleaned)
        if len(history) > 200:
            del history[: len(history) - 200]

        # Update per-track frequency counter (all-time).
        self._track_text_counts[track_id][cleaned] += 1

        # Update consecutive-frame counter for stability across the whole video.
        self._text_history[cleaned] = self._text_history.get(cleaned, 0) + 1

        # Once stable, decide the dominant text from the LAST 50% of history.
        if self._text_history[cleaned] >= self._stable_frames_required:
            half = max(1, len(history) // 2)
            window = history[-half:]
            from collections import Counter as _Ctr
            dominant, _ = _Ctr(window).most_common(1)[0]

            # Update per-track mapping to the dominant text.
            self._tracked_plate_texts[track_id] = dominant
            self.unique_plate_track[track_id] = dominant

            # Maintain the global unique mapping with the dominant only.
            if dominant not in self._unique_plate_texts:
                self._unique_plate_texts[dominant] = dominant

    # Reset counters for texts NOT seen in this frame (preserves the
    # consecutive-frame stability requirement).
    current_frame_texts = {self._clean_text(det.get('plate_text', '')) for det in detections if det.get('plate_text')}
    for t in list(self._text_history.keys()):
        if t not in current_frame_texts:
            self._text_history[t] = 0
927
+
928
def get_total_counts(self):
    """Return the running count of unique licence-plate texts seen so far."""
    total_unique = len(self._unique_plate_texts)
    return {'License_Plate': total_unique}
931
+
932
+ def _get_track_ids_info(self, detections: List[Dict]) -> Dict[str, Any]:
933
+ """Get detailed information about track IDs."""
934
+ frame_track_ids = {det.get('track_id') for det in detections if det.get('track_id') is not None}
935
+ total_track_ids = set()
936
+ for s in getattr(self, '_per_category_total_track_ids', {}).values():
937
+ total_track_ids.update(s)
938
+ return {
939
+ "total_count": len(total_track_ids),
940
+ "current_frame_count": len(frame_track_ids),
941
+ "total_unique_track_ids": len(total_track_ids),
942
+ "current_frame_track_ids": list(frame_track_ids),
943
+ "last_update_time": time.time(),
944
+ "total_frames_processed": getattr(self, '_total_frame_counter', 0)
945
+ }
946
+
947
+ def _compute_iou(self, box1: Any, box2: Any) -> float:
948
+ """Compute IoU between two bounding boxes."""
949
+ def _bbox_to_list(bbox):
950
+ if bbox is None:
951
+ return []
952
+ if isinstance(bbox, list):
953
+ return bbox[:4] if len(bbox) >= 4 else []
954
+ if isinstance(bbox, dict):
955
+ if "xmin" in bbox:
956
+ return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
957
+ if "x1" in bbox:
958
+ return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
959
+ values = [v for v in bbox.values() if isinstance(v, (int, float))]
960
+ return values[:4] if len(values) >= 4 else []
961
+ return []
962
+
963
+ l1 = _bbox_to_list(box1)
964
+ l2 = _bbox_to_list(box2)
965
+ if len(l1) < 4 or len(l2) < 4:
966
+ return 0.0
967
+ x1_min, y1_min, x1_max, y1_max = l1
968
+ x2_min, y2_min, x2_max, y2_max = l2
969
+ x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
970
+ y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
971
+ x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
972
+ y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
973
+ inter_x_min = max(x1_min, x2_min)
974
+ inter_y_min = max(y1_min, y2_min)
975
+ inter_x_max = min(x1_max, x2_max)
976
+ inter_y_max = min(y1_max, y2_max)
977
+ inter_w = max(0.0, inter_x_max - inter_x_min)
978
+ inter_h = max(0.0, inter_y_max - inter_y_min)
979
+ inter_area = inter_w * inter_h
980
+ area1 = (x1_max - x1_min) * (y1_max - y1_min)
981
+ area2 = (x2_max - x2_min) * (y2_max - y2_min)
982
+ union_area = area1 + area2 - inter_area
983
+ return (inter_area / union_area) if union_area > 0 else 0.0
984
+
985
+ def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
986
+ """Return a stable canonical ID for a raw tracker ID."""
987
+ if raw_id is None or bbox is None:
988
+ return raw_id
989
+ now = time.time()
990
+ if raw_id in self._track_aliases:
991
+ canonical_id = self._track_aliases[raw_id]
992
+ track_info = self._canonical_tracks.get(canonical_id)
993
+ if track_info is not None:
994
+ track_info["last_bbox"] = bbox
995
+ track_info["last_update"] = now
996
+ track_info["raw_ids"].add(raw_id)
997
+ return canonical_id
998
+ for canonical_id, info in self._canonical_tracks.items():
999
+ if now - info["last_update"] > self._track_merge_time_window:
1000
+ continue
1001
+ iou = self._compute_iou(bbox, info["last_bbox"])
1002
+ if iou >= self._track_merge_iou_threshold:
1003
+ self._track_aliases[raw_id] = canonical_id
1004
+ info["last_bbox"] = bbox
1005
+ info["last_update"] = now
1006
+ info["raw_ids"].add(raw_id)
1007
+ return canonical_id
1008
+ canonical_id = raw_id
1009
+ self._track_aliases[raw_id] = canonical_id
1010
+ self._canonical_tracks[canonical_id] = {
1011
+ "last_bbox": bbox,
1012
+ "last_update": now,
1013
+ "raw_ids": {raw_id},
1014
+ }
1015
+ return canonical_id
1016
+
1017
+ def _format_timestamp(self, timestamp: Any) -> str:
1018
+ """Format a timestamp so that exactly two digits follow the decimal point (milliseconds).
1019
+
1020
+ The input can be either:
1021
+ 1. A numeric Unix timestamp (``float`` / ``int``) – it will first be converted to a
1022
+ string in the format ``YYYY-MM-DD-HH:MM:SS.ffffff UTC``.
1023
+ 2. A string already following the same layout.
1024
+
1025
+ The returned value preserves the overall format of the input but truncates or pads
1026
+ the fractional seconds portion to **exactly two digits**.
1027
+
1028
+ Example
1029
+ -------
1030
+ >>> self._format_timestamp("2025-08-19-04:22:47.187574 UTC")
1031
+ '2025-08-19-04:22:47.18 UTC'
1032
+ """
1033
+
1034
+ # Convert numeric timestamps to the expected string representation first
1035
+ if isinstance(timestamp, (int, float)):
1036
+ timestamp = datetime.fromtimestamp(timestamp, timezone.utc).strftime(
1037
+ '%Y-%m-%d-%H:%M:%S.%f UTC'
1038
+ )
1039
+
1040
+ # Ensure we are working with a string from here on
1041
+ if not isinstance(timestamp, str):
1042
+ return str(timestamp)
1043
+
1044
+ # If there is no fractional component, simply return the original string
1045
+ if '.' not in timestamp:
1046
+ return timestamp
1047
+
1048
+ # Split out the main portion (up to the decimal point)
1049
+ main_part, fractional_and_suffix = timestamp.split('.', 1)
1050
+
1051
+ # Separate fractional digits from the suffix (typically ' UTC')
1052
+ if ' ' in fractional_and_suffix:
1053
+ fractional_part, suffix = fractional_and_suffix.split(' ', 1)
1054
+ suffix = ' ' + suffix # Re-attach the space removed by split
1055
+ else:
1056
+ fractional_part, suffix = fractional_and_suffix, ''
1057
+
1058
+ # Guarantee exactly two digits for the fractional part
1059
+ fractional_part = (fractional_part + '00')[:2]
1060
+
1061
+ return f"{main_part}.{fractional_part}{suffix}"
1062
+
1063
+ def _format_timestamp_for_stream(self, timestamp: float) -> str:
1064
+ """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
1065
+ dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
1066
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
1067
+
1068
+ def _format_timestamp_for_video(self, timestamp: float) -> str:
1069
+ """Format timestamp for video chunks (HH:MM:SS.ms format)."""
1070
+ hours = int(timestamp // 3600)
1071
+ minutes = int((timestamp % 3600) // 60)
1072
+ seconds = round(float(timestamp % 60), 2)
1073
+ return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
1074
+
1075
def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
    """Get formatted current timestamp based on stream type.

    For frame-indexed inputs (input_settings.start_frame present) the
    stream_time from input_settings is normalised via _format_timestamp.
    For live streams the embedded stream_time (or the wall clock on
    parse failure/absence) is rendered via _format_timestamp_for_stream.
    With precision=True the non-frame path returns datetime.now with
    full microsecond precision instead.
    """
    if not stream_info:
        return "00:00:00.00"
    if precision:
        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
            # Frame-indexed input: derive an offset from frame id / fps.
            if frame_id:
                start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
            else:
                start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
            # NOTE(review): stream_time_str is computed but never used —
            # the return below ignores the frame-derived offset.
            stream_time_str = self._format_timestamp_for_video(start_time)

            return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
        else:
            return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")

    if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
        if frame_id:
            start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
        else:
            start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)

        # NOTE(review): dead assignment, as in the precision branch above.
        stream_time_str = self._format_timestamp_for_video(start_time)

        return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
    else:
        stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
        if stream_time_str:
            try:
                # Parse 'YYYY-MM-DD-HH:MM:SS.ffffff UTC' into an epoch.
                timestamp_str = stream_time_str.replace(" UTC", "")
                dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
                return self._format_timestamp_for_stream(timestamp)
            except:
                # Unparseable stream time: fall back to the wall clock.
                return self._format_timestamp_for_stream(time.time())
        else:
            return self._format_timestamp_for_stream(time.time())
1114
+
1115
def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
    """Get formatted start timestamp for 'TOTAL SINCE' based on stream type.

    Latches the first observed input_settings.stream_time into
    self.start_timer and re-latches when a new clip starts at frame 1.
    When no timer exists, a fallback derives _tracking_start_time from
    the embedded stream time (or wall clock) truncated to the hour.
    """
    if not stream_info:
        return "00:00:00"

    if precision:
        if self.start_timer is None:
            # First call: latch the stream's start time.
            self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
            return self._format_timestamp(self.start_timer)
        elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
            # A new clip began at frame 1: re-latch.
            self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
            return self._format_timestamp(self.start_timer)
        else:
            return self._format_timestamp(self.start_timer)

    if self.start_timer is None:
        self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
        return self._format_timestamp(self.start_timer)
    elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
        self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
        return self._format_timestamp(self.start_timer)

    else:
        # NOTE(review): start_timer is guaranteed non-None here (the first
        # branch already handled None), so this returns immediately and the
        # _tracking_start_time fallback below appears unreachable.
        if self.start_timer is not None:
            return self._format_timestamp(self.start_timer)

        if self._tracking_start_time is None:
            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
            if stream_time_str:
                try:
                    # Parse 'YYYY-MM-DD-HH:MM:SS.ffffff UTC' into an epoch.
                    timestamp_str = stream_time_str.replace(" UTC", "")
                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                    self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
                except:
                    self._tracking_start_time = time.time()
            else:
                self._tracking_start_time = time.time()

        dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
        # Truncate to the top of the hour for a stable "since" anchor.
        dt = dt.replace(minute=0, second=0, microsecond=0)
        return dt.strftime('%Y:%m:%d %H:%M:%S')
1156
+
1157
+ def _get_tracking_start_time(self) -> str:
1158
+ """Get the tracking start time, formatted as a string."""
1159
+ if self._tracking_start_time is None:
1160
+ return "N/A"
1161
+ return self._format_timestamp(self._tracking_start_time)
1162
+
1163
+ def _set_tracking_start_time(self) -> None:
1164
+ """Set the tracking start time to the current time."""
1165
+ self._tracking_start_time = time.time()
1166
+
1167
+ def _attach_masks_to_detections(self, processed_detections: List[Dict[str, Any]], raw_detections: List[Dict[str, Any]],
1168
+ iou_threshold: float = 0.5) -> List[Dict[str, Any]]:
1169
+ """Attach segmentation masks from raw detections to processed detections."""
1170
+ if not processed_detections or not raw_detections:
1171
+ for det in processed_detections:
1172
+ det.setdefault("masks", [])
1173
+ return processed_detections
1174
+
1175
+ used_raw_indices = set()
1176
+ for det in processed_detections:
1177
+ best_iou = 0.0
1178
+ best_idx = None
1179
+ for idx, raw_det in enumerate(raw_detections):
1180
+ if idx in used_raw_indices:
1181
+ continue
1182
+ iou = self._compute_iou(det.get("bounding_box"), raw_det.get("bounding_box"))
1183
+ if iou > best_iou:
1184
+ best_iou = iou
1185
+ best_idx = idx
1186
+ if best_idx is not None and best_iou >= iou_threshold:
1187
+ raw_det = raw_detections[best_idx]
1188
+ masks = raw_det.get("masks", raw_det.get("mask"))
1189
+ if masks is not None:
1190
+ det["masks"] = masks
1191
+ used_raw_indices.add(best_idx)
1192
+ else:
1193
+ det.setdefault("masks", ["EMPTY"])
1194
+ return processed_detections