matrice-analytics 0.1.2__py3-none-any.whl → 0.1.31__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of matrice-analytics might be problematic.
- matrice_analytics/post_processing/advanced_tracker/matching.py +3 -3
- matrice_analytics/post_processing/advanced_tracker/strack.py +1 -1
- matrice_analytics/post_processing/face_reg/compare_similarity.py +5 -5
- matrice_analytics/post_processing/face_reg/embedding_manager.py +14 -7
- matrice_analytics/post_processing/face_reg/face_recognition.py +123 -34
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +332 -82
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +29 -22
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +0 -1
- matrice_analytics/post_processing/post_processor.py +19 -5
- matrice_analytics/post_processing/usecases/color/clip.py +292 -132
- matrice_analytics/post_processing/usecases/color/color_mapper.py +2 -2
- matrice_analytics/post_processing/usecases/color_detection.py +429 -355
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +41 -386
- matrice_analytics/post_processing/usecases/flare_analysis.py +1 -56
- matrice_analytics/post_processing/usecases/license_plate_detection.py +476 -202
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +252 -11
- matrice_analytics/post_processing/usecases/people_counting.py +408 -1431
- matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +39 -10
- matrice_analytics/post_processing/utils/__init__.py +8 -8
- {matrice_analytics-0.1.2.dist-info → matrice_analytics-0.1.31.dist-info}/METADATA +1 -1
- {matrice_analytics-0.1.2.dist-info → matrice_analytics-0.1.31.dist-info}/RECORD +59 -24
- {matrice_analytics-0.1.2.dist-info → matrice_analytics-0.1.31.dist-info}/WHEEL +0 -0
- {matrice_analytics-0.1.2.dist-info → matrice_analytics-0.1.31.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice_analytics-0.1.2.dist-info → matrice_analytics-0.1.31.dist-info}/top_level.txt +0 -0
matrice_analytics/post_processing/usecases/color_detection.py

@@ -15,7 +15,6 @@ from ..utils import (
     filter_by_categories,
     apply_category_mapping,
     match_results_structure,
-    extract_major_colors,
     count_objects_by_category,
     calculate_counting_summary,
     match_results_structure,
@@ -26,7 +25,11 @@ from ..utils import (
 )
 from ..utils.geometry_utils import get_bbox_center, point_in_polygon, get_bbox_bottom25_center
 from ..usecases.color.clip import ClipProcessor
-
+import sys
+from pathlib import Path
+import logging
+import subprocess
+import shutil
 
 @dataclass
 class ColorDetectionConfig(BaseConfig):
@@ -78,21 +81,22 @@ class ColorDetectionConfig(BaseConfig):
     alert_config: Optional[AlertConfig] = None
     time_window_minutes: int = 60
     enable_unique_counting: bool = True
-    enable_smoothing: bool =
+    enable_smoothing: bool = False
     smoothing_algorithm: str = "observability"
     smoothing_window_size: int = 20
     smoothing_cooldown_frames: int = 5
     smoothing_confidence_range_factor: float = 0.5
+    detector = True
 
     #JBK_720_GATE POLYGON = [[86, 328], [844, 317], [1277, 520], [1273, 707], [125, 713]]
-    zone_config: Optional[Dict[str, List[List[float]]]] = field(
-
-
-
-
-
-    )
-    true_import: bool = False
+    zone_config: Optional[Dict[str, List[List[float]]]] = None #field(
+    #     default_factory=lambda: {
+    #         "zones": {
+    #             "Interest_Region": [[86, 328], [844, 317], [1277, 520], [1273, 707], [125, 713]],
+    #         }
+    #     }
+    # )
+    # true_import: bool = False
 
     def validate(self) -> List[str]:
         errors = super().validate()
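For reference, the commented-out `default_factory` above implies the shape of `zone_config` that callers are now expected to pass explicitly. A minimal sketch, assuming the zone and region names from the commented default (illustrative only, not part of the package):

```python
# Hypothetical caller-side construction of zone_config; the "zones" key maps
# named regions to polygons, mirroring the commented-out default in the diff.
zone_config = {
    "zones": {
        "Interest_Region": [[86, 328], [844, 317], [1277, 520], [1273, 707], [125, 713]],
    }
}
config = ColorDetectionConfig(zone_config=zone_config)
```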
@@ -112,6 +116,25 @@ class ColorDetectionConfig(BaseConfig):
             errors.append("smoothing_confidence_range_factor must be positive")
         return errors
 
+    def __post_init__(self):
+        # Lazy initialization: the ClipProcessor will be created once by the use case
+        # to avoid repeated model downloads and to ensure GPU session reuse.
+        # log_file = open("pip_jetson_bt.log", "w")
+        # cmd = ["pip", "install", "--force-reinstall", "huggingface_hub", "regex", "safetensors"]
+        # subprocess.Popen(
+        #     cmd,
+        #     stdout=log_file,
+        #     stderr=subprocess.STDOUT,
+        #     preexec_fn=os.setpgrp
+        # )
+        print("Came to post_init and libraries installed!!!")
+        if self.detector:
+            self.detector = ClipProcessor()
+            print("ClipProcessor Loaded Successfully!!")
+        else:
+            print("Clip color detector disabled by config")
+            self.detector = None
+
 
 
 class ColorDetectionUseCase(BaseProcessor):
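The new `__post_init__` turns the truthy `detector = True` class attribute into a live `ClipProcessor` instance at construction time. Note that `detector` carries no type annotation in the diff, so it is a plain class attribute rather than a dataclass field; it cannot be set through the constructor and would have to be toggled on the class itself. A minimal sketch of the pattern, with a hypothetical stand-in for `ClipProcessor`:

```python
from dataclasses import dataclass

class FakeProcessor:  # hypothetical stand-in for ClipProcessor
    pass

@dataclass
class Config:
    threshold: float = 0.5
    detector = True  # unannotated: a class attribute, not a dataclass field

    def __post_init__(self):
        # Reads the class attribute, then replaces it on the instance:
        # truthy flag -> live processor, falsy flag -> None.
        self.detector = FakeProcessor() if self.detector else None

cfg = Config()
print(type(cfg.detector).__name__)  # FakeProcessor
Config.detector = False             # disabling requires mutating the class attribute
print(Config().detector)            # None
```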
@@ -124,32 +147,32 @@ class ColorDetectionUseCase(BaseProcessor):
         "three wheelers -CNG-": "Three Wheelers (CNG)", "human hauler": "Human Hauler",
         "van": "Van", "wheelbarrow": "Wheelbarrow"
     }
-
+
     def __init__(self):
         super().__init__("color_detection")
         self.category = "visual_appearance"
-
+
         self.target_categories = ["car", "bicycle", "bus", "motorcycle"]
-
+
         self.CASE_TYPE: Optional[str] = 'color_detection'
         self.CASE_VERSION: Optional[str] = '1.3'
-
+
         self.tracker = None  # AdvancedTracker instance
         self.smoothing_tracker = None  # BBoxSmoothingTracker instance
         self._total_frame_counter = 0  # Total frames processed
         self._global_frame_offset = 0  # Frame offset for new sessions
         self._color_total_track_ids = defaultdict(set)  # Cumulative track IDs per category-color
         self._color_current_frame_track_ids = defaultdict(set)  # Per-frame track IDs per category-color
-
+
         self._tracking_start_time = None
-
+
         self._track_aliases: Dict[Any, Any] = {}
         self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
         # Tunable parameters – adjust if necessary for specific scenarios
         self._track_merge_iou_threshold: float = 0.05  # IoU ≥ 0.05 →
         self._track_merge_time_window: float = 7.0  # seconds within which to merge
 
-        self._ascending_alert_list: List[int] = []
+        self._ascending_alert_list: List[int] = []
         self.current_incident_end_timestamp: str = "N/A"
         self.color_det_dict = {}
         self.start_timer = None
@@ -158,12 +181,17 @@ class ColorDetectionUseCase(BaseProcessor):
         self._zone_total_track_ids = {}  # zone_name -> set of all track IDs that have been in zone
         self._zone_current_counts = {}  # zone_name -> current count in zone
         self._zone_total_counts = {}  # zone_name -> total count that have been in zone
-        self.logger.info("Initialized ColorDetectionUseCase with
-        self.detector = None
+        self.logger.info("Initialized ColorDetectionUseCase with tracking")
+        #self.detector = None
         self.all_color_data = {}
         self.all_color_counts = {}
+        self.total_category_count = {}
+        self.category_color = {}
+        self.vehicle_tracks = {}
+        self.vehicle_stats = defaultdict(lambda: defaultdict(int))
+        self.zone_vehicle_stats = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
         #self.jpeg = TurboJPEG()
-
+        # data, config, ProcessingContext(), stream_info,input_bytes
     def process(
         self,
         data: Any,
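The new `vehicle_stats` and `zone_vehicle_stats` fields are nested `defaultdict`s, so increments like `self.zone_vehicle_stats[zone][vehicle_type][color] += 1` work without pre-creating intermediate dicts. A minimal sketch of that structure:

```python
from collections import defaultdict

# zone -> vehicle type -> color -> count, with levels auto-created on first access
zone_vehicle_stats = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
zone_vehicle_stats["Interest_Region"]["car"]["red"] += 1
zone_vehicle_stats["Interest_Region"]["car"]["red"] += 1
zone_vehicle_stats["Interest_Region"]["bus"]["blue"] += 1

# Convert to plain dicts for display
print({z: {t: dict(c) for t, c in v.items()} for z, v in zone_vehicle_stats.items()})
# {'Interest_Region': {'car': {'red': 2}, 'bus': {'blue': 1}}}
```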
@@ -173,8 +201,10 @@ class ColorDetectionUseCase(BaseProcessor):
         stream_info: Optional[Dict[str, Any]] = None
     ) -> ProcessingResult:
         processing_start = time.time()
-
+
         try:
+            cwd = os.getcwd()
+            print("Current working directory:", cwd)
             if not isinstance(config, ColorDetectionConfig):
                 return self.create_error_result(
                     "Invalid configuration type for color detection",
@@ -182,17 +212,17 @@ class ColorDetectionUseCase(BaseProcessor):
                     category=self.category,
                     context=context
                 )
-
-            if config.true_import and self.detector is None:
-
-
+
+            # if config.true_import and self.detector is None:
+            #     self.detector = ClipProcessor()
+            #     self.logger.info("Initialized ClipProcessor for color detection")
 
             if context is None:
                 context = ProcessingContext()
-
+
             if not input_bytes:
                 print("input_bytes is required for color detection")
-
+
             if not data:
                 #print("data",data)
                 print("Detection data is required for color detection")
@@ -201,23 +231,17 @@ class ColorDetectionUseCase(BaseProcessor):
             context.input_format = input_format
             context.confidence_threshold = config.confidence_threshold
 
-
             self.logger.info(f"Processing color detection with format: {input_format.value}")
-
+
             # Step 1: Apply confidence filtering
             processed_data = filter_by_confidence(data, config.confidence_threshold)
-
-
+
             # Step 2: Apply category mapping if provided
             if config.index_to_category:
-
-                #self.logger.debug("Applied category mapping")
+                color_processed_data = apply_category_mapping(processed_data, config.index_to_category)
 
-            if
-                color_processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
-                self.logger.debug("Applied category filtering")
+            color_processed_data = [d for d in color_processed_data if d['category'] in self.target_categories]
 
-
             raw_processed_data = [copy.deepcopy(det) for det in color_processed_data]
             # Step 3: Apply bounding box smoothing if enabled
             if config.enable_smoothing:
@@ -232,26 +256,26 @@ class ColorDetectionUseCase(BaseProcessor):
                 )
                 self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
                 color_processed_data = bbox_smoothing(color_processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
-
+
             # Step 4: Apply advanced tracking
             try:
                 from ..advanced_tracker import AdvancedTracker
                 from ..advanced_tracker.config import TrackerConfig
-
+
                 if self.tracker is None:
                     tracker_config = TrackerConfig()
                     self.tracker = AdvancedTracker(tracker_config)
                     self.logger.info("Initialized AdvancedTracker for color detection tracking")
-
+
                 color_processed_data = self.tracker.update(color_processed_data)
-
+
             except Exception as e:
                 self.logger.warning(f"AdvancedTracker failed: {e}")
-
+
 
             color_processed_data = self._attach_masks_to_detections(color_processed_data, raw_processed_data)
             self._total_frame_counter += 1
-
+
             frame_number = None
             if stream_info:
                 input_settings = stream_info.get("input_settings", {})
@@ -260,19 +284,55 @@ class ColorDetectionUseCase(BaseProcessor):
             # If start and end frame are the same, it's a single frame
             if start_frame is not None and end_frame is not None and start_frame == end_frame:
                 frame_number = start_frame
-
+
             # Step 7: Analyze colors in media
             color_analysis = self._analyze_colors_in_media(
-                color_processed_data,
-                input_bytes,
+                color_processed_data,
+                input_bytes,
                 config
             )
+            if config.zone_config:
+                color_processed_data = self._is_in_zone_robust(color_processed_data,config.zone_config)
+                print(color_processed_data)
+            try:
+                print("About to call process_color_in_frame...")
+
+                if config.detector is None:
+                    print("ERROR: Detector is None after initialization attempt!")
+                    curr_frame_color = {}
+
+                # else:
+                #     if color_processed_data:
+                #         t_id = color_processed_data[0].get('track_id')
+                #         if t_id is not None and t_id not in self.all_color_data:
+                #             # curr_frame_color = {}
+                #             curr_frame_color = config.detector.process_color_in_frame(color_processed_data,input_bytes,config.zone_config,stream_info)
+                #             res_dict[curr_frame_color[t_id]['color']] = curr_frame_color[t_id]['confidence']
+                #         else:
+                #             curr_frame_color = {}
+                #         print("process_color_in_frame completed successfully")
+                #     else:
+                #         curr_frame_color = {}
+
+                #------------------------ORiginal Code to run on all frames-----------------------
+                else:
+                    print(len(color_processed_data))
+                    curr_frame_color = config.detector.process_color_in_frame(
+                        color_processed_data,
+                        input_bytes,
+                        config.zone_config,
+                        stream_info,
+                    )
+                    print("process_color_in_frame completed successfully")
+            except Exception as e:
+                print(f"ERROR in process_color_in_frame: {e}")
+                import traceback
+                traceback.print_exc()
+                curr_frame_color = {}
 
-
-
-            # Step 8: Update color tracking state
+            self.update_vehicle_stats(curr_frame_color)
             self._update_color_tracking_state_from_analysis(color_analysis)
-
+
             # Step 9: Calculate summaries
             color_summary = self._calculate_color_summary(color_analysis, config)
             totals = self.get_total_color_counts()
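In the new flow, `process()` assigns zone names before color classification and guards the `ClipProcessor` call so a missing detector degrades to an empty result instead of raising. A condensed sketch of that control flow, with `process_color_in_frame` as a stand-in for the `ClipProcessor` API shown in the diff:

```python
# Condensed sketch of the guarded detector call from process() above;
# detector.process_color_in_frame is assumed to return {track_id: info}.
def classify_colors(detector, detections, frame_bytes, zone_config, stream_info):
    try:
        if detector is None:
            return {}  # degrade gracefully instead of raising
        return detector.process_color_in_frame(
            detections, frame_bytes, zone_config, stream_info
        )
    except Exception:
        import traceback
        traceback.print_exc()
        return {}
```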
@@ -284,14 +344,16 @@ class ColorDetectionUseCase(BaseProcessor):
                     if color and tid is not None:
                         tmp[color].add(tid)
                 totals = {color: len(ids) for color, ids in tmp.items()}
-            total_category_counts = self.get_total_category_counts()
+            total_category_counts = self.get_total_category_counts(color_processed_data)
             color_summary['total_color_counts'] = totals
             color_summary['total_category_counts'] = total_category_counts
 
             general_summary = self._calculate_general_summary(processed_data, config)
-
+            new_color_summary = self.merge_color_summary(color_processed_data,curr_frame_color)
+
             # Step 10: Zone analysis
-
+            self.color_helper(curr_frame_color)
+
             zone_analysis = {}
             if config.zone_config and config.zone_config['zones']:
                 frame_data = color_processed_data
@@ -302,19 +364,19 @@ class ColorDetectionUseCase(BaseProcessor):
                     zone_analysis[zone_name] = enhanced_data
 
 
-
+
             # Step 11: Generate alerts, incidents, tracking stats, and summary
             alerts = self._check_alerts(color_summary, frame_number, config)
 
             incidents_list = self._generate_incidents(color_summary, alerts, config, frame_number, stream_info)
             incidents_list = []
 
-            tracking_stats_list = self._generate_tracking_stats(color_summary, alerts, config, frame_number, stream_info
+            tracking_stats_list = self._generate_tracking_stats(new_color_summary,color_summary, alerts, config,curr_frame_color, frame_number, stream_info)
 
             business_analytics_list = []
             summary_list = self._generate_summary(color_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)
 
-
+
             incidents = incidents_list[0] if incidents_list else {}
             tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
             business_analytics = business_analytics_list[0] if business_analytics_list else {}
@@ -327,7 +389,7 @@ class ColorDetectionUseCase(BaseProcessor):
                     "zone_analysis": zone_analysis,
                     "human_text": summary}
             }
-
+
             context.mark_completed()
 
             # Build result object following the new pattern
@@ -341,23 +403,168 @@ class ColorDetectionUseCase(BaseProcessor):
             proc_time = time.time() - processing_start
             processing_latency_ms = proc_time * 1000.0
             processing_fps = (1.0 / proc_time) if proc_time > 0 else None
-            # Log the performance metrics using the module-level logger
             print("latency in ms:",processing_latency_ms,"| Throughput fps:",processing_fps,"| Frame_Number:",self._total_frame_counter)
             return result
-
+
         except Exception as e:
             self.logger.error(f"Color detection failed: {str(e)}", exc_info=True)
             if context:
                 context.mark_completed()
             return self.create_error_result(
-                str(e),
+                str(e),
                 type(e).__name__,
                 usecase=self.name,
                 category=self.category,
                 context=context
             )
-
+
+    def update_vehicle_stats(self, frame_detections: dict):
+        """
+        Update global vehicle statistics ensuring uniqueness per track_id and per zone.
+        If the same vehicle (track_id) is seen again:
+          - Ignore if confidence is lower.
+          - Update its color if confidence is higher.
+        """
+
+        # Ensure zone-level data structures exist
+        if not hasattr(self, "zone_vehicle_stats"):
+            self.zone_vehicle_stats = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
+
+        for _, det in frame_detections.items():
+            track_id = det.get('track_id')
+            if track_id is None:
+                continue
+
+            vehicle_type = det.get('object_label', 'unknown').lower()
+            color = det.get('color', 'unknown').lower()
+            conf = det.get('confidence', 0.0)
+            zone = det.get('zone_name', 'Unknown_Zone')
+
+            # If this track_id is new → add and count
+            if track_id not in self.vehicle_tracks:
+                self.vehicle_tracks[track_id] = {
+                    'object_label': vehicle_type,
+                    'color': color,
+                    'confidence': conf,
+                    'zone': zone
+                }
+                self.vehicle_stats[vehicle_type][color] += 1
+                self.zone_vehicle_stats[zone][vehicle_type][color] += 1
+
+            else:
+                existing = self.vehicle_tracks[track_id]
+                if conf > existing['confidence']:
+                    old_color = existing['color']
+                    old_zone = existing.get('zone', zone)
+                    old_type = existing.get('object_label', vehicle_type)
+
+                    # Decrease old counts
+                    self.vehicle_stats[old_type][old_color] -= 1
+                    if self.vehicle_stats[old_type][old_color] <= 0:
+                        del self.vehicle_stats[old_type][old_color]
+
+                    self.zone_vehicle_stats[old_zone][old_type][old_color] -= 1
+                    if self.zone_vehicle_stats[old_zone][old_type][old_color] <= 0:
+                        del self.zone_vehicle_stats[old_zone][old_type][old_color]
+
+                    # Update track info
+                    self.vehicle_tracks[track_id].update({
+                        'color': color,
+                        'confidence': conf,
+                        'zone': zone,
+                    })
+
+                    # Increase new counts
+                    self.vehicle_stats[vehicle_type][color] += 1
+                    self.zone_vehicle_stats[zone][vehicle_type][color] += 1
+
+
+    def merge_color_summary(self,detections_data: List[Dict[str, Any]], curr_frame_color: Dict[int, Dict[str, Any]]) -> Dict[str, Any]:
+        """
+        Combine base detections with current frame color information and produce a color summary.
+        Returns structure similar to _calculate_color_summary().
+        """
+
+        category_colors = defaultdict(lambda: defaultdict(int))
+        detections = []
+        counts = {}
+
+        # Merge detections with color info
+        for record in detections_data:
+            track_id = record.get("track_id")
+            category = record.get("category", "unknown")
+            conf = record.get("confidence", 0.0)
+            bbox = record.get("bounding_box", {})
+            frame_id = record.get("frame_id")
+            zone_name = record.get("zone_name", "Unknown")
+
+            # Get color from curr_frame_color
+            main_color = "unknown"
+            if track_id in curr_frame_color:
+                main_color = curr_frame_color[track_id].get("color", "unknown")
+
+            category_colors[category][main_color] += 1
+            counts[category] = counts.get(category, 0) + 1
+
+            detections.append({
+                "bounding_box": bbox,
+                "category": category,
+                "confidence": conf,
+                "track_id": track_id,
+                "frame_id": frame_id,
+                "main_color": main_color,
+                "zone_name": zone_name
+            })
+
+        # Flatten color distribution
+        all_colors = defaultdict(int)
+        for category_data in category_colors.values():
+            for color, count in category_data.items():
+                all_colors[color] += count
+
+        # Find dominant color per category
+        dominant_colors = {}
+        for category, colors in category_colors.items():
+            if colors:
+                color, count = max(colors.items(), key=lambda x: x[1])
+                dominant_colors[category] = {
+                    "color": color,
+                    "count": count,
+                    "percentage": round((count / sum(colors.values())) * 100, 1)
+                }
+
+        # Final summary dict
+        summary = {
+            "total_count": sum(counts.values()),
+            "per_category_count": counts,
+            "detections": detections,
+            "color_distribution": dict(all_colors),
+            "dominant_colors": dominant_colors
+        }
+
+        return summary
+
+    def get_vehicle_stats(self):
+        """Return the current global vehicle statistics as a normal dictionary."""
+        return {vtype: dict(colors) for vtype, colors in self.vehicle_stats.items()}
+
+    def _is_in_zone_robust(self,detections,zones):
+        if not detections:
+            return {}
+        new_data = []
+        for det in detections:
+            bbox = det.get('bounding_box')
+            cx,cy = get_bbox_bottom25_center(bbox)
+            for zone, region in zones.items():
+                for reg, poly in region.items():
+                    if point_in_polygon((cx,cy),poly):
+                        det['zone_name'] = reg
+                        new_data.append(det)
+        return new_data
+
     def color_helper(self, curr_data):
+        if curr_data is None:
+            return
         for tid, data in curr_data.items():
             if tid not in self.all_color_data:
                 # First time seeing this track
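The new `update_vehicle_stats` keeps one record per `track_id` and only re-labels a vehicle when a later observation has strictly higher confidence, decrementing the old type/color counters and incrementing the new ones. A minimal standalone sketch of that keep-best-confidence bookkeeping (simplified to a single stats level):

```python
from collections import defaultdict

tracks = {}                                    # track_id -> best observation so far
stats = defaultdict(lambda: defaultdict(int))  # vehicle type -> color -> unique count

def observe(track_id, vtype, color, conf):
    prev = tracks.get(track_id)
    if prev is None:
        tracks[track_id] = {"type": vtype, "color": color, "conf": conf}
        stats[vtype][color] += 1
    elif conf > prev["conf"]:
        # Move the count from the old label to the higher-confidence one.
        stats[prev["type"]][prev["color"]] -= 1
        if stats[prev["type"]][prev["color"]] <= 0:
            del stats[prev["type"]][prev["color"]]
        tracks[track_id] = {"type": vtype, "color": color, "conf": conf}
        stats[vtype][color] += 1

observe(7, "car", "white", 0.4)
observe(7, "car", "red", 0.9)   # higher confidence: re-labels track 7
observe(7, "car", "blue", 0.5)  # lower confidence than 0.9: ignored
print({t: dict(c) for t, c in stats.items()})  # {'car': {'red': 1}}
```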
@@ -391,25 +598,16 @@ class ColorDetectionUseCase(BaseProcessor):
                 # update track info
                 self.all_color_data[tid]["color"] = new_color
                 self.all_color_data[tid]["confidence"] = data.get("confidence")
-
-
-
+        # return self.all_color_data
 
     def _analyze_colors_in_media(
-        self,
-        data: Any,
-        media_bytes: bytes,
+            self,
+            data: Any,
+            media_bytes: bytes,
         config: ColorDetectionConfig
     ) -> List[Dict[str, Any]]:
         """Analyze colors of detected objects in video frames or images."""
-
-        # Determine if input is video or image
-        is_video = self._is_video_bytes(media_bytes)
-
-        if is_video:
-            return self._analyze_colors_in_video(data, media_bytes, config)
-        else:
-            return self._analyze_colors_in_image(data, media_bytes, config)
+        return self._analyze_colors_in_image(data, media_bytes, config)
 
     def _update_color_tracking_state_from_analysis(self, color_analysis: List[Dict[str, Any]]) -> None:
         """Update total tracking store using analyzed color results.
@@ -422,7 +620,7 @@ class ColorDetectionUseCase(BaseProcessor):
         self._color_total_track_ids = existing_store
         # Reset current frame tracking for this frame
         self._color_current_frame_track_ids = defaultdict(set)
-
+
         for rec in color_analysis:
             cat = rec.get('category')
             color = rec.get('main_color')
@@ -466,7 +664,7 @@ class ColorDetectionUseCase(BaseProcessor):
                 self._color_total_track_ids[key].add(track_id)
                 # Also update current frame tracking
                 self._color_current_frame_track_ids[key].add(track_id)
-
+
     def _is_video_bytes(self, media_bytes: bytes) -> bool:
         """Determine if bytes represent a video file."""
         # Check common video file signatures
@@ -477,119 +675,30 @@ class ColorDetectionUseCase(BaseProcessor):
             b'\x1aE\xdf\xa3',  # MKV/WebM
             b'ftyp',  # General MP4 family
         ]
-
+
         for signature in video_signatures:
             if media_bytes.startswith(signature) or signature in media_bytes[:50]:
                 return True
         return False
-
-    def _analyze_colors_in_video(
-        self,
-        data: Any,
-        video_bytes: bytes,
-        config: ColorDetectionConfig
-    ) -> List[Dict[str, Any]]:
-        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp_video:
-            temp_video.write(video_bytes)
-            video_path = temp_video.name
-
-        try:
-            cap = cv2.VideoCapture(video_path)
-            if not cap.isOpened():
-                raise RuntimeError("Failed to open video file")
-
-            fps = config.fps or cap.get(cv2.CAP_PROP_FPS)
-            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
 
-            color_analysis = []
-            frame_id = 0
-
-            while True:
-                ret, frame = cap.read()
-                if not ret:
-                    break
-
-                if frame_id % config.frame_skip != 0:
-                    frame_id += 1
-                    continue
-
-                frame_key = str(frame_id)
-                timestamp = frame_id / fps
-                frame_detections = self._get_frame_detections(data, frame_key)
-                if not frame_detections:
-                    frame_id += 1
-                    continue
-
-                rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-
-                for detection in frame_detections:
-                    if detection.get("confidence", 1.0) < config.confidence_threshold:
-                        continue
-
-                    bbox = detection.get("bounding_box", detection.get("bbox"))
-                    if not bbox:
-                        continue
-
-                    # Check all zones
-                    zones = config.zone_config['zones'] if config.zone_config else {}
-                    in_any_zone = not zones  # Process all if no zones
-                    zone_name = None
-                    for z_name, zone_polygon in zones.items():
-                        if self._is_in_zone(bbox, zone_polygon):
-                            in_any_zone = True
-                            zone_name = z_name
-                            break
-                    if not in_any_zone:
-                        continue  # Skip detections outside zones
-
-                    crop = self._crop_bbox(rgb_frame, bbox, config.bbox_format)
-                    if crop.size == 0:
-                        continue
-
-                    major_colors = extract_major_colors(crop, k=config.top_k_colors)
-                    main_color = major_colors[0][0] if major_colors else "unknown"
-
-                    color_record = {
-                        "frame_id": frame_key,
-                        "timestamp": round(timestamp, 2),
-                        "category": detection.get("category", "unknown"),
-                        "confidence": round(detection.get("confidence", 0.0), 3),
-                        "main_color": main_color,
-                        "major_colors": major_colors,
-                        "bbox": bbox,
-                        "detection_id": detection.get("id", f"det_{len(color_analysis)}"),
-                        "track_id": detection.get("track_id"),
-                        "zone_name": zone_name
-                    }
-                    color_analysis.append(color_record)
-
-                frame_id += 1
-
-            cap.release()
-            return color_analysis
-
-        finally:
-            if os.path.exists(video_path):
-                os.unlink(video_path)
 
     def _analyze_colors_in_image(
-        self,
-        data: Any,
-        image_bytes: bytes,
+            self,
+            data: Any,
+            image_bytes: bytes,
         config: ColorDetectionConfig
     ) -> List[Dict[str, Any]]:
         image_array = np.frombuffer(image_bytes, np.uint8)
         image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
         #image = self.jpeg.decode(image_bytes, pixel_format=TJPF_RGB)
-
+
         if image is None:
             raise RuntimeError("Failed to decode image from bytes")
-
+
         rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
         color_analysis = []
         detections = self._get_frame_detections(data, "0")
-
+
         for detection in detections:
             if detection.get("confidence", 1.0) < config.confidence_threshold:
                 continue
@@ -610,12 +719,14 @@ class ColorDetectionUseCase(BaseProcessor):
             if not in_any_zone:
                 continue  # Skip detections outside zones
 
-            crop = self._crop_bbox(rgb_image, bbox, config.bbox_format)
-            if crop.size == 0:
-
+            # crop = self._crop_bbox(rgb_image, bbox, config.bbox_format)
+            # if crop.size == 0:
+            #     continue
 
-            major_colors = extract_major_colors(crop, k=config.top_k_colors)
-            main_color = major_colors[0][0] if major_colors else "unknown"
+            # major_colors = extract_major_colors(crop, k=config.top_k_colors)
+            # main_color = major_colors[0][0] if major_colors else "unknown"
+            main_color = "unknown"
+            major_colors = []
 
             color_record = {
                 "frame_id": "0",
@@ -630,10 +741,10 @@ class ColorDetectionUseCase(BaseProcessor):
                 "zone_name": zone_name
             }
             color_analysis.append(color_record)
-
+
         return color_analysis
-
-
+
+
     def _get_frame_detections(self, data: Any, frame_key: str) -> List[Dict[str, Any]]:
         """Extract detections for a specific frame from data."""
         if isinstance(data, dict):
@@ -644,11 +755,11 @@ class ColorDetectionUseCase(BaseProcessor):
             return data
         else:
             return []
-
+
     def _crop_bbox(self, image: np.ndarray, bbox: Dict[str, Any], bbox_format: str) -> np.ndarray:
         """Crop bounding box region from image."""
         h, w = image.shape[:2]
-
+
         # Auto-detect bbox format
         if bbox_format == "auto":
             if "xmin" in bbox:
@@ -657,7 +768,7 @@ class ColorDetectionUseCase(BaseProcessor):
                 bbox_format = "x_y_width_height"
             else:
                 return np.zeros((0, 0, 3), dtype=np.uint8)
-
+
         # Extract coordinates based on format
         if bbox_format == "xmin_ymin_xmax_ymax":
             xmin = max(0, int(bbox["xmin"]))
@@ -671,9 +782,9 @@ class ColorDetectionUseCase(BaseProcessor):
             ymax = min(h, int(bbox["y"] + bbox["height"]))
         else:
             return np.zeros((0, 0, 3), dtype=np.uint8)
-
+
         return image[ymin:ymax, xmin:xmax]
-
+
     def _calculate_color_summary(self, color_analysis: List[Dict], config: ColorDetectionConfig) -> Dict[str, Any]:
         category_colors = defaultdict(lambda: defaultdict(int))
         total_detections = len(color_analysis)
@@ -693,7 +804,7 @@ class ColorDetectionUseCase(BaseProcessor):
                 "main_color": record["main_color"]
             })
 
-
+
         self.logger.debug(f"Valid detections after filtering: {len(detections)}")
         summary = {
             "total_count": sum(counts.values()),
@@ -725,14 +836,14 @@ class ColorDetectionUseCase(BaseProcessor):
 
 
         return summary
-
+
     def _calculate_general_summary(self, processed_data: Any, config: ColorDetectionConfig) -> Dict[str, Any]:
         """Calculate general detection summary."""
-
+
         # Count objects by category
         category_counts = defaultdict(int)
         total_objects = 0
-
+
         if isinstance(processed_data, dict):
             # Frame-based format
             for frame_data in processed_data.values():
@@ -749,18 +860,18 @@ class ColorDetectionUseCase(BaseProcessor):
                     category = detection.get("category", "unknown")
                     category_counts[category] += 1
                     total_objects += 1
-
+
         return {
             "total_objects": total_objects,
            "category_counts": dict(category_counts),
            "categories_detected": list(category_counts.keys())
         }
-
+
     def _calculate_metrics(self, color_analysis: List[Dict], color_summary: Dict, config: ColorDetectionConfig, context: ProcessingContext) -> Dict[str, Any]:
         """Calculate detailed metrics for analytics."""
         total_detections = len(color_analysis)
         unique_colors = len(color_summary.get("color_distribution", {}))
-
+
         metrics = {
             "total_detections": total_detections,
             "unique_colors": unique_colors,
@@ -772,15 +883,15 @@ class ColorDetectionUseCase(BaseProcessor):
             "detection_rate": 0.0,
             "average_colors_per_detection": config.top_k_colors
         }
-
+
         # Calculate color diversity
         if total_detections > 0:
             metrics["color_diversity"] = (unique_colors / total_detections) * 100
-
+
         # Calculate detection rate
         if config.time_window_minutes and config.time_window_minutes > 0:
             metrics["detection_rate"] = (total_detections / config.time_window_minutes) * 60
-
+
         # Per-category metrics
         if color_summary.get("categories"):
             category_metrics = {}
@@ -792,7 +903,7 @@ class ColorDetectionUseCase(BaseProcessor):
                     "color_diversity": (len(colors) / category_total) * 100 if category_total > 0 else 0
                 }
             metrics["category_metrics"] = category_metrics
-
+
         # Processing settings
         metrics["processing_settings"] = {
             "confidence_threshold": config.confidence_threshold,
@@ -801,12 +912,12 @@ class ColorDetectionUseCase(BaseProcessor):
             "target_categories": config.target_categories,
             "enable_unique_counting": config.enable_unique_counting
         }
-
+
         return metrics
-
+
     def _extract_predictions(self, color_analysis: List[Dict], config: ColorDetectionConfig) -> List[Dict]:
         """Extract predictions in standard format."""
-
+
         predictions = []
         for record in color_analysis:
             prediction = {
@@ -821,9 +932,9 @@ class ColorDetectionUseCase(BaseProcessor):
             if "detection_id" in record:
                 prediction["id"] = record["detection_id"]
             predictions.append(prediction)
-
+
         return predictions
-
+
     def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
         """
         Generate a human_text string for the tracking_stat, incident, business analytics and alerts.
@@ -834,7 +945,7 @@ class ColorDetectionUseCase(BaseProcessor):
         if len(incidents) > 0:
             lines.append("Incidents: "+f"\n\t{incidents[0].get('human_text', 'No incidents detected')}")
         if len(tracking_stats) > 0:
-            lines.append(
+            lines.append(f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}")
         if len(business_analytics) > 0:
             lines.append("Business Analytics: "+f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}")
 
@@ -842,7 +953,7 @@ class ColorDetectionUseCase(BaseProcessor):
             lines.append("Summary: "+"No Summary Data")
 
         return ["\n".join(lines)]
-
+
     def _generate_events(self, color_summary: Dict, alerts: List, config: ColorDetectionConfig, frame_number: Optional[int] = None) -> List[Dict]:
         """Generate structured events with frame-based keys."""
         frame_key = str(frame_number) if frame_number is not None else "current_frame"
@@ -906,15 +1017,17 @@ class ColorDetectionUseCase(BaseProcessor):
                     frame_events.append(alert_event)
 
         return events
-
+
     def _generate_tracking_stats(
         self,
+        new_color_summary: Dict,
         counting_summary: Dict,
         alerts: Any,
         config: ColorDetectionConfig,
+        curr_frame_color: Any,
+        total_color_data: Any,
         frame_number: Optional[int] = None,
         stream_info: Optional[Dict[str, Any]] = None,
-        curr_frame_color: Any = None
    ) -> List[Dict]:
         """Generate structured tracking stats for the output format with frame-based keys, including track_ids_info and detections with masks."""
         # frame_key = str(frame_number) if frame_number is not None else "current_frame"
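Worth noting: the new signature takes `curr_frame_color` and `total_color_data` as required positional parameters before `frame_number` and `stream_info`, while the call site in `process()` shown earlier passes only seven positional arguments. If both definitions ship exactly as this diff shows, the positional binding shifts by one, as this minimal demo illustrates (names shortened; inferred purely from the diff text, so treat it as an observation rather than confirmed runtime behavior):

```python
# Minimal binding demo: six required params plus two optional, called with
# seven positional arguments, mirroring _generate_tracking_stats and its call.
def stats(new_summary, counting, alerts, config, curr_color,
          total_color_data, frame_number=None, stream_info=None):
    return {"total_color_data": total_color_data,
            "frame_number": frame_number,
            "stream_info": stream_info}

print(stats("s1", "s2", [], {}, {}, 42, "info"))
# {'total_color_data': 42, 'frame_number': 'info', 'stream_info': None}
# i.e. the frame number lands in total_color_data and stream_info defaults to None.
```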
@@ -925,7 +1038,7 @@ class ColorDetectionUseCase(BaseProcessor):
         total_detections = counting_summary.get("total_count", 0)
         total_color_counts_dict = counting_summary.get("total_color_counts", {})
         total_category_counts_dict = counting_summary.get("total_category_counts", {})
-        cumulative_total = sum(total_color_counts_dict.values()) if total_color_counts_dict else 0
+        # cumulative_total = sum(total_color_counts_dict.values()) if total_color_counts_dict else 0
         per_category_count = counting_summary.get("per_category_count", {})
 
         # Compute current color counts from detections
@@ -939,20 +1052,13 @@ class ColorDetectionUseCase(BaseProcessor):
 
         current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
         start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
-
+
         # Create high precision timestamps for input_timestamp and reset_timestamp
         high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
         high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
 
         camera_info = self.get_camera_info_from_stream(stream_info)
-        total_color_data = self.color_helper(curr_frame_color)
-        print("========================CURR FRAME=======================")
-        print(curr_frame_color)
-        print("========================CURR FRAME=======================")
-
-        print("========================TOTAL=======================")
-        print(total_color_data)
-        print("========================TOTAL=======================")
+        # total_color_data = self.color_helper(curr_frame_color)
 
         human_text_lines = []
         color_counts = {}
@@ -963,51 +1069,72 @@ class ColorDetectionUseCase(BaseProcessor):
             if color not in color_counts:
                 color_counts[color] = 0
             color_counts[color] += 1
+        zone_frame_data = {}
+        if curr_frame_color:
+            for tid, data in curr_frame_color.items():
+                zone = data.get("zone_name", "Unknown_Zone")
+                color = data.get("color", "unknown")
+                category = data.get("object_label", "unknown")
+
+                if zone not in zone_frame_data:
+                    zone_frame_data[zone] = {
+                        "color_counts": {},
+                        "category_counts": {}
+                    }
 
-
-
+                # Count colors
+                zone_frame_data[zone]["color_counts"][color] = (
+                    zone_frame_data[zone]["color_counts"].get(color, 0) + 1
+                )
 
+                # Count vehicle types
+                zone_frame_data[zone]["category_counts"][category] = (
+                    zone_frame_data[zone]["category_counts"].get(category, 0) + 1
+                )
 
         # CURRENT FRAME section
         human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
-        if total_detections
-            # Vehicle categories (current frame)
-            category_counts = [f"{count} {cat}" for cat, count in per_category_count.items()]
-            if len(category_counts) == 1:
-                detection_text = category_counts[0] + " detected"
-            elif len(category_counts) == 2:
-                detection_text = f"{category_counts[0]} and {category_counts[1]} detected"
-            else:
-                detection_text = f"{', '.join(category_counts[:-1])}, and {category_counts[-1]} detected"
-            human_text_lines.append(f"\t- {detection_text}")
-
-            # Colors (current frame)
-            if color_counts:
-                color_counts_text = ", ".join([f"{count} {color}" for color, count in color_counts.items()])
-                human_text_lines.append(f"\t- Colors: {color_counts_text}")
-        else:
+        if not curr_frame_color or total_detections == 0:
             human_text_lines.append(f"\t- No detections")
+        else:
+            for zone_name, stats in zone_frame_data.items():
+                color_counts = stats["color_counts"]
+                per_category_count = stats["category_counts"]
+                if config.zone_config:
+                    human_text_lines.append(f"\t{zone_name}:")
+                if per_category_count:
+                    category_counts = [f"{count} {cat}" for cat, count in per_category_count.items()]
+                    if len(category_counts) == 1:
+                        detection_text = category_counts[0] + " detected"
+                    elif len(category_counts) == 2:
+                        detection_text = f"{category_counts[0]} and {category_counts[1]} detected"
+                    else:
+                        detection_text = f"{', '.join(category_counts[:-1])}, and {category_counts[-1]} detected"
+                    human_text_lines.append(f"\t\t- {detection_text}")
+
+                if color_counts:
+                    color_counts_text = ", ".join([f"{count} {color}" for color, count in color_counts.items()])
+                    human_text_lines.append(f"\t\t- Colors: {color_counts_text}")
 
         human_text_lines.append("")  # spacing
 
+        cumulative_total = sum(self.all_color_counts.values())
+        stats = self.zone_vehicle_stats
+
         # TOTAL SINCE section
         human_text_lines.append(f"TOTAL SINCE {start_timestamp}:")
-
-
-
-
-        human_text_lines.append("\t-
-
-
-
-
-
-
-            if count > 0:
-                human_text_lines.append(f"\t\t- {color}: {count}")
-        # Build current_counts array in expected format
-        # Build arrays
+        for zone_name, vehicles in stats.items():
+            total_in_zone = sum(sum(colors.values()) for colors in vehicles.values())
+            if config.zone_config:
+                human_text_lines.append(f"\t{zone_name}:")
+            human_text_lines.append(f"\t\t- Total Detected: {total_in_zone}")
+
+            for vehicle_type, colors in vehicles.items():
+                total_type_count = sum(colors.values())
+                human_text_lines.append(f"\t\t- {vehicle_type}: {total_type_count}")
+                for color, count in colors.items():
+                    human_text_lines.append(f"\t\t\t- {color}: {count}")
+
         current_counts_categories = []
         for cat, count in per_category_count.items():
            if count > 0 or total_detections > 0:
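Taken together, the two loops above emit a per-zone, tab-indented report. A sketch of the resulting `human_text` shape for a hypothetical single-zone stream (timestamps, counts, and labels are invented for illustration; the exact values depend on tracker state):

```text
CURRENT FRAME @ 2024-01-01 12:00:00:
	Interest_Region:
		- 2 car and 1 bus detected
		- Colors: 2 red, 1 blue

TOTAL SINCE 2024-01-01 11:00:00:
	Interest_Region:
		- Total Detected: 14
		- car: 10
			- red: 6
			- white: 4
		- bus: 4
			- blue: 4
```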
@@ -1030,9 +1157,9 @@ class ColorDetectionUseCase(BaseProcessor):
|
|
|
1030
1157
|
# Include detections with masks from counting_summary
|
|
1031
1158
|
# Prepare detections without confidence scores (as per eg.json)
|
|
1032
1159
|
detections = []
|
|
1033
|
-
for detection in
|
|
1160
|
+
for detection in new_color_summary.get("detections", []):
|
|
1034
1161
|
bbox = detection.get("bounding_box", {})
|
|
1035
|
-
category = detection.get("
|
|
1162
|
+
category = detection.get("main_color", "No_color")
|
|
1036
1163
|
# Include segmentation if available (like in eg.json)
|
|
1037
1164
|
if detection.get("masks"):
|
|
1038
1165
|
segmentation= detection.get("masks", [])
|
|
@@ -1077,68 +1204,16 @@ class ColorDetectionUseCase(BaseProcessor):
|
|
|
1077
1204
|
}
|
|
1078
1205
|
]
|
|
1079
1206
|
|
|
1207
|
+
|
|
1080
1208
|
# Keep backward-compat: put colors into total_counts and categories into current_counts
|
|
1081
1209
|
tracking_stat=self.create_tracking_stats(total_counts=total_counts_colors, current_counts=current_counts_categories,
|
|
1082
1210
|
detections=detections, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
|
|
1083
1211
|
reset_settings=reset_settings, start_time=high_precision_start_timestamp ,
|
|
1084
1212
|
reset_time=high_precision_reset_timestamp)
|
|
1085
1213
|
|
|
1086
|
-
# Add explicit breakdowns for consumers who want both types
|
|
1087
|
-
# tracking_stat["current_category_counts"] = current_counts_categories
|
|
1088
|
-
# tracking_stat["current_color_counts"] = current_counts_colors
|
|
1089
|
-
# tracking_stat["total_category_counts"] = total_counts_categories
|
|
1090
|
-
# tracking_stat["total_color_counts"] = total_counts_colors
|
|
1091
|
-
|
|
1092
1214
|
tracking_stats.append(tracking_stat)
|
|
1093
1215
|
return tracking_stats
|
|
1094
|
-
|
|
1095
|
-
def _generate_human_text_for_tracking(self, total_detections: int, color_summary: Dict, insights: List[str], summary: str, config: ColorDetectionConfig) -> str:
|
|
1096
|
-
"""Generate human-readable text for tracking stats."""
|
|
1097
|
-
from datetime import datetime, timezone
|
|
1098
|
-
|
|
1099
|
-
text_parts = [
|
|
1100
|
-
#f"Tracking Start Time: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M')}",
|
|
1101
|
-
#f"Objects Analyzed: {total_detections}"
|
|
1102
|
-
]
|
|
1103
|
-
|
|
1104
|
-
if config.time_window_minutes:
|
|
1105
|
-
detection_rate_per_hour = (total_detections / config.time_window_minutes) * 60
|
|
1106
|
-
#text_parts.append(f"Detection Rate: {detection_rate_per_hour:.1f} objects per hour")
|
|
1107
|
-
|
|
1108
|
-
# Add color statistics
|
|
1109
|
-
unique_colors = len(color_summary.get("color_distribution", {}))
|
|
1110
|
-
#text_parts.append(f"Unique Colors Detected: {unique_colors}")
|
|
1111
|
-
|
|
1112
|
-
if total_detections > 0:
|
|
1113
|
-
color_diversity = (unique_colors / total_detections) * 100
|
|
1114
|
-
#text_parts.append(f"Color Diversity: {color_diversity:.1f}%")
|
|
1115
|
-
|
|
1116
|
-
# Add category breakdown
|
|
1117
|
-
categories = color_summary.get("categories", {})
|
|
1118
|
-
if categories:
|
|
1119
|
-
#text_parts.append(f"Categories Analyzed: {len(categories)}")
|
|
1120
|
-
for category, colors in categories.items():
|
|
1121
|
-
category_total = sum(colors.values())
|
|
1122
|
-
if category_total > 0:
|
|
1123
|
-
dominant_color = max(colors.items(), key=lambda x: x[1])[0] if colors else "unknown"
|
|
1124
|
-
text_parts.append(f" {category_total} {category.title()} detected, Color: {dominant_color}")
|
|
1125
|
-
|
|
1126
|
-
# Add color distribution summary
|
|
1127
|
-
color_distribution = color_summary.get("color_distribution", {})
|
|
1128
|
-
if color_distribution:
|
|
1129
|
-
top_colors = sorted(color_distribution.items(), key=lambda x: x[1], reverse=True)[:3]
|
|
1130
|
-
#text_parts.append("Top Colors:")
|
|
1131
|
-
for color, count in top_colors:
|
|
1132
|
-
percentage = (count / total_detections) * 100
|
|
1133
|
-
#text_parts.append(f" {color.title()}: {count} objects ({percentage:.1f}%)")
|
|
1134
|
-
|
|
1135
|
-
# Add key insights
|
|
1136
|
-
# if insights:
|
|
1137
|
-
# text_parts.append("Key Color Insights:")
|
|
1138
|
-
# for insight in insights[:3]: # Limit to first 3 insights
|
|
1139
|
-
# text_parts.append(f" - {insight}")
|
|
1140
|
-
|
|
1141
|
-
return "\n".join(text_parts)
|
|
1216
|
+
|
|
1142
1217
|
|
|
1143
1218
|
def reset_tracker(self) -> None:
|
|
1144
1219
|
"""Reset the advanced tracker instance."""
|
|
@@ -1222,7 +1297,7 @@ class ColorDetectionUseCase(BaseProcessor):
|
|
|
1222
1297
|
filtered.append(detections[best_idx])
|
|
1223
1298
|
used[best_idx] = True
|
|
1224
1299
|
return filtered
|
|
1225
|
-
|
|
1300
|
+
|
|
1226
1301
|
def get_config_schema(self) -> Dict[str, Any]:
|
|
1227
1302
|
"""Get JSON schema for configuration validation."""
|
|
1228
1303
|
return {
|
|
@@ -1242,7 +1317,7 @@ class ColorDetectionUseCase(BaseProcessor):
|
|
|
1242
1317
|
"required": ["confidence_threshold", "top_k_colors"],
|
|
1243
1318
|
"additionalProperties": False
|
|
1244
1319
|
}
|
|
1245
|
-
|
|
1320
|
+
|
|
1246
1321
|
def create_default_config(self, **overrides) -> ColorDetectionConfig:
|
|
1247
1322
|
"""Create default configuration with optional overrides."""
|
|
1248
1323
|
defaults = {
|
|
@@ -1261,7 +1336,7 @@ class ColorDetectionUseCase(BaseProcessor):
|
|
|
1261
1336
|
}
|
|
1262
1337
|
defaults.update(overrides)
|
|
1263
1338
|
return ColorDetectionConfig(**defaults)
|
|
1264
|
-
|
|
1339
|
+
|
|
1265
1340
|
def _update_color_tracking_state(self, detections: List[Dict]):
|
|
1266
1341
|
"""Track unique track_ids per category and color for total count."""
|
|
1267
1342
|
# Ensure storage is a defaultdict(set) to allow safe .add()
|
|
@@ -1298,20 +1373,19 @@ class ColorDetectionUseCase(BaseProcessor):
                 color_to_ids[color].update(ids)
         return {color: len(ids) for color, ids in color_to_ids.items()}
 
-    def get_total_category_counts(self):
+    def get_total_category_counts(self,data):
         """Return total unique track_id count per category (across all colors)."""
-
-
-
-
-
-
-
-
-
-
-
-        return {cat: len(ids) for cat, ids in category_to_ids.items()}
+        for det in data:
+            track_id = det.get("track_id")
+            category = det.get("category")
+            if track_id and category:
+                if category not in self.total_category_count:
+                    self.total_category_count[category] = set()
+                self.total_category_count[category].add(track_id)
+
+        # Convert sets to counts
+        return {cat: len(track_ids) for cat, track_ids in self.total_category_count.items()}
+
 
     def _get_track_ids_info(self, detections: List[Dict]) -> Dict[str, Any]:
         """Get detailed information about track IDs for color detections (per frame)."""
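The rewritten get_total_category_counts now takes the current frame's detections and folds each track ID into a per-category set, so a track seen across many frames is counted once. A standalone sketch of that accumulation (the detections are invented; total_category_count mirrors the attribute in the diff):

    # Set-based accumulation as in the new method; the frames are illustrative.
    total_category_count = {}

    def get_total_category_counts(data):
        for det in data:
            track_id = det.get("track_id")
            category = det.get("category")
            if track_id and category:  # same truthiness check as the diff
                total_category_count.setdefault(category, set()).add(track_id)
        # Convert sets of unique track IDs to plain counts
        return {cat: len(ids) for cat, ids in total_category_count.items()}

    frame1 = [{"track_id": 1, "category": "car"}, {"track_id": 2, "category": "car"}]
    frame2 = [{"track_id": 1, "category": "car"}, {"track_id": 3, "category": "truck"}]
    get_total_category_counts(frame1)
    print(get_total_category_counts(frame2))  # {'car': 2, 'truck': 1}; track 1 is not double-counted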
@@ -1379,7 +1453,7 @@ class ColorDetectionUseCase(BaseProcessor):
             det.setdefault("masks", ["EMPTY"])
 
         return processed_detections
-
+
     def _generate_incidents(self, counting_summary: Dict, alerts: List, config: ColorDetectionConfig,
                             frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
         """Generate structured events for the output format with frame-based keys."""
@@ -1390,7 +1464,7 @@ class ColorDetectionUseCase(BaseProcessor):
         total_detections = counting_summary.get("total_count", 0)
         current_timestamp = self._get_current_timestamp_str(stream_info)
         camera_info = self.get_camera_info_from_stream(stream_info)
-
+
         self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list
 
         if total_detections > 0:
@@ -1401,11 +1475,11 @@ class ColorDetectionUseCase(BaseProcessor):
             if start_timestamp and self.current_incident_end_timestamp=='N/A':
                 self.current_incident_end_timestamp = 'Incident still active'
             elif start_timestamp and self.current_incident_end_timestamp=='Incident still active':
-                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
+                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
                     self.current_incident_end_timestamp = current_timestamp
             elif self.current_incident_end_timestamp!='Incident still active' and self.current_incident_end_timestamp!='N/A':
                 self.current_incident_end_timestamp = 'N/A'
-
+
             if config.alert_config and hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
                 threshold = config.alert_config.count_thresholds.get("all", 15)
                 intensity = min(10.0, (total_detections / threshold) * 10)
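An active incident is only closed once the mean of the last 15 entries in _ascending_alert_list falls below 1.5, i.e. detections have stayed low for a full window rather than dipping for one frame. A toy illustration of that closure test (the history values are invented):

    # Rolling-window closure test as above; the alert history is invented.
    alert_history = [3, 3, 2] + [1] * 15   # recent frames mostly quiet

    window = alert_history[-15:]
    if len(alert_history) >= 15 and sum(window) / 15 < 1.5:
        print("close incident: mean of last 15 frames =", sum(window) / 15)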
@@ -1456,7 +1530,7 @@ class ColorDetectionUseCase(BaseProcessor):
                                                       getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
                     }
                 })
-
+
             event= self.create_incident(incident_id=self.CASE_TYPE+'_'+str(frame_number), incident_type=self.CASE_TYPE,
                                         severity_level=level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
                                         start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
@@ -1468,7 +1542,7 @@ class ColorDetectionUseCase(BaseProcessor):
             incidents.append({})
 
         return incidents
-
+
     def _check_alerts(self, summary: dict, frame_number:Any, config: ColorDetectionConfig) -> List[Dict]:
         """
         Check if any alert thresholds are exceeded and return alert dicts.
@@ -1510,7 +1584,7 @@ class ColorDetectionUseCase(BaseProcessor):
         if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
 
             for category, threshold in config.alert_config.count_thresholds.items():
-                if category == "all" and total > threshold:
+                if category == "all" and total > threshold:
 
                     alerts.append({
                         "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
@@ -1520,7 +1594,7 @@ class ColorDetectionUseCase(BaseProcessor):
                         "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                         "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                                                           getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
-                        }
+                        }
                     })
                 elif category in summary.get("per_category_count", {}):
                     count = summary.get("per_category_count", {})[category]
@@ -1533,12 +1607,17 @@ class ColorDetectionUseCase(BaseProcessor):
                         "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                         "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                                                           getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
-                        }
+                        }
                     })
                 else:
                     pass
         return alerts
-
+
+    def _format_timestamp_for_stream(self, timestamp: float) -> str:
+        """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
+        dt = datetime.fromtimestamp(float(timestamp), tz=timezone.utc)
+        return dt.strftime('%Y:%m:%d %H:%M:%S')
+
     def _format_timestamp_for_video(self, timestamp: float) -> str:
         """Format timestamp for video chunks (HH:MM:SS.ms format)."""
         hours = int(timestamp // 3600)
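_format_timestamp_for_stream moves ahead of _format_timestamp_for_video and now wraps its argument in float(), so a string epoch no longer makes datetime.fromtimestamp raise. A quick standalone check of both output shapes (the minutes line in the video formatter is assumed, since the hunk only shows the hours and seconds lines):

    from datetime import datetime, timezone

    def format_for_stream(timestamp):
        # float() also lets string epochs like "1700000000" through
        dt = datetime.fromtimestamp(float(timestamp), tz=timezone.utc)
        return dt.strftime('%Y:%m:%d %H:%M:%S')

    def format_for_video(timestamp):
        hours = int(timestamp // 3600)
        minutes = int((timestamp % 3600) // 60)  # assumed; not shown in the hunk
        seconds = round(float(timestamp % 60), 2)
        return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"

    print(format_for_stream("1700000000"))  # 2023:11:14 22:13:20
    print(format_for_video(3725.46))        # 01:02:5.5 (seconds keep one decimal)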
@@ -1546,11 +1625,6 @@ class ColorDetectionUseCase(BaseProcessor):
         seconds = round(float(timestamp % 60),2)
         return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
 
-    def _format_timestamp_for_stream(self, timestamp: float) -> str:
-        """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
-        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
-        return dt.strftime('%Y:%m:%d %H:%M:%S')
-
     def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
         """Get formatted current timestamp based on stream type."""
 
@@ -1564,6 +1638,7 @@ class ColorDetectionUseCase(BaseProcessor):
             start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
             stream_time_str = self._format_timestamp_for_video(start_time)
 
+
             return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
         else:
             return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
@@ -1575,8 +1650,7 @@ class ColorDetectionUseCase(BaseProcessor):
             start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
 
             stream_time_str = self._format_timestamp_for_video(start_time)
-
-
+
             return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
         else:
             stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
@@ -1595,22 +1669,22 @@ class ColorDetectionUseCase(BaseProcessor):
         """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
         if not stream_info:
             return "00:00:00"
-
+
         if precision:
             if self.start_timer is None:
-                self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "
+                self.start_timer = stream_info.get("input_settings", {}).get("stream_time", datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC"))
                 return self._format_timestamp(self.start_timer)
             elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
-                self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "
+                self.start_timer = stream_info.get("input_settings", {}).get("stream_time", datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC"))
                 return self._format_timestamp(self.start_timer)
             else:
                 return self._format_timestamp(self.start_timer)
 
         if self.start_timer is None:
-            self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "
+            self.start_timer = stream_info.get("input_settings", {}).get("stream_time", datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC"))
             return self._format_timestamp(self.start_timer)
         elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
-            self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "
+            self.start_timer = stream_info.get("input_settings", {}).get("stream_time", datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC"))
             return self._format_timestamp(self.start_timer)
 
         else:
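The removed lines appear cut off in this view; what the hunk does show is that the replacement supplies the current UTC wall clock as the default for each .get("stream_time", ...) call, so start_timer is always a well-formed timestamp string even when the stream reports no start time. The fallback in isolation (stream_info here is hypothetical):

    from datetime import datetime, timezone

    def start_time_or_now(stream_info):
        # Same dict-lookup-with-default shape as the fixed lines above.
        return stream_info.get("input_settings", {}).get(
            "stream_time",
            datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC"),
        )

    print(start_time_or_now({"input_settings": {"stream_time": "2024-01-01-00:00:00.000000 UTC"}}))
    print(start_time_or_now({}))  # falls back to the current UTC time string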
@@ -1632,7 +1706,7 @@ class ColorDetectionUseCase(BaseProcessor):
         dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
         dt = dt.replace(minute=0, second=0, microsecond=0)
         return dt.strftime('%Y:%m:%d %H:%M:%S')
-
+
     def _format_timestamp(self, timestamp: Any) -> str:
         """Format a timestamp so that exactly two digits follow the decimal point (milliseconds).
 
@@ -1693,37 +1767,37 @@ class ColorDetectionUseCase(BaseProcessor):
         """Update zone tracking with current frame data."""
         if not zone_analysis or not config.zone_config or not config.zone_config['zones']:
             return {}
-
+
         enhanced_zone_analysis = {}
         zones = config.zone_config['zones']
-
+
         # Initialize current frame zone tracks
         current_frame_zone_tracks = {zone_name: set() for zone_name in zones.keys()}
-
+
         # Initialize zone tracking storage
         for zone_name in zones.keys():
             if zone_name not in self._zone_current_track_ids:
                 self._zone_current_track_ids[zone_name] = set()
             if zone_name not in self._zone_total_track_ids:
                 self._zone_total_track_ids[zone_name] = set()
-
+
         # Check each detection against each zone
         for detection in detections:
             track_id = detection.get("track_id")
             if track_id is None:
                 continue
-
+
             bbox = detection.get("bounding_box", detection.get("bbox"))
             if not bbox:
                 continue
-
+
             # Check which zone this detection is in
             for zone_name, zone_polygon in zones.items():
                 if self._is_in_zone(bbox, zone_polygon):
                     current_frame_zone_tracks[zone_name].add(track_id)
                     if track_id not in self.color_det_dict:  # Use color_det_dict for consistency
                         self.color_det_dict[track_id] = [detection.get("main_color", "unknown"), detection.get("confidence", 0.0)]
-
+
         # Update zone tracking for each zone
         for zone_name, zone_counts in zone_analysis.items():
             current_tracks = current_frame_zone_tracks.get(zone_name, set())
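Whitespace aside, the flow above is: for each tracked detection, test its bounding box against every configured zone polygon; the set of track IDs seen this frame gives current occupancy, and the running union gives cumulative occupancy. A minimal standalone sketch; the center-in-rectangle test is a stand-in for _is_in_zone, whose implementation is not part of this diff:

    # Per-zone set bookkeeping; rectangle containment stands in for _is_in_zone.
    zones = {"entrance": (0, 0, 50, 50)}            # xmin, ymin, xmax, ymax
    zone_total_track_ids = {name: set() for name in zones}

    def point_in_rect(x, y, rect):
        xmin, ymin, xmax, ymax = rect
        return xmin <= x <= xmax and ymin <= y <= ymax

    def update_zones(detections):
        current = {name: set() for name in zones}
        for det in detections:
            track_id = det.get("track_id")
            bbox = det.get("bbox")                  # [x1, y1, x2, y2]
            if track_id is None or not bbox:
                continue
            cx, cy = (bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2
            for name, rect in zones.items():
                if point_in_rect(cx, cy, rect):
                    current[name].add(track_id)
                    zone_total_track_ids[name].add(track_id)
        return {name: {"current_count": len(ids),
                       "total_count": len(zone_total_track_ids[name])}
                for name, ids in current.items()}

    print(update_zones([{"track_id": 7, "bbox": [10, 10, 20, 20]}]))
    # {'entrance': {'current_count': 1, 'total_count': 1}}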
@@ -1731,7 +1805,7 @@ class ColorDetectionUseCase(BaseProcessor):
             self._zone_total_track_ids[zone_name].update(current_tracks)
             self._zone_current_counts[zone_name] = len(current_tracks)
             self._zone_total_counts[zone_name] = len(self._zone_total_track_ids[zone_name])
-
+
             enhanced_zone_analysis[zone_name] = {
                 "current_count": self._zone_current_counts[zone_name],
                 "total_count": self._zone_total_counts[zone_name],
@@ -1739,7 +1813,7 @@ class ColorDetectionUseCase(BaseProcessor):
                 "total_track_ids": list(self._zone_total_track_ids[zone_name]),
                 "original_counts": zone_counts
             }
-
+
         return enhanced_zone_analysis
 
     def _compute_iou(self, box1: Any, box2: Any) -> float: