matrice-analytics 0.1.97__py3-none-any.whl → 0.1.106__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/post_processing/advanced_tracker/config.py +8 -4
- matrice_analytics/post_processing/advanced_tracker/track_class_aggregator.py +128 -0
- matrice_analytics/post_processing/advanced_tracker/tracker.py +22 -1
- matrice_analytics/post_processing/config.py +4 -4
- matrice_analytics/post_processing/face_reg/face_recognition.py +706 -73
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +25 -14
- matrice_analytics/post_processing/usecases/footfall.py +109 -2
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +55 -37
- matrice_analytics/post_processing/usecases/vehicle_monitoring_drone_view.py +219 -3
- matrice_analytics/post_processing/usecases/vehicle_monitoring_parking_lot.py +18 -1
- matrice_analytics/post_processing/utils/__init__.py +5 -0
- matrice_analytics/post_processing/utils/agnostic_nms.py +759 -0
- matrice_analytics/post_processing/utils/alert_instance_utils.py +37 -2
- {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.106.dist-info}/METADATA +1 -1
- {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.106.dist-info}/RECORD +18 -16
- {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.106.dist-info}/WHEEL +0 -0
- {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.106.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.106.dist-info}/top_level.txt +0 -0
matrice_analytics/post_processing/face_reg/people_activity_logging.py

@@ -154,33 +154,40 @@ class PeopleActivityLogging:
         except Exception as e:
             self.logger.error(f"Error enqueueing detection: {e}", exc_info=True)
 
-    def _should_log_detection(self, employee_id: str) -> bool:
+    def _should_log_detection(self, employee_id: str, camera_id: str = "") -> bool:
         """
-        Check if detection should be logged based on employee ID and time threshold.
-        Only log if employee_id was not detected in the past
+        Check if detection should be logged based on employee ID (+ camera ID) and time threshold.
+        Only log if the same (employee_id, camera_id) was not detected in the past N seconds.
+        If camera_id is empty, falls back to global employee_id de-duplication (backward compatible).
 
        TODO: Make this use track_id or similarity check instead of just employee_id in 10 secs window
        for better deduplication across different detection sessions.
        """
        current_time = time.time()
+        dedupe_key = f"{employee_id}::{camera_id}" if camera_id else employee_id
 
        # Clean up old entries (older than threshold)
        expired_keys = [
-            employee_id for employee_id, timestamp in self.recent_employee_detections.items()
+            key for key, timestamp in self.recent_employee_detections.items()
            if current_time - timestamp > self.employee_detection_threshold
        ]
-        for employee_id in expired_keys:
-            del self.recent_employee_detections[employee_id]
+        for key in expired_keys:
+            del self.recent_employee_detections[key]
 
-        # Check if employee was recently detected
-        if employee_id in self.recent_employee_detections:
-            last_detection = self.recent_employee_detections[employee_id]
+        # Check if employee was recently detected (per camera_id)
+        if dedupe_key in self.recent_employee_detections:
+            last_detection = self.recent_employee_detections[dedupe_key]
            if current_time - last_detection < self.employee_detection_threshold:
-                self.logger.debug(…)
+                self.logger.debug(
+                    "Skipping logging for employee %s (camera_id=%s) - detected %.1fs ago",
+                    employee_id,
+                    camera_id,
+                    current_time - last_detection,
+                )
                return False
 
-        # Update detection time for this employee
-        self.recent_employee_detections[employee_id] = current_time
+        # Update detection time for this (employee, camera)
+        self.recent_employee_detections[dedupe_key] = current_time
        return True
 
    async def _process_activity(self, activity_data: Dict):
@@ -202,8 +209,12 @@ class PeopleActivityLogging:
            return
 
        # Check if we should log this detection (avoid duplicates within time window)
-        if not self._should_log_detection(employee_id):
-            self.logger.debug(…)
+        if not self._should_log_detection(employee_id, camera_id=camera_id):
+            self.logger.debug(
+                "Skipping activity log for employee_id=%s (camera_id=%s) (within cooldown period)",
+                employee_id,
+                camera_id,
+            )
            return None
 
        # Encode frame as base64 JPEG
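Taken together, the two hunks above key the detection cooldown on (employee_id, camera_id) instead of employee_id alone, so the same person seen on two cameras is logged once per camera. A minimal standalone sketch of that behavior (the class name and the 10 s threshold are invented for the example, not the package's API):

import time

# Illustrative re-creation of the per-(employee, camera) cooldown logic;
# DetectionDeduper is a hypothetical name, not a class in this package.
class DetectionDeduper:
    def __init__(self, threshold_s=10.0):
        self.threshold_s = threshold_s
        self.recent = {}  # dedupe_key -> last detection timestamp

    def should_log(self, employee_id, camera_id=""):
        now = time.time()
        # Empty camera_id falls back to global per-employee de-duplication
        key = f"{employee_id}::{camera_id}" if camera_id else employee_id
        # Drop entries older than the cooldown window
        self.recent = {k: t for k, t in self.recent.items() if now - t <= self.threshold_s}
        if key in self.recent:
            return False  # still inside the cooldown for this employee+camera
        self.recent[key] = now
        return True

dedupe = DetectionDeduper(threshold_s=10.0)
print(dedupe.should_log("emp42", "cam1"))  # True  - first sighting on cam1
print(dedupe.should_log("emp42", "cam1"))  # False - duplicate within cooldown
print(dedupe.should_log("emp42", "cam2"))  # True  - same employee, different camera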
matrice_analytics/post_processing/usecases/footfall.py

@@ -1,7 +1,10 @@
 from typing import Any, Dict, List, Optional
 from dataclasses import asdict
 import time
+import math
+import numpy as np
 from datetime import datetime, timezone
+from collections import defaultdict, deque
 
 from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
 from ..utils import (
@@ -19,6 +22,76 @@ from ..utils import (
 from dataclasses import dataclass, field
 from ..core.config import BaseConfig, AlertConfig, ZoneConfig
 
+class TrajectoryCorrector:
+    """
+    Handles Velocity-Fusion logic to correct model orientation errors.
+    Stores history of track centers and applies EMA smoothing.
+    """
+    def __init__(self):
+        # track_id -> { "centers": deque, "smooth_angle": float }
+        self.history = defaultdict(lambda: {
+            "centers": deque(maxlen=10),  # Lookback for velocity
+            "smooth_angle": None          # For EMA smoothing
+        })
+
+    def update_and_correct(self, track_id, center, raw_angle_deg):
+        """
+        Returns the corrected angle based on velocity fusion.
+        """
+        state = self.history[track_id]
+        state["centers"].append(center)
+
+        # 1. Calculate Velocity Angle
+        velocity_angle = self._compute_velocity_angle(state["centers"])
+
+        # 2. Apply +90 Fix to Raw Model Angle (Matches your successful tests)
+        # Note: raw_angle_deg comes from predict.py
+        if raw_angle_deg is None: raw_angle_deg = 0.0
+        model_angle = (raw_angle_deg + 90) % 360
+
+        # 3. Determine Target (Velocity vs Model)
+        # Hybrid Logic: If moving (velocity valid), use Physics. Else, use Visuals.
+        target_angle = velocity_angle if velocity_angle is not None else model_angle
+
+        # 4. Apply EMA Smoothing (The Jitter Killer)
+        # alpha=0.2 means we trust new data 20%, old history 80%
+        state["smooth_angle"] = self._apply_ema(state["smooth_angle"], target_angle, alpha=0.2)
+
+        return state["smooth_angle"]
+
+    def _compute_velocity_angle(self, centers):
+        if len(centers) < 2:
+            return None
+
+        # Look back 5 frames for stability
+        lookback = min(len(centers), 5)
+        (x_past, y_past) = centers[-lookback]
+        (x_now, y_now) = centers[-1]
+
+        dx = x_now - x_past
+        dy = y_now - y_past
+
+        # THRESHOLD: 2.5 pixels (Validated in your tests)
+        # If moving less than this, velocity is noise.
+        if math.hypot(dx, dy) < 2.5:
+            return None
+
+        # Angle calculation (0-360)
+        return math.degrees(math.atan2(-dy, dx)) % 360
+
+    def _apply_ema(self, current_smooth, new_target, alpha=0.2):
+        if current_smooth is None:
+            return new_target
+
+        # Vector smoothing to handle 0/360 wrap-around correctly
+        prev_rad = math.radians(current_smooth)
+        curr_rad = math.radians(new_target)
+
+        new_sin = (1 - alpha) * math.sin(prev_rad) + alpha * math.sin(curr_rad)
+        new_cos = (1 - alpha) * math.cos(prev_rad) + alpha * math.cos(curr_rad)
+
+        return math.degrees(math.atan2(new_sin, new_cos)) % 360
+
 @dataclass
 class FootFallConfig(BaseConfig):
     """Configuration for footfall use case."""
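The _apply_ema helper above is what keeps the smoothed heading stable across the 0/360 boundary: it blends unit vectors (sin/cos) rather than raw degrees, so 350 and 10 average near 0 instead of 180. A self-contained restatement of the same math, standalone for illustration:

import math

def ema_angle(current, target, alpha=0.2):
    # Wrap-safe EMA over angles in degrees: blend sin/cos, then recover the angle.
    if current is None:
        return target
    p, t = math.radians(current), math.radians(target)
    s = (1 - alpha) * math.sin(p) + alpha * math.sin(t)
    c = (1 - alpha) * math.cos(p) + alpha * math.cos(t)
    return math.degrees(math.atan2(s, c)) % 360

angle = 350.0
for _ in range(10):
    angle = ema_angle(angle, 10.0)   # target sits just across the 0/360 wrap
print(round(angle, 1))  # close to 8: converges toward 10 through 0/360, never down through 180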
@@ -79,9 +152,12 @@ class FootFallUseCase(BaseProcessor):
         self.category = "retail"
         self.CASE_TYPE: Optional[str] = 'footfall'
         self.CASE_VERSION: Optional[str] = '1.1'
-        self.target_categories = ['person']
+        self.target_categories = ['person']
         self.smoothing_tracker = None
         self.tracker = None
+
+        # Initialize the Velocity Logic
+        self.trajectory_corrector = TrajectoryCorrector()
         self._total_frame_counter = 0
         self._global_frame_offset = 0
         self._tracking_start_time = None
@@ -144,9 +220,40 @@ class FootFallUseCase(BaseProcessor):
                     match_thresh=0.8)
                 self.tracker = AdvancedTracker(tracker_config)
                 self.logger.info("Initialized AdvancedTracker for People Counting")
+
+            # 1. Run Standard Tracker (Assigns IDs)
             processed_data = self.tracker.update(processed_data)
+
+            # =========================================================
+            # NEW: INJECT VELOCITY FUSION LOGIC
+            # =========================================================
+            for det in processed_data:
+                track_id = det.get("track_id")
+                bbox = det.get("bounding_box", det.get("bbox"))
+
+                # Check for 'raw_angle' (from predict.py) or 'orientation'
+                raw_angle = det.get("raw_angle", det.get("orientation", 0.0))
+
+                if track_id is not None and bbox:
+                    # Calculate Center (cx, cy)
+                    cx = int((bbox[0] + bbox[2]) / 2)
+                    cy = int((bbox[1] + bbox[3]) / 2)
+
+                    # Run Correction (Velocity + EMA + 90 Fix)
+                    final_angle = self.trajectory_corrector.update_and_correct(
+                        track_id,
+                        (cx, cy),
+                        raw_angle
+                    )
+
+                    # OVERWRITE the detection angle
+                    # This ensures _generate_tracking_stats uses YOUR logic
+                    det["orientation"] = final_angle  # For UI
+                    det["angle"] = final_angle        # For Analytics
+            # =========================================================
+
         except Exception as e:
-            self.logger.warning(f"AdvancedTracker failed: {e}")
+            self.logger.warning(f"AdvancedTracker/Velocity failed: {e}")
 
         self._update_tracking_state(processed_data)
         self._total_frame_counter += 1
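One convention worth noting in _compute_velocity_angle (which feeds the injection loop above): image-space y grows downward, so the code negates dy before atan2 to report conventional counter-clockwise headings. A quick standalone check with illustrative coordinates (same formula as the hunk, not the package's API):

import math

def velocity_angle(past, now, min_disp=2.5):
    # Heading in degrees [0, 360) for image coordinates (y grows downward).
    dx, dy = now[0] - past[0], now[1] - past[1]
    if math.hypot(dx, dy) < min_disp:  # sub-threshold displacement is treated as noise
        return None
    return math.degrees(math.atan2(-dy, dx)) % 360

print(velocity_angle((100, 100), (110, 100)))  # 0.0   -> moving right
print(velocity_angle((100, 100), (100, 90)))   # 90.0  -> moving up the image
print(velocity_angle((100, 100), (100, 110)))  # 270.0 -> moving down the image
print(velocity_angle((100, 100), (101, 100)))  # None  -> below the 2.5 px threshold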
matrice_analytics/post_processing/usecases/license_plate_monitoring.py

@@ -535,6 +535,10 @@ class LicensePlateMonitorUseCase(BaseProcessor):
         self.plate_logger: Optional[LicensePlateMonitorLogger] = None
         self._logging_enabled = True  # False //ToDo: DISABLED FOR NOW, ENABLED FOR PRODUCTION. ##
         self._plate_logger_initialized = False  # Track if plate logger has been initialized
+
+        # Track which track_ids have been logged to avoid duplicate logging
+        # Only log confirmed/consensus plates, not every OCR prediction
+        self._logged_track_ids: set = set()
 
         # Initialize instant alert manager (will be lazily initialized on first process() call)
         self.alert_manager: Optional[ALERT_INSTANCE] = None
@@ -942,6 +946,8 @@ class LicensePlateMonitorUseCase(BaseProcessor):
         self._unique_plate_texts = {}
         self.helper = {}
         self.unique_plate_track = {}
+        # Reset logged track_ids to allow fresh logging
+        self._logged_track_ids = set()
         self.logger.info("Plate tracking state reset")
 
     def reset_all_tracking(self) -> None:
@@ -1147,13 +1153,19 @@ class LicensePlateMonitorUseCase(BaseProcessor):
 
     async def _log_detected_plates(self, detections: List[Dict[str, Any]], config: LicensePlateMonitorConfig,
                                    stream_info: Optional[Dict[str, Any]], image_bytes: Optional[bytes] = None) -> None:
-        """
+        """
+        Log confirmed/consensus plates to RPC server.
+
+        Only logs plates that have reached consensus (are in _tracked_plate_texts),
+        and only logs each track_id once to avoid duplicate logging of garbage OCR predictions.
+        Uses the confirmed consensus plate text, not the raw frame-by-frame OCR output.
+        """
         # Enhanced logging for diagnostics
         print(f"[LP_LOGGING] Starting plate logging check - detections count: {len(detections)}")
         self.logger.info(f"[LP_LOGGING] Starting plate logging check - detections count: {len(detections)}")
-        self.logger.info(f"[LP_LOGGING] Logging enabled: {self._logging_enabled}, Plate logger exists: {self.plate_logger is not None}
+        self.logger.info(f"[LP_LOGGING] Logging enabled: {self._logging_enabled}, Plate logger exists: {self.plate_logger is not None}")
+        self.logger.info(f"[LP_LOGGING] Confirmed plates (tracked): {len(self._tracked_plate_texts)}, Already logged tracks: {len(self._logged_track_ids)}")
 
-        #self._logging_enabled = False # ToDo: DISABLED FOR NOW, ENABLED FOR PRODUCTION
         if not self._logging_enabled:
             print("[LP_LOGGING] Plate logging is DISABLED")
             self.logger.warning("[LP_LOGGING] Plate logging is DISABLED - logging_enabled flag is False")
@@ -1164,11 +1176,6 @@ class LicensePlateMonitorUseCase(BaseProcessor):
             self.logger.warning("[LP_LOGGING] Plate logging SKIPPED - plate_logger is not initialized (lpr_server_id may not be configured)")
             return
 
-        # if not stream_info:
-        #     print("[LP_LOGGING] Plate logging SKIPPED - stream_info is None")
-        #     self.logger.warning("[LP_LOGGING] Plate logging SKIPPED - stream_info is None")
-        #     return
-
         print("[LP_LOGGING] All pre-conditions met, proceeding with plate logging")
         self.logger.info(f"[LP_LOGGING] All pre-conditions met, proceeding with plate logging")
 
@@ -1195,35 +1202,45 @@ class LicensePlateMonitorUseCase(BaseProcessor):
                 else:
                     self.logger.warning(f"[LP_LOGGING] Failed to decode image bytes")
             except Exception as e:
-                #pass
                 self.logger.error(f"[LP_LOGGING] Exception while encoding frame image: {e}", exc_info=True)
         else:
             self.logger.info(f"[LP_LOGGING] No image_bytes provided, sending without image")
 
-        # …
+        # Only log CONFIRMED/CONSENSUS plates from _tracked_plate_texts
+        # Avoid logging every raw OCR prediction - only log final confirmed plate per track_id
+        plates_to_log = {}  # track_id -> consensus_plate_text
+
         for det in detections:
-            if …:
-                detections_without_text += 1
+            track_id = det.get('track_id')
+            if track_id is None:
                 continue
 
+            # Skip if this track_id has already been logged
+            if track_id in self._logged_track_ids:
+                self.logger.debug(f"[LP_LOGGING] Skipping track_id={track_id} - already logged")
+                continue
+
+            # Only log if this track_id has a confirmed/consensus plate
+            if track_id in self._tracked_plate_texts:
+                consensus_plate = self._tracked_plate_texts[track_id]
+                if consensus_plate:
+                    plates_to_log[track_id] = consensus_plate
+                    self.logger.debug(f"[LP_LOGGING] Found confirmed plate for track_id={track_id}: {consensus_plate}")
 
-        …
+        confirmed_count = len(plates_to_log)
+        raw_ocr_count = sum(1 for d in detections if d.get('plate_text'))
+        print(f"[LP_LOGGING] Confirmed plates to log: {confirmed_count} (from {raw_ocr_count} raw OCR detections)")
+        self.logger.info(f"[LP_LOGGING] Confirmed plates to log: {confirmed_count}, Raw OCR detections: {raw_ocr_count}")
+        self.logger.info(f"[LP_LOGGING] Plates: {list(plates_to_log.values())}")
 
-        # Log each …
+        # Log each confirmed plate (respecting cooldown)
         if plates_to_log:
-            print(f"[LP_LOGGING] Logging {len(plates_to_log)} plates with cooldown={config.plate_log_cooldown}s")
-            self.logger.info(f"[LP_LOGGING] Logging {len(plates_to_log)} plates with cooldown={config.plate_log_cooldown}s")
+            print(f"[LP_LOGGING] Logging {len(plates_to_log)} confirmed plates with cooldown={config.plate_log_cooldown}s")
+            self.logger.info(f"[LP_LOGGING] Logging {len(plates_to_log)} confirmed plates with cooldown={config.plate_log_cooldown}s")
             try:
-                …
-                    self.logger.info(f"[LP_LOGGING] Processing plate: {plate_text}")
+                for track_id, plate_text in plates_to_log.items():
+                    print(f"[LP_LOGGING] Processing confirmed plate: {plate_text} (track_id={track_id})")
+                    self.logger.info(f"[LP_LOGGING] Processing confirmed plate: {plate_text} (track_id={track_id})")
                     try:
                         result = await self.plate_logger.log_plate(
                             plate_text=plate_text,
@@ -1232,25 +1249,26 @@ class LicensePlateMonitorUseCase(BaseProcessor):
                             image_data=image_data,
                             cooldown=config.plate_log_cooldown
                         )
-                        …
+                        if result:
+                            # Mark this track_id as logged to avoid duplicate logging
+                            self._logged_track_ids.add(track_id)
+                            print(f"[LP_LOGGING] Plate {plate_text}: SENT (track_id={track_id} marked as logged)")
+                            self.logger.info(f"[LP_LOGGING] Plate {plate_text}: SENT (track_id={track_id} marked as logged)")
+                        else:
+                            print(f"[LP_LOGGING] Plate {plate_text}: SKIPPED (cooldown)")
+                            self.logger.info(f"[LP_LOGGING] Plate {plate_text}: SKIPPED (cooldown)")
                     except Exception as e:
-                        #pass
                         print(f"[LP_LOGGING] ERROR - Plate {plate_text} failed: {e}")
                         self.logger.error(f"[LP_LOGGING] Plate {plate_text} raised exception: {e}", exc_info=True)
 
                 print("[LP_LOGGING] Plate logging complete")
-                self.logger.info(f"[LP_LOGGING] Plate logging complete")
+                self.logger.info(f"[LP_LOGGING] Plate logging complete - {len(self._logged_track_ids)} total tracks logged so far")
             except Exception as e:
-                print(f"[LP_LOGGING] CRITICAL ERROR during plate logging: {e}")
                 print(f"[LP_LOGGING] CRITICAL ERROR during plate logging: {e}")
                 self.logger.error(f"[LP_LOGGING] CRITICAL ERROR during plate logging: {e}", exc_info=True)
-                pass
         else:
-            print("[LP_LOGGING] No plates to log")
-            self.logger.info(f"[LP_LOGGING] No plates to log (…
+            print("[LP_LOGGING] No confirmed plates to log (plates may still be reaching consensus)")
+            self.logger.info(f"[LP_LOGGING] No confirmed plates to log (waiting for consensus)")
 
     async def process(self, data: Any, config: ConfigProtocol, input_bytes: Optional[bytes] = None,
                       context: Optional[ProcessingContext] = None, stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
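The pattern the last two hunks implement - log each track at most once, and only after a plate string has won consensus - can be summarized in a small standalone sketch. The ocr_reads input and the min_votes threshold below are invented for illustration; in the package the confirmed plates live in _tracked_plate_texts and the sent tracks in _logged_track_ids:

from collections import Counter

# Hypothetical per-track OCR reads; in the package these accumulate per frame.
ocr_reads = {
    7: ["KA01AB1234", "KA01AB1234", "KA01A81234", "KA01AB1234"],
    9: ["MH12XY9", "MH12XY99"],  # still noisy, no stable majority yet
}
logged_track_ids = set()

def consensus(reads, min_votes=3):
    # Return the majority plate text once it has enough votes, else None.
    text, votes = Counter(reads).most_common(1)[0]
    return text if votes >= min_votes else None

plates_to_log = {}
for track_id, reads in ocr_reads.items():
    if track_id in logged_track_ids:  # each track is logged at most once
        continue
    plate = consensus(reads)
    if plate:
        plates_to_log[track_id] = plate

logged_track_ids.update(plates_to_log)  # mark as logged after a successful send
print(plates_to_log)  # {7: 'KA01AB1234'} - track 9 keeps waiting for consensus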
matrice_analytics/post_processing/usecases/vehicle_monitoring_drone_view.py

@@ -19,6 +19,7 @@ from ..utils import (
 from dataclasses import dataclass, field
 from ..core.config import BaseConfig, AlertConfig, ZoneConfig
 from ..utils.geometry_utils import get_bbox_center, point_in_polygon, get_bbox_bottom25_center
+from ..utils.agnostic_nms import AgnosticNMS
 
 @dataclass
 class VehicleMonitoringDroneViewConfig(BaseConfig):
@@ -30,6 +31,17 @@ class VehicleMonitoringDroneViewConfig(BaseConfig):
     smoothing_confidence_range_factor: float = 0.5
     confidence_threshold: float = 0.6
 
+    # Agnostic-NMS: Configuration parameters
+    enable_nms: bool = True
+    nms_iou_threshold: float = 0.45
+    nms_class_agnostic: bool = True
+    nms_min_box_size: float = 2.0
+    nms_use_vectorized: bool = True
+
+    # Class Aggregation: Configuration parameters
+    enable_class_aggregation: bool = True
+    class_aggregation_window_size: int = 30  # 30 frames ≈ 1 second at 30 FPS
+
     #JBK_720_GATE POLYGON = [[86, 328], [844, 317], [1277, 520], [1273, 707], [125, 713]]
     zone_config: Optional[Dict[str, List[List[float]]]] = None #field(
     # default_factory=lambda: {
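Since agnostic_nms.py itself is not excerpted in this diff (only its addition appears in the file summary above), here is a minimal sketch of what class-agnostic IoU suppression with a minimum box size typically looks like. Parameter names mirror the config fields above, but the actual AgnosticNMS.apply implementation may differ:

def iou(a, b):
    # IoU of two boxes given as (x1, y1, x2, y2).
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter) if inter else 0.0

def agnostic_nms(dets, iou_threshold=0.45, min_box_size=2.0, class_agnostic=True):
    # Greedy NMS; ignores category when class_agnostic=True, so a "car" box
    # can suppress an overlapping "truck" box for the same vehicle.
    dets = [d for d in dets
            if d["bbox"][2] - d["bbox"][0] >= min_box_size
            and d["bbox"][3] - d["bbox"][1] >= min_box_size]
    kept = []
    for d in sorted(dets, key=lambda d: d["confidence"], reverse=True):
        clash = any(
            iou(d["bbox"], k["bbox"]) > iou_threshold
            and (class_agnostic or d["category"] == k["category"])
            for k in kept
        )
        if not clash:
            kept.append(d)
    return kept

dets = [
    {"bbox": (10, 10, 110, 110), "confidence": 0.9, "category": "car"},
    {"bbox": (12, 12, 112, 112), "confidence": 0.8, "category": "truck"},  # overlaps the car
    {"bbox": (300, 300, 400, 400), "confidence": 0.7, "category": "bus"},
]
print([d["category"] for d in agnostic_nms(dets)])  # ['car', 'bus'] - truck suppressed despite differing class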
@@ -99,6 +111,9 @@ class VehicleMonitoringDroneViewUseCase(BaseProcessor):
         self._zone_current_counts = {}  # zone_name -> current count in zone
         self._zone_total_counts = {}    # zone_name -> total count that have been in zone
 
+        # Agnostic-NMS: Initialize reusable NMS module
+        self._nms_module = None
+
     def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
                 stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
         processing_start = time.time()
@@ -110,18 +125,28 @@ class VehicleMonitoringDroneViewUseCase(BaseProcessor):
         # Determine if zones are configured
         has_zones = bool(config.zone_config and config.zone_config.get('zones'))
 
+        # ===== DEBUG POINT 1: RAW INPUT =====
+        self._log_detection_stats(data, "01_RAW_INPUT", show_samples=True)
+
         # Normalize typical YOLO outputs (COCO pretrained) to internal schema
         data = self._normalize_yolo_results(data, getattr(config, 'index_to_category', None))
 
+        # ===== DEBUG POINT 2: AFTER NORMALIZATION =====
+        self._log_detection_stats(data, "02_AFTER_NORMALIZATION", show_samples=True)
+
         input_format = match_results_structure(data)
         context.input_format = input_format
         context.confidence_threshold = config.confidence_threshold
-
+        # NOTE : Confidence Threshold overwrite disabled for now
+        # config.confidence_threshold = 0.25
+
         # param to be updated
 
         if config.confidence_threshold is not None:
             processed_data = filter_by_confidence(data, config.confidence_threshold)
             self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
+            # ===== DEBUG POINT 3: AFTER CONFIDENCE FILTER =====
+            self._log_detection_stats(processed_data, "03_AFTER_CONFIDENCE_FILTER")
         else:
             processed_data = data
             self.logger.debug("Did not apply confidence filtering since no threshold provided")
@@ -129,11 +154,74 @@ class VehicleMonitoringDroneViewUseCase(BaseProcessor):
         if config.index_to_category:
             processed_data = apply_category_mapping(processed_data, config.index_to_category)
             self.logger.debug("Applied category mapping")
+            # ===== DEBUG POINT 4: AFTER CATEGORY MAPPING =====
+            self._log_detection_stats(processed_data, "04_AFTER_CATEGORY_MAPPING")
+
+        # Agnostic-NMS: Apply NMS using reusable module with safety
+        if getattr(config, 'enable_nms', False):
+            pre_nms_count = len(processed_data)
+
+            # ===== DEBUG POINT 5: BEFORE NMS =====
+            self._log_detection_stats(processed_data, "05_BEFORE_NMS", show_samples=True)
+
+            # Safety: Log pre-NMS state for debugging
+            if pre_nms_count > 0:
+                sample_det = processed_data[0]
+                self.logger.debug(
+                    f"Pre-NMS sample detection keys: {list(sample_det.keys())}, "
+                    f"category type: {type(sample_det.get('category')).__name__}, "
+                    f"confidence type: {type(sample_det.get('confidence')).__name__}"
+                )
+
+            try:
+                # Initialize NMS module if needed
+                if self._nms_module is None:
+                    self._nms_module = AgnosticNMS(
+                        iou_threshold=getattr(config, 'nms_iou_threshold', 0.45),
+                        min_box_size=getattr(config, 'nms_min_box_size', 2.0),
+                        use_vectorized=getattr(config, 'nms_use_vectorized', True)
+                    )
+                    self.logger.info("AgnosticNMS module initialized")
+
+                # Apply NMS
+                processed_data = self._nms_module.apply(
+                    processed_data,
+                    class_agnostic=getattr(config, 'nms_class_agnostic', True),
+                    target_categories=self.target_categories
+                )
+
+                post_nms_count = len(processed_data)
+                suppressed_count = pre_nms_count - post_nms_count
+
+                # ===== DEBUG POINT 6: AFTER NMS =====
+                self._log_detection_stats(processed_data, "06_AFTER_NMS")
+
+                self.logger.info(
+                    f"NMS applied successfully: {pre_nms_count} -> {post_nms_count} detections "
+                    f"({suppressed_count} suppressed, {100 * suppressed_count / max(pre_nms_count, 1):.1f}%)"
+                )
+
+            except ValueError as ve:
+                # Schema validation error - log detailed diagnostics
+                self.logger.error(f"NMS schema validation failed: {ve}")
+                self.logger.error("Continuing without NMS. Check logs above for detailed diagnostics.")
+
+            except Exception as e:
+                # Unexpected error - log full details
+                import traceback
+                self.logger.error(f"NMS failed with unexpected error: {e}")
+                self.logger.error(f"Traceback: {traceback.format_exc()}")
+                self.logger.error("Continuing without NMS.")
 
         processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
         if config.target_categories:
             processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
             self.logger.debug("Applied category filtering")
+
+            # ===== DEBUG POINT 7: AFTER TARGET CATEGORY FILTER =====
+            self._log_detection_stats(processed_data, "07_AFTER_TARGET_FILTER")
 
 
         if config.enable_smoothing:
@@ -149,14 +237,34 @@ class VehicleMonitoringDroneViewUseCase(BaseProcessor):
                 self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
             processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
 
+        # ===== DEBUG POINT 8: AFTER SMOOTHING =====
+        self._log_detection_stats(processed_data, "08_AFTER_SMOOTHING")
+
         try:
             from ..advanced_tracker import AdvancedTracker
             from ..advanced_tracker.config import TrackerConfig
             if self.tracker is None:
-                tracker_config = TrackerConfig()
+                tracker_config = TrackerConfig(
+                    # CLASS AGGREGATION: Map from use case config
+                    enable_class_aggregation=config.enable_class_aggregation,
+                    class_aggregation_window_size=config.class_aggregation_window_size
+                )
                 self.tracker = AdvancedTracker(tracker_config)
                 self.logger.info("Initialized AdvancedTracker for Vehicle Monitoring Drone View Use Case")
+
+                if config.enable_class_aggregation:
+                    self.logger.info(
+                        f"AdvancedTracker initialized with class aggregation "
+                        f"(window_size={config.class_aggregation_window_size})"
+                    )
+                else:
+                    self.logger.info("AdvancedTracker initialized without class aggregation")
+
             processed_data = self.tracker.update(processed_data)
+
+            # ===== DEBUG POINT 9: AFTER TRACKING =====
+            self._log_detection_stats(processed_data, "09_AFTER_TRACKING")
+
         except Exception as e:
             self.logger.warning(f"AdvancedTracker failed: {e}")
 
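The new track_class_aggregator.py (+128 lines in the file summary) is not excerpted in these hunks, so the following is only a plausible sketch of windowed class aggregation as the config suggests: a majority vote over the last window_size per-frame class predictions for each track. The real TrackClassAggregator may differ:

from collections import Counter, defaultdict, deque

class TrackClassAggregator:
    # Hypothetical sketch: stabilize a track's class label by majority vote
    # over a sliding window of recent per-frame predictions.
    def __init__(self, window_size=30):  # ~1 second at 30 FPS
        self.window_size = window_size
        self.history = defaultdict(lambda: deque(maxlen=window_size))

    def update(self, track_id, predicted_class):
        window = self.history[track_id]
        window.append(predicted_class)
        # The majority class across the window wins, so a few misclassified
        # frames cannot flip the track's label.
        return Counter(window).most_common(1)[0][0]

agg = TrackClassAggregator(window_size=30)
labels = ["car"] * 8 + ["truck"] * 3 + ["car"] * 4   # transient truck misreads
print({agg.update(track_id=5, predicted_class=c) for c in labels})  # {'car'}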
@@ -1004,4 +1112,112 @@ class VehicleMonitoringDroneViewUseCase(BaseProcessor):
         return self._format_timestamp(self._tracking_start_time)
 
     def _set_tracking_start_time(self) -> None:
-        self._tracking_start_time = time.time()
+        self._tracking_start_time = time.time()
+
+
+    def _log_detection_stats(self, data: Any, stage_name: str, show_samples: bool = False) -> None:
+        """
+        Log detailed detection statistics at any pipeline stage.
+
+        Args:
+            data: Detection data (list or dict format)
+            stage_name: Name of the pipeline stage for identification
+            show_samples: If True, show sample detection structure
+        """
+        separator = "=" * 80
+        print(f"\n{separator}")
+        print(f"[DETECTION_STATS] Stage: {stage_name}")
+        print(separator)
+
+        # Handle different data formats
+        detections = []
+        if isinstance(data, list):
+            detections = data
+        elif isinstance(data, dict):
+            # Frame-based format
+            for frame_id, frame_dets in data.items():
+                if isinstance(frame_dets, list):
+                    detections.extend(frame_dets)
+
+        if not detections:
+            print(f" Total Detections: 0")
+            print(separator)
+            return
+
+        # Calculate statistics
+        total_count = len(detections)
+
+        # Count by category
+        category_counts = {}
+        confidence_sum = {}
+        confidence_min = {}
+        confidence_max = {}
+        bbox_format_count = {"x1/y1/x2/y2": 0, "xmin/ymin/xmax/ymax": 0, "other": 0}
+
+        for det in detections:
+            if not isinstance(det, dict):
+                continue
+
+            # Category counting
+            cat = det.get('category', 'UNKNOWN')
+            category_counts[cat] = category_counts.get(cat, 0) + 1
+
+            # Confidence stats
+            conf = det.get('confidence', 0.0)
+            if cat not in confidence_sum:
+                confidence_sum[cat] = 0.0
+                confidence_min[cat] = conf
+                confidence_max[cat] = conf
+            confidence_sum[cat] += conf
+            confidence_min[cat] = min(confidence_min[cat], conf)
+            confidence_max[cat] = max(confidence_max[cat], conf)
+
+            # BBox format detection
+            bbox = det.get('bounding_box', det.get('bbox', {}))
+            if isinstance(bbox, dict):
+                if 'x1' in bbox and 'y1' in bbox:
+                    bbox_format_count["x1/y1/x2/y2"] += 1
+                elif 'xmin' in bbox and 'ymin' in bbox:
+                    bbox_format_count["xmin/ymin/xmax/ymax"] += 1
+                else:
+                    bbox_format_count["other"] += 1
+
+        # Print summary
+        print(f" Total Detections: {total_count}")
+        print(f"\n Category Distribution:")
+
+        # Sort categories by count (descending)
+        sorted_cats = sorted(category_counts.items(), key=lambda x: x[1], reverse=True)
+
+        for cat, count in sorted_cats:
+            percentage = (count / total_count) * 100
+            avg_conf = confidence_sum[cat] / count
+            min_conf = confidence_min[cat]
+            max_conf = confidence_max[cat]
+
+            print(f" [{cat:20s}] Count: {count:4d} ({percentage:5.1f}%) | "
+                  f"Conf: avg={avg_conf:.3f}, min={min_conf:.3f}, max={max_conf:.3f}")
+
+        # Print bbox format distribution
+        print(f"\n BBox Format Distribution:")
+        for fmt, count in bbox_format_count.items():
+            if count > 0:
+                percentage = (count / total_count) * 100
+                print(f" {fmt:25s}: {count:4d} ({percentage:5.1f}%)")
+
+        # Show sample detection structure if requested
+        if show_samples and detections:
+            print(f"\n Sample Detection Structure:")
+            sample = detections[0]
+            print(f" Keys: {list(sample.keys())}")
+            print(f" Category: {sample.get('category')} (type: {type(sample.get('category')).__name__})")
+            print(f" Confidence: {sample.get('confidence')} (type: {type(sample.get('confidence')).__name__})")
+
+            bbox = sample.get('bounding_box', sample.get('bbox', {}))
+            if isinstance(bbox, dict):
+                print(f" BBox Keys: {list(bbox.keys())}")
+                if bbox:
+                    first_key = list(bbox.keys())[0]
+                    print(f" BBox Coord Type: {type(bbox[first_key]).__name__}")
+
+        print(separator)