matrice-analytics 0.1.106__py3-none-any.whl → 0.1.124__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/post_processing/__init__.py +22 -0
- matrice_analytics/post_processing/config.py +15 -0
- matrice_analytics/post_processing/core/config.py +107 -1
- matrice_analytics/post_processing/face_reg/face_recognition.py +2 -2
- matrice_analytics/post_processing/post_processor.py +16 -0
- matrice_analytics/post_processing/usecases/__init__.py +9 -0
- matrice_analytics/post_processing/usecases/crowdflow.py +1088 -0
- matrice_analytics/post_processing/usecases/footfall.py +103 -62
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +2 -1
- matrice_analytics/post_processing/usecases/parking_lot_analytics.py +1137 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +30 -4
- matrice_analytics/post_processing/usecases/vehicle_monitoring_drone_view.py +33 -6
- matrice_analytics/post_processing/usecases/vehicle_monitoring_parking_lot.py +18 -2
- matrice_analytics/post_processing/usecases/vehicle_monitoring_wrong_way.py +1021 -0
- matrice_analytics/post_processing/utils/alert_instance_utils.py +18 -5
- matrice_analytics/post_processing/utils/business_metrics_manager_utils.py +25 -2
- matrice_analytics/post_processing/utils/incident_manager_utils.py +12 -1
- matrice_analytics/post_processing/utils/parking_analytics_tracker.py +359 -0
- matrice_analytics/post_processing/utils/wrong_way_tracker.py +670 -0
- {matrice_analytics-0.1.106.dist-info → matrice_analytics-0.1.124.dist-info}/METADATA +1 -1
- {matrice_analytics-0.1.106.dist-info → matrice_analytics-0.1.124.dist-info}/RECORD +24 -19
- {matrice_analytics-0.1.106.dist-info → matrice_analytics-0.1.124.dist-info}/WHEEL +0 -0
- {matrice_analytics-0.1.106.dist-info → matrice_analytics-0.1.124.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice_analytics-0.1.106.dist-info → matrice_analytics-0.1.124.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1088 @@
from typing import Any, Dict, List, Optional
from dataclasses import asdict
import time
import math
import numpy as np
from datetime import datetime, timezone
from collections import defaultdict, deque

from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
from ..utils import (
    filter_by_confidence,
    filter_by_categories,
    apply_category_mapping,
    count_objects_by_category,
    count_objects_in_zones,
    calculate_counting_summary,
    match_results_structure,
    bbox_smoothing,
    BBoxSmoothingConfig,
    BBoxSmoothingTracker
)
from dataclasses import dataclass, field
from ..core.config import BaseConfig, AlertConfig, ZoneConfig

class TrajectoryCorrector:
    """
    Handles velocity-fusion logic to correct model orientation errors.
    Stores a history of track centers and applies EMA smoothing.
    """
    def __init__(self):
        # track_id -> { "centers": deque, "angles": deque, "smooth_angle": float }
        self.history = defaultdict(lambda: {
            "centers": deque(maxlen=10),
            "angles": deque(maxlen=5),
            "smooth_angle": None
        })

    def get_direction_label(self, angle):
        """
        Map a heading angle (degrees) to a front/back/left/right label.
        """
        if angle is None: return "unknown"
        angle = angle % 360
        if 45 <= angle < 135: return "back"
        elif 135 <= angle < 225: return "left"
        elif 225 <= angle < 315: return "front"
        else: return "right"
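
    # Quadrant mapping, for reference (a brief illustration, not in the
    # original source): 0 deg -> "right", 90 deg -> "back",
    # 180 deg -> "left", 270 deg -> "front"; the quadrant boundaries fall
    # at 45, 135, 225 and 315 degrees.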

    def update_and_get_label(self, track_id, center, raw_angle_deg):
        """
        1. Fixes Angle (+90)
        2. Calculates Velocity
        3. Applies EMA Smoothing
        4. Returns (Smooth_Angle, Label_String)
        """
        state = self.history[track_id]
        state["centers"].append(center)

        # --- FIX 1: ROTATE MODEL ANGLE ---
        if raw_angle_deg is None: raw_angle_deg = 0.0
        fixed_raw_angle = (raw_angle_deg + 90) % 360
        state["angles"].append(fixed_raw_angle)

        # --- FIX 2: CALCULATE VELOCITY ---
        motion_angle = self._compute_motion_angle(state["centers"])

        # Decide target angle: prefer observed motion, then the rotated
        # model angle, then the previous smoothed value
        if motion_angle is not None:
            target_angle = motion_angle
        elif fixed_raw_angle is not None:
            target_angle = fixed_raw_angle
        elif state["smooth_angle"] is not None:
            target_angle = state["smooth_angle"]
        else:
            target_angle = 0.0

        # --- FIX 3: EMA SMOOTHING ---
        alpha = 0.2

        if state["smooth_angle"] is None:
            state["smooth_angle"] = target_angle
        else:
            prev_rad = math.radians(state["smooth_angle"])
            curr_rad = math.radians(target_angle)

            new_sin = (1 - alpha) * math.sin(prev_rad) + alpha * math.sin(curr_rad)
            new_cos = (1 - alpha) * math.cos(prev_rad) + alpha * math.cos(curr_rad)

            state["smooth_angle"] = math.degrees(math.atan2(new_sin, new_cos)) % 360

        final_angle = state["smooth_angle"]
        label = self.get_direction_label(final_angle)

        return final_angle, label

    def _compute_motion_angle(self, centers):
        if len(centers) < 2:
            return None

        # Look back up to 5 frames for stability
        lookback = min(len(centers), 5)
        (x_past, y_past) = centers[-lookback]
        (x_now, y_now) = centers[-1]

        dx = x_now - x_past
        dy = y_now - y_past

        # Ignore near-static tracks (threshold: 0.5 pixels)
        if math.hypot(dx, dy) < 0.5:
            return None

        return math.degrees(math.atan2(-dy, dx)) % 360
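
# Usage sketch (illustrative, not part of the diff). With alpha = 0.2 the
# smoothed heading converges gradually: a track seeded at 90 deg ("back")
# that then moves steadily along +x (motion angle 0 deg) flips to "right"
# only after about four consecutive updates, since the EMA vector needs
# 0.8**k < 0.5 before the label crosses the 45 deg boundary.
#
#     corrector = TrajectoryCorrector()
#     corrector.update_and_get_label("t1", (0, 0), raw_angle_deg=0.0)   # -> (90.0, "back")
#     for x in range(10, 60, 10):
#         angle, label = corrector.update_and_get_label("t1", (x, 0), raw_angle_deg=0.0)
#     # angle has decayed from 90 deg toward 0 deg (~26 deg); label == "right"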


@dataclass
class CrowdflowConfig(BaseConfig):
    """Configuration for the crowdflow use case."""

    # Smoothing configuration
    enable_smoothing: bool = False
    smoothing_algorithm: str = "observability"  # "window" or "observability"
    smoothing_window_size: int = 20
    smoothing_cooldown_frames: int = 5
    smoothing_confidence_range_factor: float = 0.5

    # Zone configuration
    zone_config: Optional[ZoneConfig] = None

    # Counting parameters
    enable_unique_counting: bool = True
    time_window_minutes: int = 60

    # Category mapping
    person_categories: List[str] = field(default_factory=lambda: ["person"])
    index_to_category: Optional[Dict[int, str]] = None

    # Alert configuration
    alert_config: Optional[AlertConfig] = None

    target_categories: List[str] = field(
        default_factory=lambda: ['person']
    )

    def validate(self) -> List[str]:
        """Validate crowdflow configuration."""
        errors = super().validate()

        if self.time_window_minutes <= 0:
            errors.append("time_window_minutes must be positive")

        if not self.person_categories:
            errors.append("person_categories cannot be empty")

        # Validate nested configurations
        if self.zone_config:
            errors.extend(self.zone_config.validate())

        if self.alert_config:
            errors.extend(self.alert_config.validate())

        return errors
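
    # Validation sketch (illustrative; assumes the inherited BaseConfig
    # fields all have defaults):
    #
    #     cfg = CrowdflowConfig(time_window_minutes=0, person_categories=[])
    #     cfg.validate()
    #     # -> includes "time_window_minutes must be positive" and
    #     #    "person_categories cannot be empty", plus any BaseConfig errors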


class CrowdflowUseCase(BaseProcessor):
    # CATEGORY_DISPLAY = {
    #     "person": "Person"
    # }

    def __init__(self):
        super().__init__("crowdflow")
        self.category = "retail"
        self.CASE_TYPE: Optional[str] = 'crowdflow'
        self.CASE_VERSION: Optional[str] = '1.1'
        self.target_categories = ['person']
        self.smoothing_tracker = None
        self.tracker = None

        self.GRID_SIZE = 180
        self.MIN_TRACKS_PER_GRID = 6

        self._track_trajectories = defaultdict(lambda: deque(maxlen=30))

        self._grid_trajectories = defaultdict(
            lambda: defaultdict(lambda: deque(maxlen=30))
        )
        # -------------------------------
        # Per-grid-cell directional flow state
        # -------------------------------
        self._flow_grid = defaultdict(lambda: {
            "sum_sin": 0.0,
            "sum_cos": 0.0,
            "count": 0,

            # Track IDs seen this frame, per direction (cleared every frame)
            "frame_tracks": {
                "front": set(),
                "back": set(),
                "left": set(),
                "right": set()
            },

            # Track IDs accumulated over a decaying window, per direction
            "window_tracks": {
                "front": set(),
                "back": set(),
                "left": set(),
                "right": set()
            }
        })

        # Initialize the velocity-fusion logic
        self.trajectory_corrector = TrajectoryCorrector()
        self._total_frame_counter = 0
        self._global_frame_offset = 0
        self._tracking_start_time = None
        self._track_aliases: Dict[Any, Any] = {}
        self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
        self._track_merge_iou_threshold: float = 0.05
        self._track_merge_time_window: float = 7.0
        self._ascending_alert_list: List[int] = []
        self.current_incident_end_timestamp: str = "N/A"
        self.start_timer = None

    def _compute_avg_displacement(self, trajectories, min_len=6):
        starts, ends = [], []

        for t in trajectories:
            if len(t) >= min_len:
                starts.append(t[0])
                ends.append(t[-1])

        if not starts:
            return None

        sx = int(sum(p[0] for p in starts) / len(starts))
        sy = int(sum(p[1] for p in starts) / len(starts))
        ex = int(sum(p[0] for p in ends) / len(ends))
        ey = int(sum(p[1] for p in ends) / len(ends))

        dx, dy = ex - sx, ey - sy
        angle = math.degrees(math.atan2(-dy, dx)) % 360

        return {
            "start": (sx, sy),
            "end": (ex, ey),
            "angle": angle,
            "track_count": len(starts)
        }
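
    # Worked example (illustrative): two trajectories of length >= min_len,
    # one from (0, 0) to (10, 0) and one from (0, 2) to (10, 2), average to
    # start (0, 1) and end (10, 1), i.e. dx = 10, dy = 0, angle = 0.0
    # (screen y grows downward, hence the atan2(-dy, dx)).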

    def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
                stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
        processing_start = time.time()
        if not isinstance(config, CrowdflowConfig):
            return self.create_error_result("Invalid config type", usecase=self.name, category=self.category, context=context)
        if context is None:
            context = ProcessingContext()

        fps = 25
        if stream_info:
            fps = stream_info.get("input_settings", {}).get("original_fps", 25)

        decay_interval = int(2 * fps)

        # Temporal decay of window tracks (~2 sec behavior): every
        # decay_interval frames, each window-track ID survives with p = 0.90
        if self._total_frame_counter % decay_interval == 0:
            for cell in self._flow_grid.values():
                for d in cell["window_tracks"]:
                    cell["window_tracks"][d] = {
                        tid for tid in cell["window_tracks"][d]
                        if np.random.rand() < 0.90
                    }

        # Reset per-frame snapshot FIRST
        for cell in self._flow_grid.values():
            for d in cell["frame_tracks"]:
                cell["frame_tracks"][d].clear()

        # Then apply temporal decay to the directional sums
        for cell in self._flow_grid.values():
            cell["sum_sin"] *= 0.92
            cell["sum_cos"] *= 0.92
            cell["count"] *= 0.92

        input_format = match_results_structure(data)
        context.input_format = input_format
        context.confidence_threshold = config.confidence_threshold

        # Standard filtering: confidence threshold, category mapping, target categories
        if config.confidence_threshold is not None:
            processed_data = filter_by_confidence(data, config.confidence_threshold)
        else:
            processed_data = data

        if config.index_to_category:
            processed_data = apply_category_mapping(processed_data, config.index_to_category)

        if config.target_categories:
            processed_data = [d for d in processed_data if d.get('category') in self.target_categories]

        try:
            from ..advanced_tracker import AdvancedTracker
            from ..advanced_tracker.config import TrackerConfig
            if self.tracker is None:
                tracker_config = TrackerConfig(
                    track_high_thresh=0.4,
                    track_low_thresh=0.05,
                    new_track_thresh=0.3,
                    match_thresh=0.8)
                self.tracker = AdvancedTracker(tracker_config)
                self.logger.info("Initialized AdvancedTracker for crowdflow")

            # 1. Run the standard tracker (assigns track IDs)
            processed_data = self.tracker.update(processed_data)

            # =========================================================
            # Velocity-fusion: correct each detection's orientation
            # =========================================================
            for det in processed_data:
                track_id = det.get("track_id")

                # Stream-safe bbox normalization
                bbox = det.get("bbox") or det.get("bounding_box")

                if isinstance(bbox, dict):
                    bbox = [
                        bbox.get("xmin"),
                        bbox.get("ymin"),
                        bbox.get("xmax"),
                        bbox.get("ymax"),
                    ]

                # Hard safety guard
                if not bbox or len(bbox) < 4:
                    continue

                # Check for 'angle' (from predict.py), 'raw_angle' or 'orientation'
                raw_angle = det.get("angle", det.get("raw_angle", det.get("orientation", 0.0)))

                if track_id is not None and bbox:
                    # Calculate the bbox center (cx, cy)
                    cx = int((bbox[0] + bbox[2]) / 2)
                    cy = int((bbox[1] + bbox[3]) / 2)

                    # Run the correction (velocity + EMA + 90-degree fix),
                    # unpacking both the smoothed angle and the label
                    final_angle, direction_label = self.trajectory_corrector.update_and_get_label(
                        track_id,
                        (cx, cy),
                        raw_angle
                    )

                    # Overwrite the detection angle
                    det["orientation"] = final_angle  # for UI
                    det["angle"] = final_angle        # for analytics

                    # Save the direction label
                    det["direction"] = direction_label  # "front", "back", etc.

                    # -------------------------------
                    # Crowd flow trajectory capture
                    # -------------------------------
                    gx = int(cx // self.GRID_SIZE)
                    gy = int(cy // self.GRID_SIZE)

                    self._track_trajectories[track_id].append((cx, cy))
                    self._grid_trajectories[(gx, gy)][track_id].append((cx, cy))

                    cell = self._flow_grid[(gx, gy)]

                    rad = math.radians(final_angle)
                    cell["sum_cos"] += math.cos(rad)
                    cell["sum_sin"] += math.sin(rad)
                    cell["count"] += 1

                    cell["frame_tracks"][direction_label].add(track_id)
                    cell["window_tracks"][direction_label].add(track_id)

            # =========================================================

        except Exception as e:
            self.logger.warning(f"AdvancedTracker/Velocity failed: {e}")

        # ---------------------------------------------------------
        # Clean dead tracks (critical - prevents a memory leak)
        # ---------------------------------------------------------
        alive_ids = {
            d.get("track_id")
            for d in processed_data
            if d.get("track_id") is not None
        }

        # Clean grid-level trajectories
        for grid_key in list(self._grid_trajectories.keys()):
            for tid in list(self._grid_trajectories[grid_key].keys()):
                if tid not in alive_ids:
                    del self._grid_trajectories[grid_key][tid]

        # Clean global trajectory memory
        for tid in list(self._track_trajectories.keys()):
            if tid not in alive_ids:
                self._track_trajectories.pop(tid, None)

        self._update_tracking_state(processed_data)

        frame_number = None
        if stream_info:
            input_settings = stream_info.get("input_settings", {})
            start_frame = input_settings.get("start_frame")
            end_frame = input_settings.get("end_frame")
            if start_frame is not None and end_frame is not None and start_frame == end_frame:
                frame_number = start_frame

        general_counting_summary = calculate_counting_summary(data)
        counting_summary = self._count_categories(processed_data, config)
        total_counts = self.get_total_counts()
        counting_summary['total_counts'] = total_counts

        alerts = self._check_alerts(counting_summary, frame_number, config)
        predictions = self._extract_predictions(processed_data)

        incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
        tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
        business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, stream_info, is_empty=True)
        summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)

        incidents = incidents_list[0] if incidents_list else {}
        tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
        business_analytics = business_analytics_list[0] if business_analytics_list else {}
        summary = summary_list[0] if summary_list else {}
        agg_summary = {str(frame_number): {
            "incidents": incidents,
            "tracking_stats": tracking_stats,
            "business_analytics": business_analytics,
            "alerts": alerts,
            "human_text": summary}
        }

        context.mark_completed()
        result = self.create_result(
            data={"agg_summary": agg_summary},
            usecase=self.name,
            category=self.category,
            context=context
        )
        proc_time = time.time() - processing_start
        processing_latency_ms = proc_time * 1000.0
        processing_fps = (1.0 / proc_time) if proc_time > 0 else None
        # Report per-frame performance metrics (printed rather than logged)
        self._total_frame_counter += 1
        print("latency in ms:", processing_latency_ms, "| Throughput fps:", processing_fps, "| Frame_Number:", self._total_frame_counter)
        return result
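
    # Shape of the returned payload, for reference (keys taken from the code
    # above; how ProcessingResult exposes it depends on create_result):
    #
    #     {"agg_summary": {"<frame_number>": {
    #         "incidents": {...}, "tracking_stats": {...},
    #         "business_analytics": {...}, "alerts": [...],
    #         "human_text": "..."}}}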

    def _check_alerts(self, summary: dict, frame_number: Any, config: CrowdflowConfig) -> List[Dict]:
        def get_trend(data, lookback=900, threshold=0.6):
            window = data[-lookback:] if len(data) >= lookback else data
            if len(window) < 2:
                return True
            increasing = 0
            total = 0
            for i in range(1, len(window)):
                if window[i] >= window[i - 1]:
                    increasing += 1
                total += 1
            ratio = increasing / total
            return ratio >= threshold
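
        # e.g. get_trend([1, 2, 2, 1]) checks 3 consecutive pairs, of which
        # 2 are non-decreasing (2 >= 1, 2 >= 2), so ratio = 2/3 >= 0.6 and
        # the trend reads as ascending at the default threshold (illustrative).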

        frame_key = str(frame_number) if frame_number is not None else "current_frame"
        alerts = []
        total_detections = summary.get("total_count", 0)
        total_counts_dict = summary.get("total_counts", {})
        per_category_count = summary.get("per_category_count", {})

        if not config.alert_config:
            return alerts

        if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
            for category, threshold in config.alert_config.count_thresholds.items():
                if category == "all" and total_detections > threshold:
                    alerts.append({
                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                        "alert_id": f"alert_{category}_{frame_key}",
                        "incident_category": self.CASE_TYPE,
                        "threshold_level": threshold,
                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                          getattr(config.alert_config, 'alert_value', ['JSON']))}
                    })
                elif category in per_category_count and per_category_count[category] > threshold:
                    alerts.append({
                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                        "alert_id": f"alert_{category}_{frame_key}",
                        "incident_category": self.CASE_TYPE,
                        "threshold_level": threshold,
                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                          getattr(config.alert_config, 'alert_value', ['JSON']))}
                    })
        return alerts

    def _generate_incidents(self, counting_summary: Dict, alerts: List, config: CrowdflowConfig,
                            frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
        incidents = []
        total_detections = counting_summary.get("total_count", 0)
        current_timestamp = self._get_current_timestamp_str(stream_info)
        camera_info = self.get_camera_info_from_stream(stream_info)

        self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list

        if total_detections > 0:
            level = "low"
            intensity = 5.0
            start_timestamp = self._get_start_timestamp_str(stream_info)
            if start_timestamp and self.current_incident_end_timestamp == 'N/A':
                self.current_incident_end_timestamp = 'Incident still active'
            elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
                    self.current_incident_end_timestamp = current_timestamp
            elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
                self.current_incident_end_timestamp = 'N/A'

            if config.alert_config and config.alert_config.count_thresholds:
                threshold = config.alert_config.count_thresholds.get("all", 15)
                intensity = min(10.0, (total_detections / threshold) * 10)
                if intensity >= 9:
                    level = "critical"
                    self._ascending_alert_list.append(3)
                elif intensity >= 7:
                    level = "significant"
                    self._ascending_alert_list.append(2)
                elif intensity >= 5:
                    level = "medium"
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    self._ascending_alert_list.append(0)
            else:
                if total_detections > 30:
                    level = "critical"
                    intensity = 10.0
                    self._ascending_alert_list.append(3)
                elif total_detections > 25:
                    level = "significant"
                    intensity = 9.0
                    self._ascending_alert_list.append(2)
                elif total_detections > 15:
                    level = "medium"
                    intensity = 7.0
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    intensity = min(10.0, total_detections / 3.0)
                    self._ascending_alert_list.append(0)

            human_text_lines = [f"COUNTING INCIDENTS DETECTED @ {current_timestamp}:"]
            human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
            human_text = "\n".join(human_text_lines)

            alert_settings = []
            if config.alert_config and hasattr(config.alert_config, 'alert_type'):
                alert_settings.append({
                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                    "incident_category": self.CASE_TYPE,
                    "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                    "ascending": True,
                    "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                      getattr(config.alert_config, 'alert_value', ['JSON']))}
                })

            event = self.create_incident(
                incident_id=f"{self.CASE_TYPE}_{frame_number}",
                incident_type=self.CASE_TYPE,
                severity_level=level,
                human_text=human_text,
                camera_info=camera_info,
                alerts=alerts,
                alert_settings=alert_settings,
                start_time=start_timestamp,
                end_time=self.current_incident_end_timestamp,
                level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7}
            )
            incidents.append(event)
        else:
            self._ascending_alert_list.append(0)
            incidents.append({})
        return incidents
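
    # Intensity example (illustrative): with count_thresholds["all"] = 15 and
    # 12 detections, intensity = min(10, 12 / 15 * 10) = 8.0, which maps to
    # the "significant" level (>= 7) in the branch above.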

    def _generate_tracking_stats(self, counting_summary: Dict, alerts: List, config: CrowdflowConfig,
                                 frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
        camera_info = self.get_camera_info_from_stream(stream_info)
        tracking_stats = []
        total_detections = counting_summary.get("total_count", 0)
        total_counts_dict = counting_summary.get("total_counts", {})
        per_category_count = counting_summary.get("per_category_count", {})
        current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
        start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
        high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
        high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)

        total_counts = [{"category": cat, "count": count} for cat, count in total_counts_dict.items() if count > 0]
        current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items() if count > 0 or total_detections > 0]

        detections = []
        for detection in counting_summary.get("detections", []):
            bbox = detection.get("bounding_box", {})
            category = detection.get("category", "person")
            if detection.get("masks"):
                segmentation = detection.get("masks", [])
                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
            elif detection.get("segmentation"):
                segmentation = detection.get("segmentation")
                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
            elif detection.get("mask"):
                segmentation = detection.get("mask")
                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
            else:
                detection_obj = self.create_detection_object(category, bbox)
            detections.append(detection_obj)

        alert_settings = []
        if config.alert_config and hasattr(config.alert_config, 'alert_type'):
            alert_settings.append({
                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                "incident_category": self.CASE_TYPE,
                "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                "ascending": True,
                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                  getattr(config.alert_config, 'alert_value', ['JSON']))}
            })

        human_text_lines = []
        human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
        for cat, count in per_category_count.items():
            human_text_lines.append(f"\t- People Detected: {count}")
        human_text_lines.append("")
        # human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
        # for cat, count in total_counts_dict.items():
        #     if count > 0:
        #         human_text_lines.append("")
        #         human_text_lines.append(f"\t- Total unique people count: {count}")
        # if alerts:
        #     for alert in alerts:
        #         human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
        # else:
        #     human_text_lines.append("Alerts: None")
        human_text = "\n".join(human_text_lines)

        reset_settings = [{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]
        tracking_stat = self.create_tracking_stats(
            total_counts=total_counts,
            current_counts=current_counts,
            detections=detections,
            human_text=human_text,
            camera_info=camera_info,
            alerts=alerts,
            alert_settings=alert_settings,
            reset_settings=reset_settings,
            start_time=high_precision_start_timestamp,
            reset_time=high_precision_reset_timestamp
        )
        # -------------------------------
        # Crowd Flow Aggregation (GRID)
        # -------------------------------
        crowd_flow = []

        for (gx, gy), tracks in self._grid_trajectories.items():
            avg = self._compute_avg_displacement(tracks.values())
            if not avg or avg["track_count"] < self.MIN_TRACKS_PER_GRID:
                continue

            cell = self._flow_grid[(gx, gy)]

            front = len(cell["window_tracks"]["front"])
            back = len(cell["window_tracks"]["back"])
            left = len(cell["window_tracks"]["left"])
            right = len(cell["window_tracks"]["right"])

            conflict = (
                (front >= self.MIN_TRACKS_PER_GRID and back >= self.MIN_TRACKS_PER_GRID) or
                (left >= self.MIN_TRACKS_PER_GRID and right >= self.MIN_TRACKS_PER_GRID)
            )

            avg_direction_angle = None
            if cell["count"] >= self.MIN_TRACKS_PER_GRID:
                avg_direction_angle = (
                    math.degrees(math.atan2(cell["sum_sin"], cell["sum_cos"])) % 360
                )
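
            # Circular mean, not arithmetic mean (illustrative): headings of
            # 350 deg and 10 deg give sum_sin ~ -0.174 + 0.174 = 0 and
            # sum_cos ~ 0.985 + 0.985, so atan2 yields ~0 deg rather than
            # the naive average of 180 deg.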

            crowd_flow.append({
                "grid_x": gx,
                "grid_y": gy,
                "conflict": conflict,

                # 🔵 BLUE: average displacement (trajectory-based)
                "avg_displacement": {
                    "start": avg["start"],
                    "end": avg["end"],
                    "angle": avg["angle"],
                    "track_count": avg["track_count"]
                },

                # 🟢 GREEN: directional flow aggregation
                "direction_flow": {
                    "avg_angle": avg_direction_angle,
                    "counts": {
                        "front": len(cell["frame_tracks"]["front"]),
                        "back": len(cell["frame_tracks"]["back"]),
                        "left": len(cell["frame_tracks"]["left"]),
                        "right": len(cell["frame_tracks"]["right"]),
                    }
                }
            })

        tracking_stat["crowd_flow"] = crowd_flow

        tracking_stat['target_categories'] = self.target_categories
        tracking_stats.append(tracking_stat)
        return tracking_stats

    def _generate_business_analytics(self, counting_summary: Dict, alerts: Any, config: CrowdflowConfig,
                                     stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
        if is_empty:
            return []
        return []  # no business analytics implemented for this use case yet

    def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
        """
        Generate a human_text string for the tracking stats, incidents, business analytics and alerts.
        """
        lines = []
        lines.append("Application Name: " + self.CASE_TYPE)
        lines.append("Application Version: " + self.CASE_VERSION)
        # if len(incidents) > 0:
        #     lines.append("Incidents: " + f"\n\t{incidents[0].get('human_text', 'No incidents detected')}")
        if len(tracking_stats) > 0:
            lines.append("Tracking Statistics: " + f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}")
        if len(business_analytics) > 0:
            lines.append("Business Analytics: " + f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}")

        if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
            lines.append("Summary: " + "No Summary Data")

        return ["\n".join(lines)]

    def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
        frame_track_ids = set()
        for det in detections:
            tid = det.get('track_id')
            if tid is not None:
                frame_track_ids.add(tid)
        total_track_ids = set()
        for s in getattr(self, '_per_category_total_track_ids', {}).values():
            total_track_ids.update(s)
        return {
            "total_count": len(total_track_ids),
            "current_frame_count": len(frame_track_ids),
            "total_unique_track_ids": len(total_track_ids),
            "current_frame_track_ids": list(frame_track_ids),
            "last_update_time": time.time(),
            "total_frames_processed": getattr(self, '_total_frame_counter', 0)
        }

    def _update_tracking_state(self, detections: list):
        if not hasattr(self, "_per_category_total_track_ids"):
            self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
            self._current_frame_track_ids = {cat: set() for cat in self.target_categories}

        for det in detections:
            cat = det.get("category")
            raw_track_id = det.get("track_id")
            if cat not in self.target_categories or raw_track_id is None:
                continue
            bbox = det.get("bbox") or det.get("bounding_box")

            if isinstance(bbox, dict):
                bbox = [
                    bbox.get("xmin"),
                    bbox.get("ymin"),
                    bbox.get("xmax"),
                    bbox.get("ymax"),
                ]

            if not bbox or len(bbox) < 4:
                continue

            canonical_id = self._merge_or_register_track(raw_track_id, bbox)

            det["track_id"] = canonical_id
            self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
            self._current_frame_track_ids[cat].add(canonical_id)

    def get_total_counts(self):
        return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}

    def _format_timestamp_for_stream(self, timestamp: float) -> str:
        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
        return dt.strftime('%Y:%m:%d %H:%M:%S')

    def _format_timestamp_for_video(self, timestamp: float) -> str:
        hours = int(timestamp // 3600)
        minutes = int((timestamp % 3600) // 60)
        seconds = round(float(timestamp % 60), 2)
        # Two decimal places, matching the rounding above and the
        # "00:00:00.00" default used elsewhere
        return f"{hours:02d}:{minutes:02d}:{seconds:05.2f}"

    def _format_timestamp(self, timestamp: Any) -> str:
        """Format a timestamp to match the current timestamp format: YYYY:MM:DD HH:MM:SS.

        The input can be either:
        1. A numeric Unix timestamp (``float`` / ``int``) – it will be converted to datetime.
        2. A string in the format ``YYYY-MM-DD-HH:MM:SS.ffffff UTC``.

        The returned value will be in the format: YYYY:MM:DD HH:MM:SS (no milliseconds, no UTC suffix).

        Example
        -------
        >>> self._format_timestamp("2025-10-27-19:31:20.187574 UTC")
        '2025:10:27 19:31:20'
        """

        # Convert numeric timestamps to datetime first
        if isinstance(timestamp, (int, float)):
            dt = datetime.fromtimestamp(timestamp, timezone.utc)
            return dt.strftime('%Y:%m:%d %H:%M:%S')

        # Ensure we are working with a string from here on
        if not isinstance(timestamp, str):
            return str(timestamp)

        # Remove ' UTC' suffix if present
        timestamp_clean = timestamp.replace(' UTC', '').strip()

        # Remove milliseconds if present (everything after the last dot)
        if '.' in timestamp_clean:
            timestamp_clean = timestamp_clean.split('.')[0]

        # Parse the timestamp string and convert to desired format
        try:
            # Handle format: YYYY-MM-DD-HH:MM:SS
            if timestamp_clean.count('-') >= 2:
                # Replace the date-part dashes with colons and join the time part with a space
                parts = timestamp_clean.split('-')
                if len(parts) >= 4:
                    # parts = ['2025', '10', '27', '19:31:20']
                    formatted = f"{parts[0]}:{parts[1]}:{parts[2]} {'-'.join(parts[3:])}"
                    return formatted
        except Exception:
            pass

        # If parsing fails, return the cleaned string as-is
        return timestamp_clean

    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str] = None) -> str:
        """Get formatted current timestamp based on stream type."""

        if not stream_info:
            return "00:00:00.00"
        if precision:
            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
                if frame_id:
                    start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
                else:
                    start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
                stream_time_str = self._format_timestamp_for_video(start_time)

                return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
            else:
                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")

        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
            if frame_id:
                start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
            else:
                start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)

            stream_time_str = self._format_timestamp_for_video(start_time)

            return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
        else:
            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
            if stream_time_str:
                try:
                    timestamp_str = stream_time_str.replace(" UTC", "")
                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
                    return self._format_timestamp_for_stream(timestamp)
                except Exception:
                    return self._format_timestamp_for_stream(time.time())
            else:
                return self._format_timestamp_for_stream(time.time())

    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
        """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
        if not stream_info:
            return "00:00:00"

        if precision:
            if self.start_timer is None:
                candidate = stream_info.get("input_settings", {}).get("stream_time")
                if not candidate or candidate == "NA":
                    candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
                self.start_timer = candidate
                return self._format_timestamp(self.start_timer)
            elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
                candidate = stream_info.get("input_settings", {}).get("stream_time")
                if not candidate or candidate == "NA":
                    candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
                self.start_timer = candidate
                return self._format_timestamp(self.start_timer)
            else:
                return self._format_timestamp(self.start_timer)

        if self.start_timer is None:
            # Prefer direct input_settings.stream_time if available and not NA
            candidate = stream_info.get("input_settings", {}).get("stream_time")
            if not candidate or candidate == "NA":
                # Fall back to the nested stream_info.stream_time used by the current-timestamp path
                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
                if stream_time_str:
                    try:
                        timestamp_str = stream_time_str.replace(" UTC", "")
                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
                        candidate = datetime.fromtimestamp(self._tracking_start_time, timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
                    except Exception:
                        candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
                else:
                    candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
            self.start_timer = candidate
            return self._format_timestamp(self.start_timer)
        elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
            candidate = stream_info.get("input_settings", {}).get("stream_time")
            if not candidate or candidate == "NA":
                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
                if stream_time_str:
                    try:
                        timestamp_str = stream_time_str.replace(" UTC", "")
                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                        ts = dt.replace(tzinfo=timezone.utc).timestamp()
                        candidate = datetime.fromtimestamp(ts, timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
                    except Exception:
                        candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
                else:
                    candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
            self.start_timer = candidate
            return self._format_timestamp(self.start_timer)

        else:
            if self.start_timer is not None and self.start_timer != "NA":
                return self._format_timestamp(self.start_timer)

            if self._tracking_start_time is None:
                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
                if stream_time_str:
                    try:
                        timestamp_str = stream_time_str.replace(" UTC", "")
                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
                    except Exception:
                        self._tracking_start_time = time.time()
                else:
                    self._tracking_start_time = time.time()

            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
            dt = dt.replace(minute=0, second=0, microsecond=0)
            return dt.strftime('%Y:%m:%d %H:%M:%S')

    def _count_categories(self, detections: list, config: CrowdflowConfig) -> dict:
        # Note: counts are keyed by movement direction ("front"/"back"/...),
        # not by detection category
        counts = {}
        for det in detections:
            cat = det.get("direction") or "unknown"
            counts[cat] = counts.get(cat, 0) + 1
        return {
            "total_count": sum(counts.values()),
            "per_category_count": counts,
            "detections": [
                {
                    "bounding_box": det.get("bounding_box"),
                    "category": det.get("category"),
                    "direction": det.get("direction"),
                    "confidence": det.get("confidence"),
                    "track_id": det.get("track_id"),
                    "frame_id": det.get("frame_id"),
                    "angle": det.get("angle"),
                    "orientation": det.get("orientation")  # for UI arrows
                }
                for det in detections
            ]
        }

    def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
        return [
            {
                "category": det.get("category", "unknown"),
                "confidence": det.get("confidence", 0.0),
                "bounding_box": det.get("bounding_box", {})
            }
            for det in detections
        ]

    def _compute_iou(self, box1: Any, box2: Any) -> float:
        def _bbox_to_list(bbox):
            if bbox is None:
                return []
            if isinstance(bbox, list):
                return bbox[:4] if len(bbox) >= 4 else []
            if isinstance(bbox, dict):
                if "xmin" in bbox:
                    return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
                if "x1" in bbox:
                    return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
                values = [v for v in bbox.values() if isinstance(v, (int, float))]
                return values[:4] if len(values) >= 4 else []
            return []

        l1 = _bbox_to_list(box1)
        l2 = _bbox_to_list(box2)
        if len(l1) < 4 or len(l2) < 4:
            return 0.0
        x1_min, y1_min, x1_max, y1_max = l1
        x2_min, y2_min, x2_max, y2_max = l2
        x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
        y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
        x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
        y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
        inter_x_min = max(x1_min, x2_min)
        inter_y_min = max(y1_min, y2_min)
        inter_x_max = min(x1_max, x2_max)
        inter_y_max = min(y1_max, y2_max)
        inter_w = max(0.0, inter_x_max - inter_x_min)
        inter_h = max(0.0, inter_y_max - inter_y_min)
        inter_area = inter_w * inter_h
        area1 = (x1_max - x1_min) * (y1_max - y1_min)
        area2 = (x2_max - x2_min) * (y2_max - y2_min)
        union_area = area1 + area2 - inter_area
        return (inter_area / union_area) if union_area > 0 else 0.0
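
    # Worked example (illustrative): boxes [0, 0, 10, 10] and [5, 0, 15, 10]
    # intersect in a 5 x 10 region (area 50); union = 100 + 100 - 50 = 150,
    # so IoU = 50 / 150, roughly 0.333.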

    def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
        if raw_id is None or bbox is None:
            return raw_id
        now = time.time()
        if raw_id in self._track_aliases:
            canonical_id = self._track_aliases[raw_id]
            track_info = self._canonical_tracks.get(canonical_id)
            if track_info is not None:
                track_info["last_bbox"] = bbox
                track_info["last_update"] = now
                track_info["raw_ids"].add(raw_id)
            return canonical_id
        for canonical_id, info in self._canonical_tracks.items():
            if now - info["last_update"] > self._track_merge_time_window:
                continue
            iou = self._compute_iou(bbox, info["last_bbox"])
            if iou >= self._track_merge_iou_threshold:
                self._track_aliases[raw_id] = canonical_id
                info["last_bbox"] = bbox
                info["last_update"] = now
                info["raw_ids"].add(raw_id)
                return canonical_id
        canonical_id = raw_id
        self._track_aliases[raw_id] = canonical_id
        self._canonical_tracks[canonical_id] = {
            "last_bbox": bbox,
            "last_update": now,
            "raw_ids": {raw_id},
        }
        return canonical_id
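
    # Merge sketch (illustrative): if track 7 is lost and re-detected as
    # track 12 within _track_merge_time_window (7.0 s) with bbox overlap
    # above _track_merge_iou_threshold (0.05), track 12 is aliased back to
    # canonical ID 7, so unique-person totals are not inflated.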

    def _get_tracking_start_time(self) -> str:
        if self._tracking_start_time is None:
            return "N/A"
        return self._format_timestamp(self._tracking_start_time)

    def _set_tracking_start_time(self) -> None:
        self._tracking_start_time = time.time()