matrice 1.0.99245__py3-none-any.whl → 1.0.99246__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice/deploy/utils/post_processing/__init__.py +10 -2
- matrice/deploy/utils/post_processing/config.py +2 -0
- matrice/deploy/utils/post_processing/core/__init__.py +1 -0
- matrice/deploy/utils/post_processing/core/config.py +66 -1
- matrice/deploy/utils/post_processing/core/config_utils.py +85 -6
- matrice/deploy/utils/post_processing/processor.py +7 -3
- matrice/deploy/utils/post_processing/usecases/__init__.py +3 -0
- matrice/deploy/utils/post_processing/usecases/intrusion_detection.py +4 -4
- matrice/deploy/utils/post_processing/usecases/proximity_detection.py +1559 -0
- {matrice-1.0.99245.dist-info → matrice-1.0.99246.dist-info}/METADATA +1 -1
- {matrice-1.0.99245.dist-info → matrice-1.0.99246.dist-info}/RECORD +14 -13
- {matrice-1.0.99245.dist-info → matrice-1.0.99246.dist-info}/WHEEL +0 -0
- {matrice-1.0.99245.dist-info → matrice-1.0.99246.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice-1.0.99245.dist-info → matrice-1.0.99246.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1559 @@
|
|
1
|
+
"""
|
2
|
+
Proximity Detection use case implementation.
|
3
|
+
|
4
|
+
This module provides a clean implementation of proximity detection functionality
|
5
|
+
with zone-based analysis, tracking, and alerting capabilities.
|
6
|
+
"""
|
7
|
+
|
8
|
+
from typing import Any, Dict, List, Optional, Set
|
9
|
+
from dataclasses import asdict
|
10
|
+
import time
|
11
|
+
from datetime import datetime, timezone
|
12
|
+
|
13
|
+
from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
|
14
|
+
from ..core.config import ProximityConfig, ZoneConfig, AlertConfig
|
15
|
+
from ..utils import (
|
16
|
+
filter_by_confidence,
|
17
|
+
filter_by_categories,
|
18
|
+
apply_category_mapping,
|
19
|
+
count_objects_by_category,
|
20
|
+
count_objects_in_zones,
|
21
|
+
calculate_counting_summary,
|
22
|
+
match_results_structure,
|
23
|
+
bbox_smoothing,
|
24
|
+
BBoxSmoothingConfig,
|
25
|
+
BBoxSmoothingTracker,
|
26
|
+
calculate_iou
|
27
|
+
)
|
28
|
+
from ..utils.geometry_utils import get_bbox_center, point_in_polygon, get_bbox_bottom25_center
|
29
|
+
|
30
|
+
|
31
|
+
class ProximityUseCase(BaseProcessor):
|
32
|
+
"""Proximity Detection use case with zone analysis and alerting."""
|
33
|
+
|
34
|
+
def __init__(self):
    """Set up the Proximity Detection use case with empty tracking state."""
    super().__init__("proximity_detection")
    self.category = "security"
    self.CASE_TYPE: Optional[str] = 'proximity_detection'
    self.CASE_VERSION: Optional[str] = '1.3'

    # --- Unique-person counting state (accumulated across process() calls) ---
    self._total_track_ids = set()          # every unique track ID ever seen
    self._current_frame_track_ids = set()  # track IDs visible in the latest frame
    self._total_count = 0                  # cached size of _total_track_ids
    self._last_update_time = time.time()   # wall-clock time of last state refresh

    # --- Per-zone tracking state, keyed by zone name ---
    self._zone_current_track_ids = {}  # zone -> IDs currently inside
    self._zone_total_track_ids = {}    # zone -> every ID that ever entered
    self._zone_current_counts = {}     # zone -> current occupancy
    self._zone_total_counts = {}       # zone -> cumulative entries

    # --- Frame bookkeeping for chunked video processing ---
    self._total_frame_counter = 0      # frames processed over the object's lifetime
    self._global_frame_offset = 0      # maps chunk-local frame IDs to global ones
    self._frames_in_current_chunk = 0  # size of the chunk being processed now

    # Lazily created bbox smoothing tracker (see _apply_smoothing).
    self.smoothing_tracker = None

    # First timestamp observed; anchors the "TOTAL SINCE" summary line.
    self._tracking_start_time = None

    # --- Track-ID aliasing: collapse fragmented tracker IDs ---------------
    # ByteTrack may drop a person briefly and re-acquire them under a new
    # raw ID; these structures merge such fragments into one canonical ID
    # so unique-person counts are not inflated.
    self._track_aliases: Dict[Any, Any] = {}          # raw ID -> canonical ID
    self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}  # canonical ID -> last bbox, last-seen time, merged raw IDs

    # Two boxes whose IoU exceeds this are treated as the same person
    # (empirically chosen; tune in production if needed).
    self._track_merge_iou_threshold: float = 0.04
    # Only merge into canonical tracks refreshed within this many seconds,
    # so long-departed tracks are never resurrected.
    self._track_merge_time_window: float = 10.0

    # Rolling severity history (0-3 per frame) used for alert trend analysis.
    self._ascending_alert_list: List[int] = []
    # "N/A" / "Incident still active" / concrete timestamp state machine.
    self.current_incident_end_timestamp: str = "N/A"
|
91
|
+
|
92
|
+
|
93
|
+
def process(self, data: Any, config: ConfigProtocol,
            context: Optional[ProcessingContext] = None, stream_info: Optional[Any] = None) -> ProcessingResult:
    """
    Process proximity detection use case - automatically detects single or multi-frame structure.

    Args:
        data: Raw model output (detection or tracking format)
        config: Proximity detection configuration (must be a ProximityConfig)
        context: Processing context; created here if not provided
        stream_info: Stream information containing frame details (optional)

    Returns:
        ProcessingResult: Processing result with standardized agg_summary structure
    """
    # NOTE: removed unused `start_time = time.time()` — it was never read.
    try:
        # Ensure we have the right config type before touching its fields.
        if not isinstance(config, ProximityConfig):
            return self.create_error_result(
                "Invalid configuration type for proximity detection",
                usecase=self.name,
                category=self.category,
                context=context
            )

        # Initialize processing context if not provided
        if context is None:
            context = ProcessingContext()

        # Detect input format and record thresholds on the context.
        input_format = match_results_structure(data)
        context.input_format = input_format
        context.confidence_threshold = config.confidence_threshold

        is_multi_frame = self.detect_frame_structure(data)

        # Apply bbox smoothing only for tracking-format input, when enabled.
        if config.enable_smoothing and input_format == ResultFormat.OBJECT_TRACKING:
            data = self._apply_smoothing(data, config)

        # Dispatch on frame structure.
        if is_multi_frame:
            return self._process_multi_frame(data, config, context, stream_info)
        else:
            return self._process_single_frame(data, config, context, stream_info)

    except Exception as e:
        # Top-level boundary: log with traceback and return a structured error.
        self.logger.error(f"Proximity detection failed: {str(e)}", exc_info=True)

        if context:
            context.mark_completed()

        return self.create_error_result(
            str(e),
            type(e).__name__,
            usecase=self.name,
            category=self.category,
            context=context
        )
|
156
|
+
|
157
|
+
def _process_multi_frame(self, data: Dict, config: ProximityConfig, context: ProcessingContext, stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
    """Process multi-frame data to generate frame-wise agg_summary."""

    def _first(items):
        # Unwrap the single-element lists returned by _process_frame_detections.
        return items[0] if items else {}

    incidents_by_frame = {}
    tracking_by_frame = {}
    analytics_by_frame = {}
    human_text_by_frame = {}
    alerts_by_frame = {}

    # Bump the lifetime frame counter by the size of this chunk.
    chunk_frame_count = len(data)
    self._total_frame_counter += chunk_frame_count

    # Handle each frame independently.
    for frame_key, frame_detections in data.items():
        # Resolve the chunk-local frame ID, then translate it to a global one.
        local_frame_id = self._extract_frame_id_from_tracking(frame_detections, frame_key)
        global_frame_id = self.get_global_frame_id(local_frame_id)

        # Run the single-frame pipeline for this frame's detections.
        alerts, incidents_list, tracking_stats_list, business_analytics_list, summary_list = self._process_frame_detections(
            frame_detections, config, global_frame_id, stream_info
        )
        incidents = _first(incidents_list)
        tracking_stats = _first(tracking_stats_list)
        business_analytics = _first(business_analytics_list)
        summary = _first(summary_list)

        # Keep only non-empty per-frame results.
        if incidents:
            incidents_by_frame[global_frame_id] = incidents
        if tracking_stats:
            tracking_by_frame[global_frame_id] = tracking_stats
        if business_analytics:
            analytics_by_frame[global_frame_id] = business_analytics
        if summary:
            human_text_by_frame[global_frame_id] = summary
        if alerts:
            alerts_by_frame[global_frame_id] = alerts

    # Advance the global frame offset now that the chunk is done.
    self.update_global_frame_offset(chunk_frame_count)

    # Assemble the standardized frame-wise aggregate summary.
    agg_summary = self.create_frame_wise_agg_summary(
        incidents_by_frame, tracking_by_frame, analytics_by_frame, alerts_by_frame,
        frame_human_text=human_text_by_frame
    )

    context.mark_completed()

    return self.create_result(
        data={"agg_summary": agg_summary},
        usecase=self.name,
        category=self.category,
        context=context
    )
|
216
|
+
|
217
|
+
def _process_single_frame(self, data: Any, config: ProximityConfig, context: ProcessingContext, stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
    """Process single frame data and return standardized agg_summary."""

    # FIX: stream_info defaults to None, so guard the lookup — the original
    # called stream_info.get(...) unconditionally and raised AttributeError
    # whenever no stream info was supplied.
    if stream_info:
        current_frame = stream_info.get("input_settings", {}).get("start_frame", "current_frame")
    else:
        current_frame = "current_frame"

    # Run the shared per-frame pipeline.
    alerts, incidents_list, tracking_stats_list, business_analytics_list, summary_list = self._process_frame_detections(
        data, config, current_frame, stream_info
    )
    incidents = incidents_list[0] if incidents_list else {}
    tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
    business_analytics = business_analytics_list[0] if business_analytics_list else {}
    summary = summary_list[0] if summary_list else {}

    # Create single-frame agg_summary
    agg_summary = self.create_agg_summary(
        current_frame, incidents, tracking_stats, business_analytics, alerts, human_text=summary
    )

    # Mark processing as completed
    context.mark_completed()

    # Create result with standardized agg_summary
    return self.create_result(
        data={"agg_summary": agg_summary},
        usecase=self.name,
        category=self.category,
        context=context
    )
|
245
|
+
|
246
|
+
|
247
|
+
def _process_frame_detections(self, frame_data: Any, config: ProximityConfig, frame_id: str, stream_info: Optional[Dict[str, Any]] = None) -> tuple:
    """Process detections from a single frame and return standardized components."""

    # Only list-shaped frame data is supported; anything else is treated as empty.
    dets = frame_data if isinstance(frame_data, list) else []

    # Step 1: confidence filtering.
    if config.confidence_threshold is not None:
        threshold = config.confidence_threshold
        dets = [d for d in dets if d.get("confidence", 0) >= threshold]

    # Step 2: map raw class indices to category names when a mapping exists.
    if config.index_to_category:
        dets = apply_category_mapping(dets, config.index_to_category)

    # Step 3: keep only the configured person categories.
    if config.person_categories:
        allowed = config.person_categories
        dets = [d for d in dets if d.get("category") in allowed]

    # Step 4: build the per-frame counting summary (totals + per-category counts).
    per_category = {}
    for det in dets:
        cat = det.get("category", "unknown")
        per_category[cat] = per_category.get(cat, 0) + 1
    counting_summary = {
        "total_objects": len(dets),
        "detections": dets,
        "categories": per_category
    }

    # Step 5: per-zone occupancy, when zones are configured.
    zone_analysis = {}
    if config.zone_config and config.zone_config.zones:
        zone_analysis = count_objects_in_zones(dets, config.zone_config.zones)

        # Enrich zone counts with persistent zone tracking state.
        if zone_analysis and config.enable_tracking:
            enhanced = self._update_zone_tracking(zone_analysis, dets, config)
            zone_analysis.update(enhanced)

    # Step 6: refresh global tracking state (independent of unique-count settings).
    self._update_tracking_state(counting_summary)

    # Step 7: evaluate alert conditions for this frame.
    alerts = self._check_alerts(counting_summary, zone_analysis, config, frame_id)

    # Step 8: produce the standardized agg_summary components.
    incidents = self._generate_incidents(counting_summary, zone_analysis, alerts, config, frame_id, stream_info)
    tracking_stats = self._generate_tracking_stats(counting_summary, zone_analysis, config, frame_id=frame_id, alerts=alerts, stream_info=stream_info)
    business_analytics = self._generate_business_analytics(counting_summary, zone_analysis, config, frame_id, stream_info, is_empty=True)
    summary = self._generate_summary(counting_summary, incidents, tracking_stats, business_analytics, alerts)

    return alerts, incidents, tracking_stats, business_analytics, summary
|
309
|
+
|
310
|
+
def _generate_incidents(self, counting_summary: Dict, zone_analysis: Dict, alerts: List, config: ProximityConfig, frame_id: str, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
    """Generate standardized incidents for the agg_summary structure.

    Fixes over the original:
      * `human_text_lines`, `start_timestamp`, `level` and `intensity` are now
        initialized before the people/zone branches — previously the zone
        section raised NameError whenever total_people == 0 but zone_analysis
        was non-empty.
      * The zone loop now uses `zone_intensity` to derive `zone_level`
        (the original tested the frame-wide `intensity` and assigned the
        unrelated `level`, leaving `zone_level` stuck at "info").
    """
    camera_info = self.get_camera_info_from_stream(stream_info)
    incidents: List[Dict] = []
    total_people = counting_summary.get("total_objects", 0)
    current_timestamp = self._get_current_timestamp_str(stream_info, frame_id=frame_id)

    # Bound the rolling severity history to the most recent 900 samples.
    if len(self._ascending_alert_list) > 900:
        self._ascending_alert_list = self._ascending_alert_list[-900:]

    alert_settings = []
    if config.alert_config and hasattr(config.alert_config, 'alert_type'):
        alert_settings.append({
            "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
            "incident_category": self.CASE_TYPE,
            "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
            "ascending": True,
            "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                                              getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
                         }
        })

    # Defaults, so the zone section below is always well-defined.
    level = "info"
    intensity = 5.0
    start_timestamp = self._get_start_timestamp_str(stream_info)
    human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]

    if total_people > 0:
        # --- incident end-timestamp state machine ------------------------
        # N/A -> "Incident still active" -> concrete timestamp (once the
        # recent severity history cools down) -> back to N/A.
        if start_timestamp and self.current_incident_end_timestamp == 'N/A':
            self.current_incident_end_timestamp = 'Incident still active'
        elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
            if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
                self.current_incident_end_timestamp = current_timestamp
        elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
            self.current_incident_end_timestamp = 'N/A'

        # --- severity level from configured thresholds or fallbacks ------
        if config.alert_config and config.alert_config.count_thresholds:
            threshold = config.alert_config.count_thresholds.get("all", 10)
            intensity = min(10.0, (total_people / threshold) * 10)

            if intensity >= 9:
                level = "critical"
                self._ascending_alert_list.append(3)
            elif intensity >= 7:
                level = "significant"
                self._ascending_alert_list.append(2)
            elif intensity >= 5:
                level = "medium"
                self._ascending_alert_list.append(1)
            else:
                level = "low"
                self._ascending_alert_list.append(0)
        else:
            # No configured thresholds: fall back to fixed people-count bands.
            if total_people > 30:
                level = "critical"
                intensity = 10.0
                self._ascending_alert_list.append(3)
            elif total_people > 25:
                level = "significant"
                intensity = 9.0
                self._ascending_alert_list.append(2)
            elif total_people > 15:
                level = "medium"
                intensity = 7.0
                self._ascending_alert_list.append(1)
            else:
                level = "low"
                intensity = min(10.0, total_people / 3.0)
                self._ascending_alert_list.append(0)

        # Human-readable summary for this incident.
        human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE,level)}")
        human_text = "\n".join(human_text_lines)

        # Main proximity incident for the frame.
        event = self.create_incident(incident_id=self.CASE_TYPE+'_'+str(frame_id), incident_type=self.CASE_TYPE,
                                     severity_level=level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
                                     start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
                                     level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7})
        incidents.append(event)
    else:
        self._ascending_alert_list.append(0)
        incidents.append({})

    # Zone-specific incidents, one per occupied zone.
    if zone_analysis:
        human_text_lines.append(f"\t- ZONE EVENTS:")
        for zone_name, zone_count in zone_analysis.items():
            zone_total = self._robust_zone_total(zone_count)
            if zone_total <= 0:
                continue
            zone_intensity = min(10.0, zone_total / 5.0)
            if zone_intensity >= 9:
                zone_level = "critical"
                self._ascending_alert_list.append(3)
            elif zone_intensity >= 7:
                zone_level = "significant"
                self._ascending_alert_list.append(2)
            elif zone_intensity >= 5:
                zone_level = "medium"
                self._ascending_alert_list.append(1)
            else:
                zone_level = "low"
                self._ascending_alert_list.append(0)

            human_text_lines.append(f"\t\t- Zone name: {zone_name}")
            human_text_lines.append(f"\t\t\t- Total people in zone: {zone_total}")
            event = self.create_incident(incident_id=self.CASE_TYPE+'_'+'zone_'+zone_name+str(frame_id), incident_type=self.CASE_TYPE,
                                         severity_level=zone_level, human_text="\n".join(human_text_lines), camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
                                         start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
                                         level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7})
            incidents.append(event)
    return incidents
|
425
|
+
|
426
|
+
def _generate_tracking_stats(self, counting_summary: Dict, zone_analysis: Dict, config: ProximityConfig, frame_id: str, alerts: Any=[], stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
    """Generate tracking stats using standardized methods.

    Builds per-category total/current counts (from zone analysis when
    available, otherwise from cached tracking state / current detections),
    a list of detection objects, human-readable summary text, and wraps
    everything into a single tracking-stats object.

    NOTE(review): the `alerts: Any=[]` mutable default is a Python gotcha;
    it is never mutated here so it is harmless, but consider `None`.

    Returns:
        A single-element list containing the tracking-stats object.
    """

    total_people = counting_summary.get("total_objects", 0)

    # Get total count from cached tracking state
    total_unique_count = self.get_total_count()
    current_frame_count = self.get_current_frame_count()

    # Get camera info using standardized method
    camera_info = self.get_camera_info_from_stream(stream_info)

    # Build total_counts using standardized method
    total_counts = []
    per_category_total = {}

    for category in config.person_categories or ["person"]:
        # Get count for this category from zone analysis or counting summary.
        # NOTE(review): zone data is summed across all zones regardless of
        # category, so every category receives the same zone-derived total.
        category_total_count = 0
        if zone_analysis:
            for zone_data in zone_analysis.values():
                if isinstance(zone_data, dict) and "total_count" in zone_data:
                    category_total_count += zone_data.get("total_count", 0)
                elif isinstance(zone_data, dict):
                    # Sum up zone counts (ints counted directly, lists by length)
                    for v in zone_data.values():
                        if isinstance(v, int):
                            category_total_count += v
                        elif isinstance(v, list):
                            category_total_count += len(v)
                elif isinstance(zone_data, (int, list)):
                    category_total_count += len(zone_data) if isinstance(zone_data, list) else zone_data
        else:
            # Use total unique count from tracking state
            category_total_count = total_unique_count

        if category_total_count > 0:
            total_counts.append(self.create_count_object(category, category_total_count))
            per_category_total[category] = category_total_count

    # Build current_counts using standardized method
    current_counts = []
    per_category_current = {}

    for category in config.person_categories or ["person"]:
        # Get current count for this category
        category_current_count = 0
        if zone_analysis:
            for zone_data in zone_analysis.values():
                if isinstance(zone_data, dict) and "current_count" in zone_data:
                    category_current_count += zone_data.get("current_count", 0)
                elif isinstance(zone_data, dict):
                    # For current frame, look at detections count
                    for v in zone_data.values():
                        if isinstance(v, int):
                            category_current_count += v
                        elif isinstance(v, list):
                            category_current_count += len(v)
                elif isinstance(zone_data, (int, list)):
                    category_current_count += len(zone_data) if isinstance(zone_data, list) else zone_data
        else:
            # Count detections in current frame for this category
            detections = counting_summary.get("detections", [])
            category_current_count = sum(1 for d in detections if d.get("category") == category)

        if category_current_count > 0 or total_people > 0:  # Include even if 0 when there are people
            current_counts.append(self.create_count_object(category, category_current_count))
            per_category_current[category] = category_current_count

    # Prepare detections using standardized method (without confidence and track_id).
    # Segmentation payloads may arrive under "masks", "segmentation", or "mask";
    # the first non-empty one wins.
    detections = []
    for detection in counting_summary.get("detections", []):
        bbox = detection.get("bounding_box", {})
        category = detection.get("category", "person")
        # Include segmentation if available (like in eg.json)
        if detection.get("masks"):
            segmentation= detection.get("masks", [])
            detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
        elif detection.get("segmentation"):
            segmentation= detection.get("segmentation")
            detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
        elif detection.get("mask"):
            segmentation= detection.get("mask")
            detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
        else:
            detection_obj = self.create_detection_object(category, bbox)
        detections.append(detection_obj)

    # Build alerts and alert_settings arrays
    alert_settings = []
    if config.alert_config and hasattr(config.alert_config, 'alert_type'):
        alert_settings.append({
            "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
            "incident_category": self.CASE_TYPE,
            "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
            "ascending": True,
            "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                                              getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
                         }
        })
    if zone_analysis:
        # Zone-aware human text: current-frame header plus per-zone totals.
        human_text_lines=[]
        current_timestamp = self._get_current_timestamp_str(stream_info, frame_id=frame_id)
        start_timestamp = self._get_start_timestamp_str(stream_info)
        human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
        def robust_zone_total(zone_count):
            # Tolerates int, list, or dict-shaped zone counts.
            # NOTE(review): the `total==0` guard means list values are only
            # counted if no int value preceded them — differs from the
            # summation logic above; confirm this asymmetry is intentional.
            if isinstance(zone_count, dict):
                total = 0
                for v in zone_count.values():
                    if isinstance(v, int):
                        total += v
                    elif isinstance(v, list) and total==0:
                        total += len(v)
                return total
            elif isinstance(zone_count, list):
                return len(zone_count)
            elif isinstance(zone_count, int):
                return zone_count
            else:
                return 0
        human_text_lines.append(f"\t- People Detected: {total_people}")
        human_text_lines.append("")
        human_text_lines.append(f"TOTAL SINCE @ {start_timestamp}:")

        for zone_name, zone_count in zone_analysis.items():
            zone_total = robust_zone_total(zone_count)
            human_text_lines.append(f"\t- Zone name: {zone_name}")
            # NOTE(review): the displayed count is zone_total-1 — presumably a
            # deliberate offset, but it shows -1 for empty zones; verify.
            human_text_lines.append(f"\t\t- Total count in zone: {zone_total-1}")

        if total_unique_count > 0:
            human_text_lines.append(f"\t- Total unique people in the scene: {total_unique_count}")
        if alerts:
            for alert in alerts:
                human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
        else:
            human_text_lines.append("Alerts: None")
        human_text = "\n".join(human_text_lines)
    else:
        # No zones configured: fall back to the generic tracking text.
        human_text = self._generate_human_text_for_tracking(total_people, total_unique_count, config, frame_id, alerts, stream_info)

    # Create high precision timestamps for input_timestamp and reset_timestamp
    high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True, frame_id=frame_id)
    high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
    # Create tracking_stat using standardized method
    tracking_stat = self.create_tracking_stats(
        total_counts, current_counts, detections, human_text, camera_info, alerts, alert_settings, start_time=high_precision_start_timestamp, reset_time=high_precision_reset_timestamp
    )

    return [tracking_stat]
|
575
|
+
|
576
|
+
def _generate_human_text_for_tracking(self, total_people: int, total_unique_count: int, config: ProximityConfig, frame_id: str, alerts: Any=[], stream_info: Optional[Dict[str, Any]] = None) -> str:
    """Generate human-readable text for tracking stats (non-zone format).

    Emits a "CURRENT FRAME" header with the per-frame people count, a
    "TOTAL SINCE" section when any unique people have been tracked, and a
    trailing alerts line.

    Fix: removed an unused function-local `from datetime import datetime,
    timezone` — nothing in this body uses datetime, and the module already
    imports it at top level.

    NOTE(review): `alerts: Any=[]` is a mutable default; it is only read
    here so behavior is unaffected, but `None` would be safer.

    Args:
        total_people: Number of people detected in the current frame.
        total_unique_count: Cumulative unique-person count.
        config: Proximity configuration (currently unused here).
        frame_id: Frame identifier passed through to the timestamp helper.
        alerts: Alert dicts to echo into the text, if any.
        stream_info: Stream metadata for timestamp resolution.

    Returns:
        The joined multi-line summary string.
    """
    lines = []
    current_timestamp = self._get_current_timestamp_str(stream_info, precision=True, frame_id=frame_id)
    start_timestamp = self._get_start_timestamp_str(stream_info, precision=True)

    lines.append(f"CURRENT FRAME @ {current_timestamp}:")
    lines.append(f"\t- People Detected: {total_people}")

    lines.append("")
    if total_unique_count > 0:
        lines.append(f"TOTAL SINCE @ {start_timestamp}:")
        lines.append(f"\t- Total unique people count: {total_unique_count}")

    if alerts:
        for alert in alerts:
            lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
    else:
        lines.append("Alerts: None")

    return "\n".join(lines)
|
599
|
+
|
600
|
+
def _check_alerts(self, counting_summary: Dict, zone_analysis: Dict,
|
601
|
+
config: ProximityConfig, frame_id: str) -> List[Dict]:
|
602
|
+
"""Check for alert conditions and generate alerts."""
|
603
|
+
def get_trend(data, lookback=900, threshold=0.6):
|
604
|
+
'''
|
605
|
+
Determine if the trend is ascending or descending based on actual value progression.
|
606
|
+
Now works with values 0,1,2,3 (not just binary).
|
607
|
+
'''
|
608
|
+
window = data[-lookback:] if len(data) >= lookback else data
|
609
|
+
if len(window) < 2:
|
610
|
+
return True # not enough data to determine trend
|
611
|
+
increasing = 0
|
612
|
+
total = 0
|
613
|
+
for i in range(1, len(window)):
|
614
|
+
if window[i] >= window[i - 1]:
|
615
|
+
increasing += 1
|
616
|
+
total += 1
|
617
|
+
ratio = increasing / total
|
618
|
+
if ratio >= threshold:
|
619
|
+
return True
|
620
|
+
elif ratio <= (1 - threshold):
|
621
|
+
return False
|
622
|
+
alerts = []
|
623
|
+
|
624
|
+
if not config.alert_config:
|
625
|
+
return alerts
|
626
|
+
|
627
|
+
total_people = counting_summary.get("total_objects", 0)
|
628
|
+
|
629
|
+
# Count threshold alerts
|
630
|
+
if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
|
631
|
+
|
632
|
+
for category, threshold in config.alert_config.count_thresholds.items():
|
633
|
+
if category == "all" and total_people >= threshold:
|
634
|
+
|
635
|
+
alerts.append({
|
636
|
+
"alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
|
637
|
+
"alert_id": "alert_"+category+'_'+frame_id,
|
638
|
+
"incident_category": self.CASE_TYPE,
|
639
|
+
"threshold_level": threshold,
|
640
|
+
"ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
|
641
|
+
"settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
|
642
|
+
getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
|
643
|
+
}
|
644
|
+
})
|
645
|
+
elif category in counting_summary.get("by_category", {}):
|
646
|
+
count = counting_summary["by_category"][category]
|
647
|
+
|
648
|
+
if count >= threshold:
|
649
|
+
alerts.append({
|
650
|
+
"alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
|
651
|
+
"alert_id": "alert_"+category+'_'+frame_id,
|
652
|
+
"incident_category": self.CASE_TYPE,
|
653
|
+
"threshold_level": threshold,
|
654
|
+
"ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
|
655
|
+
"settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
|
656
|
+
getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
|
657
|
+
}
|
658
|
+
})
|
659
|
+
else:
|
660
|
+
pass
|
661
|
+
|
662
|
+
# Zone occupancy threshold alerts
|
663
|
+
if config.alert_config.occupancy_thresholds:
|
664
|
+
for zone_name, threshold in config.alert_config.occupancy_thresholds.items():
|
665
|
+
if zone_name in zone_analysis:
|
666
|
+
# Calculate zone_count robustly (supports int, list, dict values)
|
667
|
+
print('ZONEEE',zone_name, zone_analysis[zone_name])
|
668
|
+
zone_count = self._robust_zone_total(zone_analysis[zone_name])
|
669
|
+
if zone_count >= threshold:
|
670
|
+
alerts.append({
|
671
|
+
"alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
|
672
|
+
"alert_id": f"alert_zone_{zone_name}_{frame_id}",
|
673
|
+
"incident_category": f"{self.CASE_TYPE}_{zone_name}",
|
674
|
+
"threshold_level": threshold,
|
675
|
+
"ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
|
676
|
+
"settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
|
677
|
+
getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
|
678
|
+
}
|
679
|
+
})
|
680
|
+
|
681
|
+
return alerts
|
682
|
+
|
683
|
+
def _generate_business_analytics(self, counting_summary: Dict, zone_analysis: Dict, config: ProximityConfig, frame_id: str, stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
|
684
|
+
"""Generate standardized business analytics for the agg_summary structure."""
|
685
|
+
if is_empty:
|
686
|
+
return []
|
687
|
+
business_analytics = []
|
688
|
+
|
689
|
+
total_people = counting_summary.get("total_objects", 0)
|
690
|
+
|
691
|
+
# Get camera info using standardized method
|
692
|
+
camera_info = self.get_camera_info_from_stream(stream_info)
|
693
|
+
|
694
|
+
if total_people > 0 or config.enable_analytics:
|
695
|
+
# Calculate analytics statistics
|
696
|
+
analytics_stats = {
|
697
|
+
"people_count": total_people,
|
698
|
+
"unique_people_count": self.get_total_count(),
|
699
|
+
"current_frame_count": self.get_current_frame_count()
|
700
|
+
}
|
701
|
+
|
702
|
+
# Add zone analytics if available
|
703
|
+
if zone_analysis:
|
704
|
+
zone_stats = {}
|
705
|
+
for zone_name, zone_count in zone_analysis.items():
|
706
|
+
zone_total = self._robust_zone_total(zone_count)
|
707
|
+
zone_stats[f"{zone_name}_occupancy"] = zone_total
|
708
|
+
analytics_stats.update(zone_stats)
|
709
|
+
|
710
|
+
# Generate human text for analytics
|
711
|
+
current_timestamp = self._get_current_timestamp_str(stream_info, frame_id=frame_id)
|
712
|
+
start_timestamp = self._get_start_timestamp_str(stream_info)
|
713
|
+
|
714
|
+
analytics_human_text = self.generate_analytics_human_text(
|
715
|
+
"people_counting_analytics", analytics_stats, current_timestamp, start_timestamp
|
716
|
+
)
|
717
|
+
|
718
|
+
# Create business analytics using standardized method
|
719
|
+
analytics = self.create_business_analytics(
|
720
|
+
"people_counting_analytics", analytics_stats, analytics_human_text, camera_info
|
721
|
+
)
|
722
|
+
business_analytics.append(analytics)
|
723
|
+
|
724
|
+
return business_analytics
|
725
|
+
|
726
|
+
def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
|
727
|
+
"""
|
728
|
+
Generate a human_text string for the tracking_stat, incident, business analytics and alerts.
|
729
|
+
"""
|
730
|
+
lines = {}
|
731
|
+
lines["Application Name"] = self.CASE_TYPE
|
732
|
+
lines["Application Version"] = self.CASE_VERSION
|
733
|
+
if len(incidents) > 0:
|
734
|
+
lines["Incidents:"]=f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
|
735
|
+
if len(tracking_stats) > 0:
|
736
|
+
lines["Tracking Statistics:"]=f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
|
737
|
+
if len(business_analytics) > 0:
|
738
|
+
lines["Business Analytics:"]=f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
|
739
|
+
|
740
|
+
if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
|
741
|
+
lines["Summary"] = "No Summary Data"
|
742
|
+
|
743
|
+
return [lines]
|
744
|
+
|
745
|
+
def _calculate_metrics(self, counting_summary: Dict, zone_analysis: Dict,
|
746
|
+
config: ProximityConfig, context: ProcessingContext) -> Dict[str, Any]:
|
747
|
+
"""Calculate detailed metrics for analytics."""
|
748
|
+
total_people = counting_summary.get("total_objects", 0)
|
749
|
+
|
750
|
+
metrics = {
|
751
|
+
"total_people": total_people,
|
752
|
+
"processing_time": context.processing_time or 0.0,
|
753
|
+
"input_format": context.input_format.value,
|
754
|
+
"confidence_threshold": config.confidence_threshold,
|
755
|
+
"zones_analyzed": len(zone_analysis),
|
756
|
+
"detection_rate": 0.0,
|
757
|
+
"coverage_percentage": 0.0
|
758
|
+
}
|
759
|
+
|
760
|
+
# Calculate detection rate
|
761
|
+
if config.time_window_minutes and config.time_window_minutes > 0:
|
762
|
+
metrics["detection_rate"] = (total_people / config.time_window_minutes) * 60
|
763
|
+
|
764
|
+
# Calculate zone coverage
|
765
|
+
if zone_analysis and total_people > 0:
|
766
|
+
people_in_zones = 0
|
767
|
+
for zone_counts in zone_analysis.values():
|
768
|
+
if isinstance(zone_counts, dict):
|
769
|
+
for v in zone_counts.values():
|
770
|
+
if isinstance(v, int):
|
771
|
+
people_in_zones += v
|
772
|
+
elif isinstance(v, list):
|
773
|
+
people_in_zones += len(v)
|
774
|
+
elif isinstance(zone_counts, list):
|
775
|
+
people_in_zones += len(zone_counts)
|
776
|
+
elif isinstance(zone_counts, int):
|
777
|
+
people_in_zones += zone_counts
|
778
|
+
metrics["coverage_percentage"] = (people_in_zones / total_people) * 100
|
779
|
+
|
780
|
+
# Unique tracking metrics
|
781
|
+
if config.enable_unique_counting:
|
782
|
+
unique_count = self._count_unique_tracks(counting_summary, config)
|
783
|
+
if unique_count is not None:
|
784
|
+
metrics["unique_people"] = unique_count
|
785
|
+
metrics["tracking_efficiency"] = (unique_count / total_people) * 100 if total_people > 0 else 0
|
786
|
+
|
787
|
+
# Per-zone metrics
|
788
|
+
if zone_analysis:
|
789
|
+
zone_metrics = {}
|
790
|
+
for zone_name, zone_counts in zone_analysis.items():
|
791
|
+
# Robustly sum counts, handling dicts with int or list values
|
792
|
+
if isinstance(zone_counts, dict):
|
793
|
+
zone_total = 0
|
794
|
+
for v in zone_counts.values():
|
795
|
+
if isinstance(v, int):
|
796
|
+
zone_total += v
|
797
|
+
elif isinstance(v, list):
|
798
|
+
zone_total += len(v)
|
799
|
+
elif isinstance(zone_counts, list):
|
800
|
+
zone_total = len(zone_counts)
|
801
|
+
elif isinstance(zone_counts, int):
|
802
|
+
zone_total = zone_counts
|
803
|
+
else:
|
804
|
+
zone_total = 0
|
805
|
+
zone_metrics[zone_name] = {
|
806
|
+
"count": zone_total,
|
807
|
+
"percentage": (zone_total / total_people) * 100 if total_people > 0 else 0
|
808
|
+
}
|
809
|
+
metrics["zone_metrics"] = zone_metrics
|
810
|
+
|
811
|
+
return metrics
|
812
|
+
|
813
|
+
def _extract_predictions(self, data: Any) -> List[Dict[str, Any]]:
|
814
|
+
"""Extract predictions from processed data for API compatibility."""
|
815
|
+
predictions = []
|
816
|
+
|
817
|
+
try:
|
818
|
+
if isinstance(data, list):
|
819
|
+
# Detection format
|
820
|
+
for item in data:
|
821
|
+
prediction = self._normalize_prediction(item)
|
822
|
+
if prediction:
|
823
|
+
predictions.append(prediction)
|
824
|
+
|
825
|
+
elif isinstance(data, dict):
|
826
|
+
# Frame-based or tracking format
|
827
|
+
for frame_id, items in data.items():
|
828
|
+
if isinstance(items, list):
|
829
|
+
for item in items:
|
830
|
+
prediction = self._normalize_prediction(item)
|
831
|
+
if prediction:
|
832
|
+
prediction["frame_id"] = frame_id
|
833
|
+
predictions.append(prediction)
|
834
|
+
|
835
|
+
except Exception as e:
|
836
|
+
self.logger.warning(f"Failed to extract predictions: {str(e)}")
|
837
|
+
|
838
|
+
return predictions
|
839
|
+
|
840
|
+
def _normalize_prediction(self, item: Dict[str, Any]) -> Dict[str, Any]:
|
841
|
+
"""Normalize a single prediction item."""
|
842
|
+
if not isinstance(item, dict):
|
843
|
+
return {}
|
844
|
+
|
845
|
+
return {
|
846
|
+
"category": item.get("category", item.get("class", "unknown")),
|
847
|
+
"confidence": item.get("confidence", item.get("score", 0.0)),
|
848
|
+
"bounding_box": item.get("bounding_box", item.get("bbox", {})),
|
849
|
+
"track_id": item.get("track_id")
|
850
|
+
}
|
851
|
+
|
852
|
+
def _get_detections_with_confidence(self, counting_summary: Dict) -> List[Dict]:
|
853
|
+
"""Extract detection items with confidence scores."""
|
854
|
+
return counting_summary.get("detections", [])
|
855
|
+
|
856
|
+
def _count_unique_tracks(self, counting_summary: Dict, config: ProximityConfig = None) -> Optional[int]:
|
857
|
+
"""Count unique tracks if tracking is enabled."""
|
858
|
+
# Always update tracking state regardless of enable_unique_counting setting
|
859
|
+
self._update_tracking_state(counting_summary)
|
860
|
+
|
861
|
+
# Only return the count if unique counting is enabled
|
862
|
+
if config and config.enable_unique_counting:
|
863
|
+
return self._total_count if self._total_count > 0 else None
|
864
|
+
else:
|
865
|
+
return None
|
866
|
+
|
867
|
+
def _update_tracking_state(self, counting_summary: Dict) -> None:
|
868
|
+
"""Update tracking state with current frame data (always called)."""
|
869
|
+
detections = self._get_detections_with_confidence(counting_summary)
|
870
|
+
|
871
|
+
if not detections:
|
872
|
+
return
|
873
|
+
|
874
|
+
# Map raw tracker IDs to canonical IDs to avoid duplicate counting
|
875
|
+
current_frame_tracks: Set[Any] = set()
|
876
|
+
|
877
|
+
for detection in detections:
|
878
|
+
raw_track_id = detection.get("track_id")
|
879
|
+
if raw_track_id is None:
|
880
|
+
continue
|
881
|
+
|
882
|
+
bbox = detection.get("bounding_box", detection.get("bbox"))
|
883
|
+
if not bbox:
|
884
|
+
continue
|
885
|
+
|
886
|
+
canonical_id = self._merge_or_register_track(raw_track_id, bbox)
|
887
|
+
|
888
|
+
# Propagate canonical ID so that downstream logic (including zone
|
889
|
+
# tracking and event generation) operates on the de-duplicated ID.
|
890
|
+
detection["track_id"] = canonical_id
|
891
|
+
current_frame_tracks.add(canonical_id)
|
892
|
+
|
893
|
+
# Update total track IDs with new canonical IDs from current frame
|
894
|
+
old_total_count = len(self._total_track_ids)
|
895
|
+
self._total_track_ids.update(current_frame_tracks)
|
896
|
+
self._current_frame_track_ids = current_frame_tracks
|
897
|
+
|
898
|
+
# Update total count
|
899
|
+
self._total_count = len(self._total_track_ids)
|
900
|
+
self._last_update_time = time.time()
|
901
|
+
|
902
|
+
# Log tracking state updates
|
903
|
+
if len(current_frame_tracks) > 0:
|
904
|
+
new_tracks = current_frame_tracks - (self._total_track_ids - current_frame_tracks)
|
905
|
+
if new_tracks:
|
906
|
+
self.logger.debug(
|
907
|
+
f"Tracking state updated: {len(new_tracks)} new canonical track IDs added, total unique tracks: {self._total_count}")
|
908
|
+
else:
|
909
|
+
self.logger.debug(
|
910
|
+
f"Tracking state updated: {len(current_frame_tracks)} current frame canonical tracks, total unique tracks: {self._total_count}")
|
911
|
+
|
912
|
+
def get_total_count(self) -> int:
    """Cumulative number of unique people tracked across all processed frames."""
    cumulative = self._total_count
    return cumulative
|
916
|
+
def get_current_frame_count(self) -> int:
    """Number of tracked people present in the most recent frame."""
    current_ids = self._current_frame_track_ids
    return len(current_ids)
|
920
|
+
def get_total_frames_processed(self) -> int:
    """Total number of frames this processor has seen across all calls."""
    frames_seen = self._total_frame_counter
    return frames_seen
|
924
|
+
def set_global_frame_offset(self, offset: int) -> None:
    """Pin the base offset used to translate chunk-local frame ids to global ones."""
    self._global_frame_offset = offset
    self.logger.info(f"Global frame offset set to: {offset}")
|
929
|
+
def get_global_frame_offset(self) -> int:
    """Current base offset applied when translating chunk-local frame ids."""
    offset = self._global_frame_offset
    return offset
|
933
|
+
def update_global_frame_offset(self, frames_in_chunk: int) -> None:
    """Advance the global frame offset after a chunk of frames has been processed."""
    previous_offset = self._global_frame_offset
    self._global_frame_offset = previous_offset + frames_in_chunk
    self.logger.info(f"Global frame offset updated: {previous_offset} -> {self._global_frame_offset} (added {frames_in_chunk} frames)")
|
939
|
+
def get_global_frame_id(self, local_frame_id: str) -> str:
    """Normalize a chunk-local frame id for global use.

    NOTE(review): the offset addition is currently disabled in code, so numeric
    ids are returned unchanged (canonicalized, e.g. "007" -> "7") and
    non-numeric ids (e.g. timestamps) pass through untouched.
    """
    try:
        numeric_id = int(local_frame_id)
    except (ValueError, TypeError):
        # Not a frame number (e.g. a timestamp key) — keep as-is.
        return local_frame_id
    return str(numeric_id)
|
950
|
+
def get_track_ids_info(self) -> Dict[str, Any]:
    """Summarize the tracker's id bookkeeping for diagnostics."""
    current_ids = list(self._current_frame_track_ids)
    return {
        "total_count": self._total_count,
        "current_frame_count": len(current_ids),
        "total_unique_track_ids": len(self._total_track_ids),
        "current_frame_track_ids": current_ids,
        "last_update_time": self._last_update_time,
        "total_frames_processed": self._total_frame_counter,
    }
|
961
|
+
def get_tracking_debug_info(self) -> Dict[str, Any]:
    """Full dump of tracking internals for debugging."""
    zone_current = {name: list(ids) for name, ids in self._zone_current_track_ids.items()}
    zone_total = {name: list(ids) for name, ids in self._zone_total_track_ids.items()}
    return {
        "total_track_ids": list(self._total_track_ids),
        "current_frame_track_ids": list(self._current_frame_track_ids),
        "total_count": self._total_count,
        "current_frame_count": len(self._current_frame_track_ids),
        "total_frames_processed": self._total_frame_counter,
        "last_update_time": self._last_update_time,
        "zone_current_track_ids": zone_current,
        "zone_total_track_ids": zone_total,
        "zone_current_counts": dict(self._zone_current_counts),
        "zone_total_counts": dict(self._zone_total_counts),
        "global_frame_offset": self._global_frame_offset,
        "frames_in_current_chunk": self._frames_in_current_chunk,
    }
|
978
|
+
def get_frame_info(self) -> Dict[str, Any]:
    """Report frame-offset bookkeeping used for chunked video processing."""
    offset = self._global_frame_offset
    in_chunk = self._frames_in_current_chunk
    return {
        "global_frame_offset": offset,
        "total_frames_processed": self._total_frame_counter,
        "frames_in_current_chunk": in_chunk,
        "next_global_frame": offset + in_chunk,
    }
|
987
|
+
def reset_tracking_state(self) -> None:
    """
    WARNING: This completely resets ALL tracking data including cumulative totals!

    Only use when starting a brand-new tracking session, switching to a
    different video/stream, or on explicit user request. For clearing
    expired/stale tracks, use clear_current_frame_tracking() instead.
    """
    # Global id bookkeeping.
    self._total_track_ids.clear()
    self._current_frame_track_ids.clear()
    self._total_count = 0
    self._last_update_time = time.time()

    # Per-zone bookkeeping.
    for zone_store in (self._zone_current_track_ids, self._zone_total_track_ids,
                       self._zone_current_counts, self._zone_total_counts):
        zone_store.clear()

    # Frame counters and chunk offset.
    self._total_frame_counter = 0
    self._global_frame_offset = 0
    self._frames_in_current_chunk = 0

    # Canonical-track aliasing state.
    self._canonical_tracks.clear()
    self._track_aliases.clear()
    self._tracking_start_time = None

    self.logger.warning(" FULL tracking state reset - all track IDs, zone data, frame counter, and global frame offset cleared. Cumulative totals lost!")
|
1021
|
+
def clear_current_frame_tracking(self) -> int:
    """
    MANUAL USE ONLY: drop per-frame tracking data while keeping cumulative totals.

    Not called automatically anywhere. The cumulative total (self._total_count)
    is always preserved; in streaming scenarios you typically never need this.

    Returns:
        Number of current frame tracks cleared
    """
    old_current_count = len(self._current_frame_track_ids)
    self._current_frame_track_ids.clear()

    # Clear per-zone current tracking (total zone tracking is untouched).
    cleared_zone_tracks = 0
    for zone_name in list(self._zone_current_track_ids):
        zone_ids = self._zone_current_track_ids[zone_name]
        cleared_zone_tracks += len(zone_ids)
        zone_ids.clear()
        self._zone_current_counts[zone_name] = 0

    self._last_update_time = time.time()

    self.logger.info(f"Cleared {old_current_count} current frame tracks and {cleared_zone_tracks} zone current tracks. Cumulative total preserved: {self._total_count}")
    return old_current_count
|
1051
|
+
def reset_frame_counter(self) -> None:
    """Zero the processed-frame counter; all other state is left intact."""
    old_count = self._total_frame_counter
    self._total_frame_counter = 0
    self.logger.info(f"Frame counter reset from {old_count} to 0")
|
1057
|
+
def clear_expired_tracks(self, max_age_seconds: float = 300.0) -> int:
    """
    MANUAL USE ONLY: clear current-frame tracking data if stale.

    Not called automatically anywhere; provided as a utility for manual
    cleanup. Only current-frame data is cleared — the cumulative total is
    preserved and never decreases.

    Args:
        max_age_seconds: Maximum age in seconds before clearing current frame tracks

    Returns:
        Number of current frame tracks cleared
    """
    age = time.time() - self._last_update_time
    if age <= max_age_seconds:
        # Still fresh: nothing to clear.
        return 0

    # Delegate to the safe method that preserves cumulative totals.
    cleared_count = self.clear_current_frame_tracking()
    self.logger.info(f"Manual cleanup: cleared {cleared_count} expired current frame tracks (age > {max_age_seconds}s)")
    return cleared_count
|
1084
|
+
def _update_zone_tracking(self, zone_analysis: Dict[str, Dict[str, int]], detections: List[Dict], config: ProximityConfig) -> Dict[str, Dict[str, Any]]:
    """
    Update zone tracking with current frame data.

    Mutates the per-zone state (self._zone_current_track_ids,
    self._zone_total_track_ids and the matching count dicts) in place, then
    returns an enriched copy of zone_analysis.

    Args:
        zone_analysis: Current zone analysis results
        detections: List of detections with track IDs
        config: proximity detection configuration with zone polygons

    Returns:
        Enhanced zone analysis with tracking information
    """
    # No configured zones or no zone results: nothing to track.
    if not zone_analysis or not config.zone_config or not config.zone_config.zones:
        return {}

    enhanced_zone_analysis = {}
    zones = config.zone_config.zones

    # Get current frame track IDs in each zone
    current_frame_zone_tracks = {}

    # Initialize zone tracking for all zones
    for zone_name in zones.keys():
        current_frame_zone_tracks[zone_name] = set()
        if zone_name not in self._zone_current_track_ids:
            self._zone_current_track_ids[zone_name] = set()
        if zone_name not in self._zone_total_track_ids:
            self._zone_total_track_ids[zone_name] = set()

    # Check each detection against each zone
    for detection in detections:
        track_id = detection.get("track_id")
        if track_id is None:
            # Untracked detections cannot contribute to zone membership.
            continue

        # Get detection bbox
        bbox = detection.get("bounding_box", detection.get("bbox"))
        if not bbox:
            continue

        # Get detection center point
        # Uses the bottom-25% center of the bbox rather than the geometric
        # center (presumably to approximate foot position — see helper).
        center_point = get_bbox_bottom25_center(bbox) #get_bbox_center(bbox)

        # Check which zone this detection is in using actual zone polygons
        for zone_name, zone_polygon in zones.items():
            # Convert polygon points to tuples for point_in_polygon function
            # zone_polygon format: [[x1, y1], [x2, y2], [x3, y3], ...]
            polygon_points = [(point[0], point[1]) for point in zone_polygon]

            # Check if detection center is inside the zone polygon using ray casting algorithm
            if point_in_polygon(center_point, polygon_points):
                current_frame_zone_tracks[zone_name].add(track_id)

    # Update zone tracking for each zone
    # NOTE(review): iterates zone_analysis keys, so a configured zone that is
    # absent from zone_analysis keeps its previous current-track set — confirm
    # this is intended.
    for zone_name, zone_counts in zone_analysis.items():
        # Get current frame tracks for this zone
        current_tracks = current_frame_zone_tracks.get(zone_name, set())

        # Update current zone tracks
        self._zone_current_track_ids[zone_name] = current_tracks

        # Update total zone tracks (accumulate all track IDs that have been in this zone)
        self._zone_total_track_ids[zone_name].update(current_tracks)

        # Update counts
        self._zone_current_counts[zone_name] = len(current_tracks)
        self._zone_total_counts[zone_name] = len(self._zone_total_track_ids[zone_name])

        # Create enhanced zone analysis
        enhanced_zone_analysis[zone_name] = {
            "current_count": self._zone_current_counts[zone_name],
            "total_count": self._zone_total_counts[zone_name],
            "current_track_ids": list(current_tracks),
            "total_track_ids": list(self._zone_total_track_ids[zone_name]),
            "original_counts": zone_counts  # Preserve original zone counts
        }

    return enhanced_zone_analysis
|
1163
|
+
def get_zone_tracking_info(self) -> Dict[str, Dict[str, Any]]:
    """Snapshot of per-zone current/total counts and track-id lists."""
    known_zones = set(self._zone_current_counts.keys()) | set(self._zone_total_counts.keys())
    info: Dict[str, Dict[str, Any]] = {}
    for zone in known_zones:
        info[zone] = {
            "current_count": self._zone_current_counts.get(zone, 0),
            "total_count": self._zone_total_counts.get(zone, 0),
            "current_track_ids": list(self._zone_current_track_ids.get(zone, set())),
            "total_track_ids": list(self._zone_total_track_ids.get(zone, set())),
        }
    return info
|
1175
|
+
def get_zone_current_count(self, zone_name: str) -> int:
    """Number of people currently inside the named zone (0 if unknown)."""
    counts = self._zone_current_counts
    return counts.get(zone_name, 0)
|
1179
|
+
def get_zone_total_count(self, zone_name: str) -> int:
    """Cumulative number of people who have ever been in the named zone."""
    counts = self._zone_total_counts
    return counts.get(zone_name, 0)
|
1183
|
+
def get_all_zone_counts(self) -> Dict[str, Dict[str, int]]:
    """Current and cumulative people counts for every known zone."""
    zone_names = set(self._zone_current_counts.keys()) | set(self._zone_total_counts.keys())
    summary: Dict[str, Dict[str, int]] = {}
    for name in zone_names:
        summary[name] = {
            "current": self._zone_current_counts.get(name, 0),
            "total": self._zone_total_counts.get(name, 0),
        }
    return summary
|
1193
|
+
def _format_timestamp_for_stream(self, timestamp: float) -> str:
|
1194
|
+
"""Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
|
1195
|
+
dt = datetime.fromtimestamp(float(timestamp), tz=timezone.utc)
|
1196
|
+
return dt.strftime('%Y:%m:%d %H:%M:%S')
|
1197
|
+
|
1198
|
+
def _format_timestamp_for_video(self, timestamp: float) -> str:
|
1199
|
+
"""Format timestamp for video chunks (HH:MM:SS.ms format)."""
|
1200
|
+
hours = int(timestamp // 3600)
|
1201
|
+
minutes = int((timestamp % 3600) // 60)
|
1202
|
+
seconds = round(float(timestamp % 60),2)
|
1203
|
+
return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
|
1204
|
+
|
1205
|
+
def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: str=None) -> str:
|
1206
|
+
"""Get formatted current timestamp based on stream type."""
|
1207
|
+
if not stream_info:
|
1208
|
+
return "00:00:00.00"
|
1209
|
+
# is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
|
1210
|
+
if precision:
|
1211
|
+
if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
|
1212
|
+
if frame_id:
|
1213
|
+
start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
|
1214
|
+
else:
|
1215
|
+
start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
|
1216
|
+
stream_time_str = self._format_timestamp_for_video(start_time)
|
1217
|
+
return stream_time_str
|
1218
|
+
else:
|
1219
|
+
return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
|
1220
|
+
|
1221
|
+
if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
|
1222
|
+
if frame_id:
|
1223
|
+
start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
|
1224
|
+
else:
|
1225
|
+
start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
|
1226
|
+
stream_time_str = self._format_timestamp_for_video(start_time)
|
1227
|
+
return stream_time_str
|
1228
|
+
else:
|
1229
|
+
# For streams, use stream_time from stream_info
|
1230
|
+
stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
|
1231
|
+
if stream_time_str:
|
1232
|
+
# Parse the high precision timestamp string to get timestamp
|
1233
|
+
try:
|
1234
|
+
# Remove " UTC" suffix and parse
|
1235
|
+
timestamp_str = stream_time_str.replace(" UTC", "")
|
1236
|
+
dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
|
1237
|
+
timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
|
1238
|
+
return self._format_timestamp_for_stream(timestamp)
|
1239
|
+
except:
|
1240
|
+
# Fallback to current time if parsing fails
|
1241
|
+
return self._format_timestamp_for_stream(time.time())
|
1242
|
+
else:
|
1243
|
+
return self._format_timestamp_for_stream(time.time())
|
1244
|
+
|
1245
|
+
def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
|
1246
|
+
"""Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
|
1247
|
+
if not stream_info:
|
1248
|
+
return "00:00:00"
|
1249
|
+
|
1250
|
+
if precision:
|
1251
|
+
if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
|
1252
|
+
return "00:00:00"
|
1253
|
+
else:
|
1254
|
+
return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
|
1255
|
+
|
1256
|
+
|
1257
|
+
if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
|
1258
|
+
# If video format, start from 00:00:00
|
1259
|
+
return "00:00:00"
|
1260
|
+
else:
|
1261
|
+
# For streams, use tracking start time or current time with minutes/seconds reset
|
1262
|
+
if self._tracking_start_time is None:
|
1263
|
+
# Try to extract timestamp from stream_time string
|
1264
|
+
stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
|
1265
|
+
if stream_time_str:
|
1266
|
+
try:
|
1267
|
+
# Remove " UTC" suffix and parse
|
1268
|
+
timestamp_str = stream_time_str.replace(" UTC", "")
|
1269
|
+
dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
|
1270
|
+
self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
|
1271
|
+
except:
|
1272
|
+
# Fallback to current time if parsing fails
|
1273
|
+
self._tracking_start_time = time.time()
|
1274
|
+
else:
|
1275
|
+
self._tracking_start_time = time.time()
|
1276
|
+
|
1277
|
+
dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
|
1278
|
+
# Reset minutes and seconds to 00:00 for "TOTAL SINCE" format
|
1279
|
+
dt = dt.replace(minute=0, second=0, microsecond=0)
|
1280
|
+
return dt.strftime('%Y:%m:%d %H:%M:%S')
|
1281
|
+
|
1282
|
+
def _extract_frame_id_from_tracking(self, frame_detections: List[Dict], frame_key: str) -> str:
|
1283
|
+
"""Extract frame ID from tracking data."""
|
1284
|
+
# Priority 1: Check if detections have frame information
|
1285
|
+
if frame_detections and len(frame_detections) > 0:
|
1286
|
+
first_detection = frame_detections[0]
|
1287
|
+
if "frame" in first_detection:
|
1288
|
+
return str(first_detection["frame"])
|
1289
|
+
elif "frame_id" in first_detection:
|
1290
|
+
return str(first_detection["frame_id"])
|
1291
|
+
# Priority 2: Use frame_key from input data
|
1292
|
+
return str(frame_key)
|
1293
|
+
|
1294
|
+
def _robust_zone_total(self, zone_count):
|
1295
|
+
"""Helper method to robustly calculate zone total."""
|
1296
|
+
if isinstance(zone_count, dict):
|
1297
|
+
total = 0
|
1298
|
+
for v in zone_count.values():
|
1299
|
+
if isinstance(v, int):
|
1300
|
+
total += v
|
1301
|
+
elif isinstance(v, list):
|
1302
|
+
total += len(v)
|
1303
|
+
return total
|
1304
|
+
elif isinstance(zone_count, list):
|
1305
|
+
return len(zone_count)
|
1306
|
+
elif isinstance(zone_count, int):
|
1307
|
+
return zone_count
|
1308
|
+
else:
|
1309
|
+
return 0
|
1310
|
+
|
1311
|
+
# --------------------------------------------------------------------- #
|
1312
|
+
# Private helpers for canonical track aliasing #
|
1313
|
+
# --------------------------------------------------------------------- #
|
1314
|
+
|
1315
|
+
def _compute_iou(self, box1: Any, box2: Any) -> float:
|
1316
|
+
"""Compute IoU between two bounding boxes that may be either list or dict.
|
1317
|
+
Falls back to geometry_utils.calculate_iou when both boxes are dicts.
|
1318
|
+
"""
|
1319
|
+
# Handle dict format directly with calculate_iou (supports many keys)
|
1320
|
+
if isinstance(box1, dict) and isinstance(box2, dict):
|
1321
|
+
return calculate_iou(box1, box2)
|
1322
|
+
|
1323
|
+
# Helper to convert bbox (dict or list) to a list [x1,y1,x2,y2]
|
1324
|
+
def _bbox_to_list(bbox):
|
1325
|
+
if bbox is None:
|
1326
|
+
return []
|
1327
|
+
if isinstance(bbox, list):
|
1328
|
+
return bbox[:4] if len(bbox) >= 4 else []
|
1329
|
+
if isinstance(bbox, dict):
|
1330
|
+
if "xmin" in bbox:
|
1331
|
+
return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
|
1332
|
+
if "x1" in bbox:
|
1333
|
+
return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
|
1334
|
+
# Fallback: take first four values in insertion order
|
1335
|
+
values = list(bbox.values())
|
1336
|
+
return values[:4] if len(values) >= 4 else []
|
1337
|
+
# Unsupported type
|
1338
|
+
return []
|
1339
|
+
|
1340
|
+
list1 = _bbox_to_list(box1)
|
1341
|
+
list2 = _bbox_to_list(box2)
|
1342
|
+
|
1343
|
+
if len(list1) < 4 or len(list2) < 4:
|
1344
|
+
return 0.0
|
1345
|
+
|
1346
|
+
x1_min, y1_min, x1_max, y1_max = list1
|
1347
|
+
x2_min, y2_min, x2_max, y2_max = list2
|
1348
|
+
|
1349
|
+
# Ensure correct ordering of coordinates
|
1350
|
+
x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
|
1351
|
+
y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
|
1352
|
+
x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
|
1353
|
+
y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
|
1354
|
+
|
1355
|
+
inter_x_min = max(x1_min, x2_min)
|
1356
|
+
inter_y_min = max(y1_min, y2_min)
|
1357
|
+
inter_x_max = min(x1_max, x2_max)
|
1358
|
+
inter_y_max = min(y1_max, y2_max)
|
1359
|
+
|
1360
|
+
inter_w = max(0.0, inter_x_max - inter_x_min)
|
1361
|
+
inter_h = max(0.0, inter_y_max - inter_y_min)
|
1362
|
+
inter_area = inter_w * inter_h
|
1363
|
+
|
1364
|
+
area1 = (x1_max - x1_min) * (y1_max - y1_min)
|
1365
|
+
area2 = (x2_max - x2_min) * (y2_max - y2_min)
|
1366
|
+
union_area = area1 + area2 - inter_area
|
1367
|
+
|
1368
|
+
return (inter_area / union_area) if union_area > 0 else 0.0
|
1369
|
+
|
1370
|
+
def _get_canonical_id(self, raw_id: Any) -> Any:
|
1371
|
+
"""Return the canonical ID for a raw tracker-generated ID."""
|
1372
|
+
return self._track_aliases.get(raw_id, raw_id)
|
1373
|
+
|
1374
|
+
def _merge_or_register_track(self, raw_id: Any, bbox: List[float]) -> Any:
    """Merge the raw track into an existing canonical track if possible,
    otherwise register it as a new canonical track. Returns the canonical
    ID to use for counting.

    Side effects: updates self._track_aliases (raw -> canonical mapping)
    and self._canonical_tracks (per-canonical state: last_bbox,
    last_update, raw_ids).
    """
    now = time.time()

    # Fast path: raw_id already mapped
    if raw_id in self._track_aliases:
        canonical_id = self._track_aliases[raw_id]
        track_info = self._canonical_tracks.get(canonical_id)
        # Refresh the canonical track's state; a missing entry is tolerated
        # (the alias is still honoured) rather than re-created here.
        if track_info is not None:
            track_info["last_bbox"] = bbox
            track_info["last_update"] = now
            track_info["raw_ids"].add(raw_id)
        return canonical_id

    # Attempt to merge with an existing canonical track
    # NOTE(review): first sufficiently-overlapping track wins; dict
    # insertion order determines match priority when several overlap.
    for canonical_id, info in self._canonical_tracks.items():
        # Only consider recently updated tracks to avoid stale matches
        if now - info["last_update"] > self._track_merge_time_window:
            continue

        iou = self._compute_iou(bbox, info["last_bbox"])
        if iou >= self._track_merge_iou_threshold:
            # Merge raw_id into canonical track
            self._track_aliases[raw_id] = canonical_id
            info["last_bbox"] = bbox
            info["last_update"] = now
            info["raw_ids"].add(raw_id)
            self.logger.debug(
                f"Merged raw track {raw_id} into canonical track {canonical_id} (IoU={iou:.2f})")
            return canonical_id

    # No match found – create a new canonical track
    canonical_id = raw_id
    self._track_aliases[raw_id] = canonical_id
    self._canonical_tracks[canonical_id] = {
        "last_bbox": bbox,
        "last_update": now,
        "raw_ids": {raw_id},
    }
    self.logger.debug(f"Registered new canonical track {canonical_id}")
    return canonical_id
|
1418
|
+
|
1419
|
+
def _format_timestamp(self, timestamp: float) -> str:
|
1420
|
+
"""Format a timestamp for human-readable output."""
|
1421
|
+
return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
|
1422
|
+
|
1423
|
+
def _get_tracking_start_time(self) -> str:
|
1424
|
+
"""Get the tracking start time, formatted as a string."""
|
1425
|
+
if self._tracking_start_time is None:
|
1426
|
+
return "N/A"
|
1427
|
+
return self._format_timestamp(self._tracking_start_time)
|
1428
|
+
|
1429
|
+
def _set_tracking_start_time(self) -> None:
|
1430
|
+
"""Set the tracking start time to the current time."""
|
1431
|
+
self._tracking_start_time = time.time()
|
1432
|
+
|
1433
|
+
def get_config_schema(self) -> Dict[str, Any]:
    """Get configuration schema for proximity detection.

    Returns a JSON-Schema-style dict; sub-schemas are assembled from
    named parts for readability.
    """
    # A zone polygon: at least three [x, y] vertex pairs.
    polygon_schema = {
        "type": "array",
        "items": {
            "type": "array",
            "items": {"type": "number"},
            "minItems": 2,
            "maxItems": 2
        },
        "minItems": 3
    }

    zone_config_schema = {
        "type": "object",
        "properties": {
            "zones": {
                "type": "object",
                "additionalProperties": polygon_schema,
                "description": "Zone definitions as polygons"
            },
            "zone_confidence_thresholds": {
                "type": "object",
                "additionalProperties": {"type": "number", "minimum": 0.0, "maximum": 1.0},
                "description": "Per-zone confidence thresholds"
            }
        }
    }

    alert_config_schema = {
        "type": "object",
        "properties": {
            "count_thresholds": {
                "type": "object",
                "additionalProperties": {"type": "integer", "minimum": 1},
                "description": "Count thresholds for alerts"
            },
            "occupancy_thresholds": {
                "type": "object",
                "additionalProperties": {"type": "integer", "minimum": 1},
                "description": "Zone occupancy thresholds for alerts"
            },
            "alert_type": {
                "type": "array",
                "items": {"type": "string"},
                "default": ["Default"],
                "description": "To pass the type of alert. EG: email, sms, etc."
            },
            "alert_value": {
                "type": "array",
                "items": {"type": "string"},
                "default": ["JSON"],
                "description": "Alert value to pass the value based on type. EG: email id if type is email."
            },
            "alert_incident_category": {
                "type": "array",
                "items": {"type": "string"},
                "default": ["Incident Detection Alert"],
                "description": "Group and name the Alert category Type"
            },
        }
    }

    return {
        "type": "object",
        "properties": {
            "confidence_threshold": {
                "type": "number",
                "minimum": 0.0,
                "maximum": 1.0,
                "default": 0.5,
                "description": "Minimum confidence threshold for detections"
            },
            "enable_tracking": {
                "type": "boolean",
                "default": False,
                "description": "Enable tracking for unique counting"
            },
            "zone_config": zone_config_schema,
            "person_categories": {
                "type": "array",
                "items": {"type": "string"},
                "default": ["person"],
                "description": "Category names that represent people"
            },
            "enable_unique_counting": {
                "type": "boolean",
                "default": True,
                "description": "Enable unique proximity detection using tracking"
            },
            "time_window_minutes": {
                "type": "integer",
                "minimum": 1,
                "default": 60,
                "description": "Time window for counting analysis in minutes"
            },
            "alert_config": alert_config_schema,
        },
        "required": ["confidence_threshold"],
        "additionalProperties": False
    }
|
1528
|
+
|
1529
|
+
def create_default_config(self, **overrides) -> ProximityConfig:
    """Create default configuration with optional overrides.

    Caller-supplied keyword overrides take precedence over the
    built-in defaults.
    """
    params = {
        "category": self.category,
        "usecase": self.name,
        "confidence_threshold": 0.5,
        "enable_tracking": False,
        "enable_analytics": True,
        "enable_unique_counting": True,
        "time_window_minutes": 60,
        "person_categories": ["person"],
        **overrides,
    }
    return ProximityConfig(**params)
|
1543
|
+
|
1544
|
+
def _apply_smoothing(self, data: Any, config: ProximityConfig) -> Any:
    """Apply smoothing to tracking data if enabled.

    Lazily builds the smoothing tracker on first use so its settings
    reflect the config observed at runtime.
    """
    if self.smoothing_tracker is None:
        tracker_config = BBoxSmoothingConfig(
            smoothing_algorithm=config.smoothing_algorithm,
            window_size=config.smoothing_window_size,
            cooldown_frames=config.smoothing_cooldown_frames,
            confidence_threshold=config.confidence_threshold or 0.5,
            confidence_range_factor=config.smoothing_confidence_range_factor,
            enable_smoothing=True
        )
        self.smoothing_tracker = BBoxSmoothingTracker(tracker_config)

    smoothed = bbox_smoothing(data, self.smoothing_tracker.config, self.smoothing_tracker)
    self.logger.debug(f"Applied bbox smoothing to tracking results")
    return smoothed
|