matrice-analytics 0.1.97__py3-none-any.whl → 0.1.124__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. matrice_analytics/post_processing/__init__.py +22 -0
  2. matrice_analytics/post_processing/advanced_tracker/config.py +8 -4
  3. matrice_analytics/post_processing/advanced_tracker/track_class_aggregator.py +128 -0
  4. matrice_analytics/post_processing/advanced_tracker/tracker.py +22 -1
  5. matrice_analytics/post_processing/config.py +17 -2
  6. matrice_analytics/post_processing/core/config.py +107 -1
  7. matrice_analytics/post_processing/face_reg/face_recognition.py +706 -73
  8. matrice_analytics/post_processing/face_reg/people_activity_logging.py +25 -14
  9. matrice_analytics/post_processing/post_processor.py +16 -0
  10. matrice_analytics/post_processing/usecases/__init__.py +9 -0
  11. matrice_analytics/post_processing/usecases/crowdflow.py +1088 -0
  12. matrice_analytics/post_processing/usecases/footfall.py +170 -22
  13. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +57 -38
  14. matrice_analytics/post_processing/usecases/parking_lot_analytics.py +1137 -0
  15. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +30 -4
  16. matrice_analytics/post_processing/usecases/vehicle_monitoring_drone_view.py +246 -3
  17. matrice_analytics/post_processing/usecases/vehicle_monitoring_parking_lot.py +36 -3
  18. matrice_analytics/post_processing/usecases/vehicle_monitoring_wrong_way.py +1021 -0
  19. matrice_analytics/post_processing/utils/__init__.py +5 -0
  20. matrice_analytics/post_processing/utils/agnostic_nms.py +759 -0
  21. matrice_analytics/post_processing/utils/alert_instance_utils.py +55 -7
  22. matrice_analytics/post_processing/utils/business_metrics_manager_utils.py +25 -2
  23. matrice_analytics/post_processing/utils/incident_manager_utils.py +12 -1
  24. matrice_analytics/post_processing/utils/parking_analytics_tracker.py +359 -0
  25. matrice_analytics/post_processing/utils/wrong_way_tracker.py +670 -0
  26. {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.124.dist-info}/METADATA +1 -1
  27. {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.124.dist-info}/RECORD +30 -23
  28. {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.124.dist-info}/WHEEL +0 -0
  29. {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.124.dist-info}/licenses/LICENSE.txt +0 -0
  30. {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.124.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,670 @@
1
+ from typing import Any, Dict, List, Optional, Tuple, Set
2
+ from dataclasses import dataclass, field
3
+ from enum import Enum
4
+ from collections import deque
5
+ import math
6
+ import logging
7
+
8
+ logger = logging.getLogger(__name__)
9
+
10
+
11
class WrongWayState(Enum):
    """Per-track state machine for wrong-way detection.

    A track progresses NORMAL → SUSPECT → WRONG_WAY as wrong-way evidence
    accumulates, and can fall back when it resumes the expected direction.
    """

    NORMAL = "NORMAL"        # moving with traffic, or insufficient evidence
    SUSPECT = "SUSPECT"      # accumulating wrong-way evidence
    WRONG_WAY = "WRONG_WAY"  # confirmed wrong-way movement
16
+
17
+
18
class ReferenceStatus(Enum):
    """Lifecycle of the reference-direction estimate."""

    NONE = "NONE"            # no estimate yet
    LEARNING = "LEARNING"    # samples being collected, not yet trusted
    CONFIRMED = "CONFIRMED"  # reference direction is usable for detection
23
+
24
+
25
class ReferenceSource(Enum):
    """Where the reference direction came from."""

    NONE = "NONE"            # not established
    USER_ZONE = "USER_ZONE"  # derived from a user-drawn zone polygon
    AUTO = "AUTO"            # learned from observed traffic flow
30
+
31
+
32
@dataclass
class TrackMotionState:
    """Per-track motion state for trajectory-based detection."""

    track_id: Any  # tracker-assigned identity
    category: str  # detection class label

    # Last observed anchor point; None until the first observation.
    position_prev: Optional[Tuple[float, float]] = None

    # Exponentially smoothed velocity estimate (pixels/frame).
    velocity_ewma: Tuple[float, float] = (0.0, 0.0)

    # Accumulated wrong-way evidence, clamped to [0, 1].
    wrong_way_confidence: float = 0.0

    # Current state-machine position for this track.
    state: WrongWayState = WrongWayState.NORMAL

    # Bookkeeping for staleness and warm-up checks.
    first_seen_frame: int = 0
    last_seen_frame: int = 0
    frames_tracked: int = 0

    # Snapshot fields used when serialising detection output.
    last_bbox: Optional[Dict[str, Any]] = None
    last_detection_confidence: float = 0.0
    last_direction_score: float = 0.0
59
+
60
+
61
@dataclass
class AutoReferenceState:
    """State for auto-reference direction estimation."""

    status: ReferenceStatus = ReferenceStatus.NONE
    reference_vector: Optional[Tuple[float, float]] = None  # unit vector once set
    confidence: float = 0.0  # grows toward 1.0 while the dominant flow persists

    # Rolling buffer of recent motion samples used for estimation.
    motion_samples: deque = field(default_factory=lambda: deque(maxlen=100))

    # Stability tracking across frames.
    dominant_direction_history: deque = field(default_factory=lambda: deque(maxlen=30))
    frames_since_last_change: int = 0
74
+
75
+
76
+ class WrongWayDetectionTracker:
77
+ """
78
+ Trajectory-based wrong-way vehicle detection tracker.
79
+
80
+ Uses EWMA velocity smoothing and continuous confidence accumulation
81
+ to detect vehicles moving against the expected traffic direction.
82
+
83
+ Reference Direction Sources (in priority order):
84
+ 1. User-defined zone_config (first point → last point)
85
+ 2. Auto-estimation from observed traffic flow
86
+
87
+ Auto-Reference Re-Learning:
88
+ - For AUTO sources, reference is periodically re-learned to adapt to
89
+ changing traffic patterns (e.g., time-of-day flow changes)
90
+ - Re-learning interval configurable via auto_ref_relearn_interval_frames
91
+ - User-defined zones (USER_ZONE) are never re-learned
92
+ """
93
+
94
+ def __init__(
95
+ self,
96
+ # EWMA velocity smoothing
97
+ alpha: float = 0.20,
98
+
99
+ # Minimum velocity to consider motion (pixels/frame)
100
+ v_min: float = 1.2,
101
+
102
+ # Confidence accumulation
103
+ beta: float = 0.10, # Confidence gain per wrong-way frame
104
+ gamma: float = 0.018, # Confidence decay per frame
105
+
106
+ # State thresholds
107
+ c_suspect: float = 0.25, # Threshold to enter SUSPECT
108
+ c_confirm: float = 0.65, # Threshold to confirm WRONG_WAY
109
+
110
+ # Decay from WRONG_WAY
111
+ c_decay_from_wrong: float = 0.30, # Must drop below this to decay
112
+ correct_direction_frames_to_decay: int = 20, # Frames of correct movement
113
+
114
+ # New: require this many consecutive strong wrong-way frames before confirming
115
+ min_confirm_frames: int = 12,
116
+
117
+ # Track cleanup
118
+ stale_track_frames: int = 40,
119
+
120
+ # Auto-reference parameters
121
+ auto_ref_relearn_interval_frames: int = 108000, # 1 hour at 30 FPS
122
+ auto_ref_min_tracks: int = 5,
123
+ auto_ref_warmup_frames: int = 90,
124
+ auto_ref_alpha: float = 0.05,
125
+ auto_ref_confirm_threshold: float = 0.7,
126
+ auto_ref_stability_frames: int = 60
127
+ ):
128
+ # EWMA parameters
129
+ self.alpha = alpha
130
+ self.v_min = v_min
131
+
132
+ # Confidence parameters
133
+ self.beta = beta
134
+ self.gamma = gamma
135
+ self.c_suspect = c_suspect
136
+ self.c_confirm = c_confirm
137
+ self.c_decay_from_wrong = c_decay_from_wrong
138
+ self.correct_direction_frames_to_decay = correct_direction_frames_to_decay
139
+
140
+ # New confirmation streak requirement
141
+ self.min_confirm_frames = min_confirm_frames
142
+
143
+ # Cleanup
144
+ self.stale_track_frames = stale_track_frames
145
+
146
+ # Auto-reference parameters
147
+ self.auto_ref_relearn_interval_frames = auto_ref_relearn_interval_frames
148
+ self.auto_ref_min_tracks = auto_ref_min_tracks
149
+ self.auto_ref_warmup_frames = auto_ref_warmup_frames
150
+ self.auto_ref_alpha = auto_ref_alpha
151
+ self.auto_ref_confirm_threshold = auto_ref_confirm_threshold
152
+ self.auto_ref_stability_frames = auto_ref_stability_frames
153
+
154
+ # Reference direction state
155
+ self._reference_vector: Optional[Tuple[float, float]] = None
156
+ self._reference_source: ReferenceSource = ReferenceSource.NONE
157
+ self._reference_status: ReferenceStatus = ReferenceStatus.NONE
158
+
159
+ # Auto-reference estimation stateauto_ref_relearn_interval_frames
160
+ self._auto_ref_state = AutoReferenceState()
161
+
162
+ # Frame at which auto-reference was last confirmed (for re-learning)
163
+ self._auto_ref_confirmed_at_frame: int = 0
164
+
165
+ # Per-track states
166
+ self._track_states: Dict[Any, TrackMotionState] = {}
167
+
168
+ # For decay from WRONG_WAY tracking
169
+ self._correct_direction_streak: Dict[Any, int] = {}
170
+
171
+ # New: consecutive wrong-way evidence streak for confirmation
172
+ self._wrong_way_streak: Dict[Any, int] = {}
173
+
174
+ # Cumulative counts
175
+ self._confirmed_wrong_way_track_ids: Set[Any] = set()
176
+
177
+ # Frame counter
178
+ self._frame_count = 0
179
+
180
+ logger.info(
181
+ f"WrongWayDetectionTracker initialized: "
182
+ f"alpha={alpha}, v_min={v_min}, beta={beta}, gamma={gamma}, "
183
+ f"c_suspect={c_suspect}, c_confirm={c_confirm}, "
184
+ f"min_confirm_frames={min_confirm_frames}, "
185
+ f"auto_ref_relearn_interval={auto_ref_relearn_interval_frames} frames"
186
+ )
187
+
188
+ def set_reference_from_zone(self, zone_polygon: List[List[float]]) -> bool:
189
+ if not zone_polygon or len(zone_polygon) < 2:
190
+ logger.warning("Zone polygon must have at least 2 points")
191
+ return False
192
+
193
+ first_point = zone_polygon[0]
194
+ last_point = zone_polygon[-1]
195
+
196
+ dx = last_point[0] - first_point[0]
197
+ dy = last_point[1] - first_point[1]
198
+
199
+ magnitude = math.sqrt(dx*dx + dy*dy)
200
+ if magnitude < 1e-6:
201
+ logger.warning("Zone first and last points are too close")
202
+ return False
203
+
204
+ self._reference_vector = (dx / magnitude, dy / magnitude)
205
+ self._reference_source = ReferenceSource.USER_ZONE
206
+ self._reference_status = ReferenceStatus.CONFIRMED
207
+
208
+ logger.info(
209
+ f"Reference direction set from zone: "
210
+ f"({first_point[0]:.1f}, {first_point[1]:.1f}) → "
211
+ f"({last_point[0]:.1f}, {last_point[1]:.1f}), "
212
+ f"vector=({self._reference_vector[0]:.3f}, {self._reference_vector[1]:.3f})"
213
+ )
214
+ return True
215
+
216
+ def _check_auto_ref_relearn(self, current_frame: int) -> None:
217
+ """
218
+ Check if auto-reference should be re-learned based on interval.
219
+
220
+ Only applies to AUTO reference sources. USER_ZONE references are never re-learned.
221
+ During re-learning, the current reference is preserved to avoid detection gaps.
222
+ """
223
+ # Only re-learn AUTO references, never USER_ZONE
224
+ if self._reference_source != ReferenceSource.AUTO:
225
+ return
226
+
227
+ # Check if interval has elapsed since last confirmation
228
+ frames_since_confirm = current_frame - self._auto_ref_confirmed_at_frame
229
+
230
+ if frames_since_confirm >= self.auto_ref_relearn_interval_frames:
231
+ logger.info(
232
+ f"[Frame {current_frame}] Auto-reference RE-LEARN triggered: "
233
+ f"{frames_since_confirm} frames since last confirmation "
234
+ f"(interval={self.auto_ref_relearn_interval_frames}). "
235
+ f"Current vector=({self._reference_vector[0]:.3f}, {self._reference_vector[1]:.3f})"
236
+ )
237
+
238
+ # Reset auto-reference state for fresh learning
239
+ # NOTE: We preserve _reference_vector and _reference_status during re-learning
240
+ # so detection continues to work. Only reset the learning state.
241
+ self._auto_ref_state = AutoReferenceState()
242
+
243
+ # Temporarily set source to NONE to trigger sample collection
244
+ # but keep the current reference active for detection
245
+ self._reference_source = ReferenceSource.NONE
246
+
247
+ # Note: _reference_vector and _reference_status remain CONFIRMED
248
+ # so wrong-way detection continues using the old reference
249
+ # until a new one is confirmed
250
+
251
+ def update(
252
+ self,
253
+ detections: List[Dict[str, Any]],
254
+ current_frame: int
255
+ ) -> Dict[str, Any]:
256
+ self._frame_count = current_frame
257
+
258
+ # Check if auto-reference needs re-learning
259
+ self._check_auto_ref_relearn(current_frame)
260
+
261
+ seen_track_ids: Set[Any] = set()
262
+
263
+ wrong_way_detections: List[Dict[str, Any]] = []
264
+ suspect_detections: List[Dict[str, Any]] = []
265
+
266
+ for detection in detections:
267
+ track_id = detection.get("track_id")
268
+ if track_id is None:
269
+ continue
270
+
271
+ seen_track_ids.add(track_id)
272
+
273
+ bbox = detection.get("bounding_box", detection.get("bbox"))
274
+ if not bbox:
275
+ continue
276
+
277
+ position = self._get_bbox_bottom25_center(bbox)
278
+ if position == (0, 0):
279
+ continue
280
+
281
+ state = self._get_or_create_state(
282
+ track_id=track_id,
283
+ category=detection.get("category", "unknown"),
284
+ current_frame=current_frame,
285
+ position=position
286
+ )
287
+
288
+ state.last_bbox = bbox
289
+ state.last_detection_confidence = detection.get("confidence", 0.0)
290
+ state.last_seen_frame = current_frame
291
+ state.frames_tracked += 1
292
+
293
+ self._process_track_trajectory(state, position, current_frame)
294
+
295
+ # Collect motion samples when no confirmed reference OR during re-learning
296
+ if self._reference_source == ReferenceSource.NONE:
297
+ self._collect_motion_sample(state)
298
+
299
+ if state.state == WrongWayState.WRONG_WAY:
300
+ wrong_way_detections.append(self._build_detection_output(state))
301
+ if track_id not in self._confirmed_wrong_way_track_ids:
302
+ self._confirmed_wrong_way_track_ids.add(track_id)
303
+ logger.info(
304
+ f"[Frame {current_frame}] WRONG-WAY CONFIRMED: "
305
+ f"track_id={track_id}, category={state.category}, "
306
+ f"confidence={state.wrong_way_confidence:.2f}"
307
+ )
308
+ elif state.state == WrongWayState.SUSPECT:
309
+ suspect_detections.append(self._build_detection_output(state))
310
+
311
+ # Update auto-reference when source is NONE (initial learning or re-learning)
312
+ if self._reference_source == ReferenceSource.NONE:
313
+ self._update_auto_reference(current_frame)
314
+
315
+ self._cleanup_stale_tracks(seen_track_ids, current_frame)
316
+
317
+ return {
318
+ "reference_source": self._reference_source.value,
319
+ "reference_status": self._reference_status.value,
320
+ "current_wrong_way_count": len(wrong_way_detections),
321
+ "total_wrong_way_count": len(self._confirmed_wrong_way_track_ids),
322
+ "current_wrong_way_detections": wrong_way_detections,
323
+ "current_suspect_count": len(suspect_detections),
324
+ "current_suspect_detections": suspect_detections
325
+ }
326
+
327
+ def _get_or_create_state(
328
+ self,
329
+ track_id: Any,
330
+ category: str,
331
+ current_frame: int,
332
+ position: Tuple[float, float]
333
+ ) -> TrackMotionState:
334
+ if track_id not in self._track_states:
335
+ self._track_states[track_id] = TrackMotionState(
336
+ track_id=track_id,
337
+ category=category,
338
+ position_prev=position,
339
+ first_seen_frame=current_frame,
340
+ last_seen_frame=current_frame
341
+ )
342
+ logger.debug(f"[Frame {current_frame}] New track: {track_id}")
343
+ return self._track_states[track_id]
344
+
345
+ def _process_track_trajectory(
346
+ self,
347
+ state: TrackMotionState,
348
+ position: Tuple[float, float],
349
+ current_frame: int
350
+ ) -> None:
351
+ if state.position_prev is None:
352
+ state.position_prev = position
353
+ return
354
+
355
+ v_inst = (
356
+ position[0] - state.position_prev[0],
357
+ position[1] - state.position_prev[1]
358
+ )
359
+
360
+ state.velocity_ewma = (
361
+ self.alpha * v_inst[0] + (1 - self.alpha) * state.velocity_ewma[0],
362
+ self.alpha * v_inst[1] + (1 - self.alpha) * state.velocity_ewma[1]
363
+ )
364
+
365
+ state.position_prev = position
366
+
367
+ velocity_magnitude = math.sqrt(
368
+ state.velocity_ewma[0]**2 + state.velocity_ewma[1]**2
369
+ )
370
+
371
+ if velocity_magnitude < self.v_min:
372
+ state.wrong_way_confidence = max(0, state.wrong_way_confidence - self.gamma)
373
+ self._update_state_machine(state, direction_score=0.0)
374
+ return
375
+
376
+ if self._reference_status != ReferenceStatus.CONFIRMED:
377
+ state.last_direction_score = 0.0
378
+ return
379
+
380
+ v_hat = (
381
+ state.velocity_ewma[0] / velocity_magnitude,
382
+ state.velocity_ewma[1] / velocity_magnitude
383
+ )
384
+
385
+ ref_hat = self._reference_vector
386
+
387
+ direction_score = v_hat[0] * ref_hat[0] + v_hat[1] * ref_hat[1]
388
+ state.last_direction_score = direction_score
389
+
390
+ wrong_way_score = max(0, -direction_score)
391
+
392
+ state.wrong_way_confidence = max(0, min(1,
393
+ state.wrong_way_confidence + self.beta * wrong_way_score - self.gamma
394
+ ))
395
+
396
+ if direction_score > 0.5:
397
+ self._correct_direction_streak[state.track_id] = \
398
+ self._correct_direction_streak.get(state.track_id, 0) + 1
399
+ else:
400
+ self._correct_direction_streak[state.track_id] = 0
401
+
402
+ # New: track consecutive strong wrong-way evidence frames
403
+ if wrong_way_score > 0.4:
404
+ self._wrong_way_streak[state.track_id] = \
405
+ self._wrong_way_streak.get(state.track_id, 0) + 1
406
+ else:
407
+ self._wrong_way_streak[state.track_id] = 0
408
+
409
+ self._update_state_machine(state, direction_score)
410
+
411
+ logger.debug(
412
+ f"[Frame {current_frame}] Track {state.track_id}: "
413
+ f"v_mag={velocity_magnitude:.2f}, dir_score={direction_score:.2f}, "
414
+ f"conf={state.wrong_way_confidence:.2f}, state={state.state.value}"
415
+ )
416
+
417
+ def _update_state_machine(self, state: TrackMotionState, direction_score: float) -> None:
418
+ if state.state == WrongWayState.NORMAL:
419
+ if state.wrong_way_confidence >= self.c_suspect:
420
+ state.state = WrongWayState.SUSPECT
421
+ logger.debug(f"Track {state.track_id}: NORMAL → SUSPECT")
422
+
423
+ elif state.state == WrongWayState.SUSPECT:
424
+ streak = self._wrong_way_streak.get(state.track_id, 0)
425
+ if (state.wrong_way_confidence >= self.c_confirm and
426
+ streak >= self.min_confirm_frames):
427
+ state.state = WrongWayState.WRONG_WAY
428
+ logger.info(f"Track {state.track_id}: SUSPECT → WRONG_WAY "
429
+ f"(conf={state.wrong_way_confidence:.2f}, streak={streak})")
430
+ elif state.wrong_way_confidence < self.c_suspect * 0.5:
431
+ state.state = WrongWayState.NORMAL
432
+ logger.debug(f"Track {state.track_id}: SUSPECT → NORMAL")
433
+
434
+ elif state.state == WrongWayState.WRONG_WAY:
435
+ correct_streak = self._correct_direction_streak.get(state.track_id, 0)
436
+
437
+ if (state.wrong_way_confidence < self.c_decay_from_wrong and
438
+ correct_streak >= self.correct_direction_frames_to_decay):
439
+ state.state = WrongWayState.SUSPECT
440
+ logger.info(
441
+ f"Track {state.track_id}: WRONG_WAY → SUSPECT "
442
+ f"(correct_streak={correct_streak})"
443
+ )
444
+
445
+ def _collect_motion_sample(self, state: TrackMotionState) -> None:
446
+ velocity_magnitude = math.sqrt(
447
+ state.velocity_ewma[0]**2 + state.velocity_ewma[1]**2
448
+ )
449
+
450
+ if velocity_magnitude < self.v_min:
451
+ return
452
+
453
+ if state.frames_tracked < 5:
454
+ return
455
+
456
+ v_hat = (
457
+ state.velocity_ewma[0] / velocity_magnitude,
458
+ state.velocity_ewma[1] / velocity_magnitude
459
+ )
460
+
461
+ self._auto_ref_state.motion_samples.append({
462
+ "vector": v_hat,
463
+ "magnitude": velocity_magnitude,
464
+ "track_id": state.track_id
465
+ })
466
+
467
+ def _update_auto_reference(self, current_frame: int) -> None:
468
+ if current_frame < self.auto_ref_warmup_frames:
469
+ self._reference_status = ReferenceStatus.LEARNING
470
+ return
471
+
472
+ samples = list(self._auto_ref_state.motion_samples)
473
+
474
+ unique_tracks = set(s["track_id"] for s in samples)
475
+ if len(unique_tracks) < self.auto_ref_min_tracks:
476
+ self._reference_status = ReferenceStatus.LEARNING
477
+ return
478
+
479
+ avg_x = sum(s["vector"][0] * s["magnitude"] for s in samples)
480
+ avg_y = sum(s["vector"][1] * s["magnitude"] for s in samples)
481
+
482
+ magnitude = math.sqrt(avg_x*avg_x + avg_y*avg_y)
483
+ if magnitude < 1e-6:
484
+ self._reference_status = ReferenceStatus.LEARNING
485
+ return
486
+
487
+ guess = (avg_x / magnitude, avg_y / magnitude)
488
+
489
+ cluster_a_strength = 0.0
490
+ cluster_b_strength = 0.0
491
+ cluster_a_vectors = []
492
+ cluster_b_vectors = []
493
+
494
+ for sample in samples:
495
+ dot = sample["vector"][0] * guess[0] + sample["vector"][1] * guess[1]
496
+ if dot >= 0:
497
+ cluster_a_strength += sample["magnitude"]
498
+ cluster_a_vectors.append(sample)
499
+ else:
500
+ cluster_b_strength += sample["magnitude"]
501
+ cluster_b_vectors.append(sample)
502
+
503
+ total_strength = cluster_a_strength + cluster_b_strength
504
+ if total_strength < 1e-6:
505
+ return
506
+
507
+ dominance_ratio = max(cluster_a_strength, cluster_b_strength) / total_strength
508
+
509
+ if dominance_ratio < 0.65:
510
+ self._auto_ref_state.frames_since_last_change = 0
511
+ return
512
+
513
+ dominant_cluster = cluster_a_vectors if cluster_a_strength > cluster_b_strength else cluster_b_vectors
514
+
515
+ dom_x = sum(s["vector"][0] * s["magnitude"] for s in dominant_cluster)
516
+ dom_y = sum(s["vector"][1] * s["magnitude"] for s in dominant_cluster)
517
+ dom_mag = math.sqrt(dom_x*dom_x + dom_y*dom_y)
518
+
519
+ if dom_mag < 1e-6:
520
+ return
521
+
522
+ dominant_direction = (dom_x / dom_mag, dom_y / dom_mag)
523
+
524
+ if self._auto_ref_state.reference_vector is None:
525
+ self._auto_ref_state.reference_vector = dominant_direction
526
+ else:
527
+ self._auto_ref_state.reference_vector = (
528
+ self.auto_ref_alpha * dominant_direction[0] +
529
+ (1 - self.auto_ref_alpha) * self._auto_ref_state.reference_vector[0],
530
+ self.auto_ref_alpha * dominant_direction[1] +
531
+ (1 - self.auto_ref_alpha) * self._auto_ref_state.reference_vector[1]
532
+ )
533
+ ref_mag = math.sqrt(
534
+ self._auto_ref_state.reference_vector[0]**2 +
535
+ self._auto_ref_state.reference_vector[1]**2
536
+ )
537
+ if ref_mag > 1e-6:
538
+ self._auto_ref_state.reference_vector = (
539
+ self._auto_ref_state.reference_vector[0] / ref_mag,
540
+ self._auto_ref_state.reference_vector[1] / ref_mag
541
+ )
542
+
543
+ self._auto_ref_state.confidence = min(1.0,
544
+ self._auto_ref_state.confidence + 0.02 * dominance_ratio
545
+ )
546
+ self._auto_ref_state.frames_since_last_change += 1
547
+
548
+ if (self._auto_ref_state.confidence >= self.auto_ref_confirm_threshold and
549
+ self._auto_ref_state.frames_since_last_change >= self.auto_ref_stability_frames):
550
+
551
+ # Log direction change if this is a re-learn
552
+ if self._reference_vector is not None:
553
+ old_vec = self._reference_vector
554
+ new_vec = self._auto_ref_state.reference_vector
555
+ # Calculate angle difference
556
+ dot_product = old_vec[0] * new_vec[0] + old_vec[1] * new_vec[1]
557
+ angle_diff_deg = math.degrees(math.acos(max(-1, min(1, dot_product))))
558
+ logger.info(
559
+ f"[Frame {current_frame}] Auto-reference UPDATED: "
560
+ f"old=({old_vec[0]:.3f}, {old_vec[1]:.3f}) → "
561
+ f"new=({new_vec[0]:.3f}, {new_vec[1]:.3f}), "
562
+ f"angle_change={angle_diff_deg:.1f}°"
563
+ )
564
+
565
+ self._reference_vector = self._auto_ref_state.reference_vector
566
+ self._reference_source = ReferenceSource.AUTO
567
+ self._reference_status = ReferenceStatus.CONFIRMED
568
+
569
+ # Record confirmation frame for re-learn interval tracking
570
+ self._auto_ref_confirmed_at_frame = current_frame
571
+
572
+ logger.info(
573
+ f"[Frame {current_frame}] Auto-reference CONFIRMED: "
574
+ f"vector=({self._reference_vector[0]:.3f}, {self._reference_vector[1]:.3f}), "
575
+ f"confidence={self._auto_ref_state.confidence:.2f}"
576
+ )
577
+ else:
578
+ self._reference_status = ReferenceStatus.LEARNING
579
+
580
+ def _build_detection_output(self, state: TrackMotionState) -> Dict[str, Any]:
581
+ return {
582
+ "track_id": state.track_id,
583
+ "category": state.category,
584
+ "bbox": state.last_bbox,
585
+ "confidence": state.last_detection_confidence,
586
+ "wrong_way_confidence": round(state.wrong_way_confidence, 3),
587
+ "direction_score": round(state.last_direction_score, 3),
588
+ "state": state.state.value
589
+ }
590
+
591
+ def _cleanup_stale_tracks(self, seen_track_ids: Set[Any], current_frame: int) -> None:
592
+ stale_ids = [
593
+ tid for tid, state in self._track_states.items()
594
+ if tid not in seen_track_ids and
595
+ current_frame - state.last_seen_frame > self.stale_track_frames
596
+ ]
597
+
598
+ for tid in stale_ids:
599
+ del self._track_states[tid]
600
+ self._correct_direction_streak.pop(tid, None)
601
+ self._wrong_way_streak.pop(tid, None)
602
+ logger.debug(f"[Frame {current_frame}] Removed stale track: {tid}")
603
+
604
+ @staticmethod
605
+ def _get_bbox_bottom25_center(bbox: Dict[str, Any]) -> Tuple[float, float]:
606
+ if isinstance(bbox, dict):
607
+ if "xmin" in bbox:
608
+ x_center = (bbox["xmin"] + bbox["xmax"]) / 2
609
+ height = bbox["ymax"] - bbox["ymin"]
610
+ y_target = bbox["ymax"] - 0.25 * height
611
+ return (x_center, y_target)
612
+ elif "x1" in bbox:
613
+ x_center = (bbox["x1"] + bbox["x2"]) / 2
614
+ height = bbox["y2"] - bbox["y1"]
615
+ y_target = bbox["y2"] - 0.25 * height
616
+ return (x_center, y_target)
617
+ return (0, 0)
618
+
619
+ def reset(self) -> None:
620
+ self._track_states.clear()
621
+ self._correct_direction_streak.clear()
622
+ self._wrong_way_streak.clear()
623
+ self._confirmed_wrong_way_track_ids.clear()
624
+ self._auto_ref_state = AutoReferenceState()
625
+ self._auto_ref_confirmed_at_frame = 0
626
+ self._frame_count = 0
627
+
628
+ if self._reference_source != ReferenceSource.USER_ZONE:
629
+ self._reference_vector = None
630
+ self._reference_source = ReferenceSource.NONE
631
+ self._reference_status = ReferenceStatus.NONE
632
+
633
+ logger.info("WrongWayDetectionTracker reset")
634
+
635
+ def get_reference_info(self) -> Dict[str, Any]:
636
+ """Get current reference direction information including re-learn status."""
637
+ info = {
638
+ "source": self._reference_source.value,
639
+ "status": self._reference_status.value,
640
+ "vector": self._reference_vector,
641
+ "auto_confidence": self._auto_ref_state.confidence if self._reference_source == ReferenceSource.AUTO else None
642
+ }
643
+
644
+ # Add re-learn timing info for AUTO sources
645
+ if self._reference_source == ReferenceSource.AUTO or self._auto_ref_confirmed_at_frame > 0:
646
+ frames_since_confirm = self._frame_count - self._auto_ref_confirmed_at_frame
647
+ frames_until_relearn = max(0, self.auto_ref_relearn_interval_frames - frames_since_confirm)
648
+ info["frames_since_last_confirm"] = frames_since_confirm
649
+ info["frames_until_relearn"] = frames_until_relearn
650
+ info["relearn_interval_frames"] = self.auto_ref_relearn_interval_frames
651
+
652
+ return info
653
+
654
+ def get_stats(self) -> Dict[str, Any]:
655
+ return {
656
+ "active_tracks": len(self._track_states),
657
+ "total_wrong_way_confirmed": len(self._confirmed_wrong_way_track_ids),
658
+ "reference_source": self._reference_source.value,
659
+ "reference_status": self._reference_status.value,
660
+ "frame_count": self._frame_count,
661
+ "auto_ref_confirmed_at_frame": self._auto_ref_confirmed_at_frame
662
+ }
663
+
664
+ def __repr__(self) -> str:
665
+ return (
666
+ f"WrongWayDetectionTracker("
667
+ f"ref={self._reference_source.value}/{self._reference_status.value}, "
668
+ f"tracks={len(self._track_states)}, "
669
+ f"wrong_way={len(self._confirmed_wrong_way_track_ids)})"
670
+ )
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: matrice_analytics
3
- Version: 0.1.97
3
+ Version: 0.1.124
4
4
  Summary: Common server utilities for Matrice.ai services
5
5
  Author-email: "Matrice.ai" <dipendra@matrice.ai>
6
6
  License-Expression: MIT