matrice-analytics 0.1.97__py3-none-any.whl → 0.1.124__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. matrice_analytics/post_processing/__init__.py +22 -0
  2. matrice_analytics/post_processing/advanced_tracker/config.py +8 -4
  3. matrice_analytics/post_processing/advanced_tracker/track_class_aggregator.py +128 -0
  4. matrice_analytics/post_processing/advanced_tracker/tracker.py +22 -1
  5. matrice_analytics/post_processing/config.py +17 -2
  6. matrice_analytics/post_processing/core/config.py +107 -1
  7. matrice_analytics/post_processing/face_reg/face_recognition.py +706 -73
  8. matrice_analytics/post_processing/face_reg/people_activity_logging.py +25 -14
  9. matrice_analytics/post_processing/post_processor.py +16 -0
  10. matrice_analytics/post_processing/usecases/__init__.py +9 -0
  11. matrice_analytics/post_processing/usecases/crowdflow.py +1088 -0
  12. matrice_analytics/post_processing/usecases/footfall.py +170 -22
  13. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +57 -38
  14. matrice_analytics/post_processing/usecases/parking_lot_analytics.py +1137 -0
  15. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +30 -4
  16. matrice_analytics/post_processing/usecases/vehicle_monitoring_drone_view.py +246 -3
  17. matrice_analytics/post_processing/usecases/vehicle_monitoring_parking_lot.py +36 -3
  18. matrice_analytics/post_processing/usecases/vehicle_monitoring_wrong_way.py +1021 -0
  19. matrice_analytics/post_processing/utils/__init__.py +5 -0
  20. matrice_analytics/post_processing/utils/agnostic_nms.py +759 -0
  21. matrice_analytics/post_processing/utils/alert_instance_utils.py +55 -7
  22. matrice_analytics/post_processing/utils/business_metrics_manager_utils.py +25 -2
  23. matrice_analytics/post_processing/utils/incident_manager_utils.py +12 -1
  24. matrice_analytics/post_processing/utils/parking_analytics_tracker.py +359 -0
  25. matrice_analytics/post_processing/utils/wrong_way_tracker.py +670 -0
  26. {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.124.dist-info}/METADATA +1 -1
  27. {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.124.dist-info}/RECORD +30 -23
  28. {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.124.dist-info}/WHEEL +0 -0
  29. {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.124.dist-info}/licenses/LICENSE.txt +0 -0
  30. {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.124.dist-info}/top_level.txt +0 -0
@@ -63,6 +63,13 @@ except ImportError:
63
63
  aioredis = None
64
64
  HAS_AIREDIS = False
65
65
 
66
+ try:
67
+ import redis as redis_sync
68
+ HAS_REDIS_SYNC = True
69
+ except ImportError:
70
+ redis_sync = None # type: ignore[assignment]
71
+ HAS_REDIS_SYNC = False
72
+
66
73
  from ..core.base import (
67
74
  BaseProcessor,
68
75
  ProcessingContext,
@@ -116,6 +123,13 @@ class RedisFaceMatcher:
116
123
  """Handles Redis-based face similarity search."""
117
124
 
118
125
  ACTION_ID_PATTERN = re.compile(r"^[0-9a-f]{8,}$", re.IGNORECASE)
126
+ # Shared sync Redis client per-process (avoids asyncio loop binding issues when caller uses asyncio.run per frame)
127
+ _shared_sync_client = None
128
+ _shared_sync_client_sig: Optional[Tuple[Any, ...]] = None
129
+ _shared_sync_client_lock = threading.Lock()
130
+ # Shared app_deployment_id cache per-process (once resolved, reused across all instances/frames)
131
+ _shared_app_dep_id: Optional[str] = None
132
+ _shared_app_dep_id_lock = threading.Lock()
119
133
 
120
134
  def __init__(
121
135
  self,
@@ -148,6 +162,8 @@ class RedisFaceMatcher:
148
162
  os.getenv("FACE_RECOG_REDIS_STREAM_MAXLEN", "5000")
149
163
  )
150
164
  self._redis_client = None # type: ignore[assignment]
165
+ self._redis_client_loop_id: Optional[int] = None # Track which event loop owns the client
166
+ self._redis_sync_client = None # sync redis client (loop-agnostic)
151
167
  self._redis_connection_params: Optional[Dict[str, Any]] = None
152
168
  self._app_deployment_id = os.getenv("APP_DEPLOYMENT_ID")
153
169
  self._action_id = (
@@ -156,13 +172,166 @@ class RedisFaceMatcher:
156
172
  or self._discover_action_id()
157
173
  )
158
174
  self._redis_server_id = os.getenv("REDIS_SERVER_ID")
159
- self._app_dep_lock = asyncio.Lock()
160
- self._session_lock = asyncio.Lock()
161
- self._redis_lock = asyncio.Lock()
175
+ # Locks will be created per-loop to avoid cross-loop issues
176
+ self._app_dep_lock: Optional[asyncio.Lock] = None
177
+ self._session_lock: Optional[asyncio.Lock] = None
178
+ self._redis_lock: Optional[asyncio.Lock] = None
179
+ self._locks_loop_id: Optional[int] = None
162
180
  self._redis_warning_logged = False
163
181
 
182
+ def _get_current_loop_id(self) -> int:
183
+ """Get a unique identifier for the current running event loop."""
184
+ try:
185
+ loop = asyncio.get_running_loop()
186
+ return id(loop)
187
+ except RuntimeError:
188
+ return 0
189
+
190
+ def _ensure_locks_for_current_loop(self) -> None:
191
+ """Ensure locks are created for the current event loop."""
192
+ current_loop_id = self._get_current_loop_id()
193
+ if self._locks_loop_id != current_loop_id:
194
+ # Create new locks for this event loop
195
+ self._app_dep_lock = asyncio.Lock()
196
+ self._session_lock = asyncio.Lock()
197
+ self._redis_lock = asyncio.Lock()
198
+ self._locks_loop_id = current_loop_id
199
+
164
200
  def is_available(self) -> bool:
165
- return HAS_AIREDIS
201
+ return HAS_REDIS_SYNC or HAS_AIREDIS
202
+
203
+ def _get_app_dep_id_sync(self) -> Optional[str]:
204
+ """
205
+ Get app_deployment_id from env vars or class-level cache (SYNC - no async calls).
206
+
207
+ This avoids async operations that would fail when asyncio.run() creates a new loop per frame.
208
+ """
209
+ # Check instance cache first
210
+ if self._app_deployment_id:
211
+ return self._app_deployment_id
212
+
213
+ # Check class-level cache (shared across instances in this process)
214
+ with self.__class__._shared_app_dep_id_lock:
215
+ if self.__class__._shared_app_dep_id:
216
+ self._app_deployment_id = self.__class__._shared_app_dep_id
217
+ return self._app_deployment_id
218
+
219
+ # Try env var
220
+ env_app_dep_id = os.getenv("APP_DEPLOYMENT_ID")
221
+ if env_app_dep_id:
222
+ self._app_deployment_id = env_app_dep_id
223
+ with self.__class__._shared_app_dep_id_lock:
224
+ self.__class__._shared_app_dep_id = env_app_dep_id
225
+ return self._app_deployment_id
226
+
227
+ return None
228
+
229
+ def _get_redis_sync_client(self) -> Optional[Any]:
230
+ """
231
+ Get a **synchronous** redis client (SYNC method - no async at all).
232
+
233
+ Why sync and why no async calls?
234
+ - The py_inference worker calls `asyncio.run()` per frame, creating/closing a new event loop each call.
235
+ - `redis.asyncio` clients (and asyncio locks) are bound to the loop they were created on.
236
+ - Any cached async objects become invalid when the loop changes, causing:
237
+ - "Future attached to a different loop"
238
+ - "Event loop is closed"
239
+
240
+ This method is COMPLETELY SYNC:
241
+ - Uses only env vars for Redis connection (no async RPC to fetch params)
242
+ - Uses threading.Lock (not asyncio.Lock) for thread-safety
243
+ - Creates a sync redis client that is loop-agnostic and safe across frames
244
+
245
+ Required env vars (set at least one):
246
+ - FACE_RECOG_REDIS_URL or REDIS_URL: Full Redis URL
247
+ - OR: FACE_RECOG_REDIS_HOST + FACE_RECOG_REDIS_PORT
248
+ """
249
+ if not HAS_REDIS_SYNC or redis_sync is None:
250
+ return None
251
+
252
+ # Fast path: already have a cached client for this instance
253
+ if self._redis_sync_client is not None:
254
+ return self._redis_sync_client
255
+
256
+ # Resolve URL from env vars (NO async calls)
257
+ redis_url = self.redis_url
258
+ if not redis_url:
259
+ redis_url = os.getenv("FACE_RECOG_REDIS_URL") or os.getenv("REDIS_URL")
260
+ if not redis_url:
261
+ host = os.getenv("FACE_RECOG_REDIS_HOST")
262
+ port = os.getenv("FACE_RECOG_REDIS_PORT")
263
+ if host and port:
264
+ redis_url = f"redis://{host}:{port}/0"
265
+
266
+ if not redis_url:
267
+ # Cannot create sync client without connection info from env
268
+ # This will fall back to async path (which may fail, but that's expected)
269
+ self.logger.debug(
270
+ "No Redis URL/host available from env vars for sync client. "
271
+ "Set FACE_RECOG_REDIS_URL or FACE_RECOG_REDIS_HOST+FACE_RECOG_REDIS_PORT."
272
+ )
273
+ return None
274
+
275
+ # Cache the URL for future use
276
+ self.redis_url = redis_url
277
+
278
+ def _env_float(name: str, default: float) -> float:
279
+ raw = os.getenv(name)
280
+ if raw is None or raw == "":
281
+ return float(default)
282
+ try:
283
+ return float(raw)
284
+ except Exception:
285
+ return float(default)
286
+
287
+ # Socket timeouts: keep bounded so a bad redis doesn't stall the worker
288
+ socket_connect_timeout = max(
289
+ 0.2, _env_float("FACE_RECOG_REDIS_CONNECT_TIMEOUT_S", 1.0)
290
+ )
291
+ socket_timeout = max(
292
+ 0.2,
293
+ _env_float(
294
+ "FACE_RECOG_REDIS_SOCKET_TIMEOUT_S", float(max(1.0, self.response_timeout))
295
+ ),
296
+ )
297
+
298
+ sig = ("url", str(redis_url))
299
+
300
+ # Use threading.Lock (NOT asyncio.Lock) for process-level sharing
301
+ with self.__class__._shared_sync_client_lock:
302
+ if (
303
+ self.__class__._shared_sync_client is not None
304
+ and self.__class__._shared_sync_client_sig == sig
305
+ ):
306
+ self._redis_sync_client = self.__class__._shared_sync_client
307
+ return self._redis_sync_client
308
+
309
+ # Create new shared client
310
+ try:
311
+ client = redis_sync.Redis.from_url(
312
+ str(redis_url),
313
+ decode_responses=True,
314
+ health_check_interval=30,
315
+ socket_connect_timeout=socket_connect_timeout,
316
+ socket_timeout=socket_timeout,
317
+ retry_on_timeout=True,
318
+ )
319
+ self.logger.info(
320
+ "[SYNC] Created sync Redis client for face matcher: %s",
321
+ redis_url,
322
+ )
323
+ except Exception as exc:
324
+ self.logger.error(
325
+ "[SYNC] Failed to create sync Redis client for face matcher: %s",
326
+ exc,
327
+ exc_info=True,
328
+ )
329
+ return None
330
+
331
+ self.__class__._shared_sync_client = client
332
+ self.__class__._shared_sync_client_sig = sig
333
+ self._redis_sync_client = client
334
+ return self._redis_sync_client
166
335
 
167
336
  async def match_embedding(
168
337
  self,
@@ -172,10 +341,10 @@ class RedisFaceMatcher:
172
341
  min_confidence: Optional[float] = None,
173
342
  ) -> Optional[RedisFaceMatchResult]:
174
343
  """Send embedding to Redis stream and wait for match result."""
175
- if not HAS_AIREDIS:
344
+ if not (HAS_REDIS_SYNC or HAS_AIREDIS):
176
345
  if not self._redis_warning_logged:
177
346
  self.logger.warning(
178
- "redis.asyncio not available; skipping Redis face matcher flow"
347
+ "redis client not available; skipping Redis face matcher flow"
179
348
  )
180
349
  self._redis_warning_logged = True
181
350
  return None
@@ -191,15 +360,43 @@ class RedisFaceMatcher:
191
360
  print(f"WARNING: Embedding list has zero length for search_id={search_id}")
192
361
  return None
193
362
 
194
- app_dep_id = await self._ensure_app_deployment_id()
195
- if not app_dep_id:
196
- return None
363
+ resolved_search_id = str(search_id or self._generate_search_id())
197
364
 
198
- redis_client = await self._ensure_redis_client()
199
- if redis_client is None:
200
- return None
365
+ # ============================================================
366
+ # Preferred: SYNC redis client (loop-agnostic, safe when caller
367
+ # uses asyncio.run() per frame and closes the loop each call).
368
+ # This method is COMPLETELY SYNC - no async calls at all.
369
+ # ============================================================
370
+ redis_client_sync = None
371
+ if HAS_REDIS_SYNC:
372
+ try:
373
+ redis_client_sync = self._get_redis_sync_client()
374
+ except Exception:
375
+ redis_client_sync = None
376
+
377
+ # Get app_dep_id - for SYNC path, use only cached/env values (no async calls)
378
+ app_dep_id: Optional[str] = None
379
+ if redis_client_sync is not None:
380
+ # SYNC path: get app_dep_id from cache or env only (no async)
381
+ app_dep_id = self._get_app_dep_id_sync()
382
+ if not app_dep_id:
383
+ self.logger.warning(
384
+ "[SYNC] Cannot get app_deployment_id from env/cache. "
385
+ "Set APP_DEPLOYMENT_ID env var or ensure it was resolved previously."
386
+ )
387
+ # Fall back to async path below
388
+ redis_client_sync = None
389
+
390
+ # If we don't have sync client (or couldn't get app_dep_id sync), use async path
391
+ if redis_client_sync is None:
392
+ app_dep_id = await self._ensure_app_deployment_id()
393
+ if not app_dep_id:
394
+ return None
395
+ # Cache for future sync calls
396
+ with self.__class__._shared_app_dep_id_lock:
397
+ if self.__class__._shared_app_dep_id is None:
398
+ self.__class__._shared_app_dep_id = app_dep_id
201
399
 
202
- resolved_search_id = str(search_id or self._generate_search_id())
203
400
  payload = {
204
401
  "appDepId": app_dep_id,
205
402
  "searchId": resolved_search_id,
@@ -210,6 +407,163 @@ class RedisFaceMatcher:
210
407
  ),
211
408
  }
212
409
 
410
+ if redis_client_sync is not None:
411
+ try:
412
+ self.logger.debug(
413
+ f"[SYNC] Sending embedding to Redis stream {self.stream_name} with search_id={resolved_search_id}, "
414
+ f"embedding_len={len(embedding_list)}, minConfidence={payload.get('minConfidence')}"
415
+ )
416
+ redis_client_sync.xadd(
417
+ self.stream_name,
418
+ {"data": json.dumps(payload, separators=(",", ":"))},
419
+ maxlen=self.stream_maxlen,
420
+ approximate=True,
421
+ )
422
+ self.logger.debug(
423
+ f"[SYNC] Successfully sent embedding to Redis stream for search_id={resolved_search_id}"
424
+ )
425
+ except Exception as exc:
426
+ self.logger.error(
427
+ "[SYNC] Failed to enqueue face embedding to Redis stream %s: %s",
428
+ self.stream_name,
429
+ exc,
430
+ exc_info=True,
431
+ )
432
+ return None
433
+
434
+ result_key = f"{resolved_search_id}_{app_dep_id}"
435
+ deadline = time.monotonic() + self.response_timeout
436
+ poll_count = 0
437
+ start_poll_time = time.monotonic()
438
+
439
+ self.logger.debug(
440
+ f"[SYNC] Waiting for Redis response with key={result_key}, timeout={self.response_timeout:.3f}s"
441
+ )
442
+
443
+ while time.monotonic() < deadline:
444
+ try:
445
+ raw_value = redis_client_sync.get(result_key)
446
+ poll_count += 1
447
+ except Exception as exc:
448
+ self.logger.error(
449
+ "[SYNC] Failed to read Redis result for key %s: %s",
450
+ result_key,
451
+ exc,
452
+ exc_info=True,
453
+ )
454
+ return None
455
+
456
+ if raw_value:
457
+ try:
458
+ redis_client_sync.delete(result_key)
459
+ except Exception:
460
+ pass
461
+
462
+ try:
463
+ parsed = json.loads(raw_value)
464
+ except Exception as exc:
465
+ self.logger.error(
466
+ "[SYNC] Unable to parse Redis face match response: %s",
467
+ exc,
468
+ exc_info=True,
469
+ )
470
+ return None
471
+
472
+ # Log and print the raw Redis response for debugging
473
+ self.logger.info(
474
+ f"[SYNC] Redis raw response for search_id={resolved_search_id}: {parsed}"
475
+ )
476
+
477
+ match_data = None
478
+ if isinstance(parsed, list) and parsed:
479
+ match_data = parsed[0]
480
+ elif isinstance(parsed, dict):
481
+ match_data = parsed
482
+ else:
483
+ self.logger.warning(
484
+ "[SYNC] Redis response is neither list nor dict: %s, value: %s",
485
+ type(parsed),
486
+ parsed,
487
+ )
488
+
489
+ if not isinstance(match_data, dict):
490
+ self.logger.warning(
491
+ "[SYNC] match_data is not a dict after extraction: %s, value: %s",
492
+ type(match_data),
493
+ match_data,
494
+ )
495
+ return None
496
+
497
+ staff_id = match_data.get("staffId") or match_data.get("staff_id")
498
+ if not staff_id:
499
+ self.logger.warning(
500
+ "[SYNC] No staffId found in match_data: %s", match_data
501
+ )
502
+ return None
503
+
504
+ person_name = str(match_data.get("name") or "Unknown")
505
+ confidence = float(
506
+ match_data.get("conf") or match_data.get("confidence") or 0.0
507
+ )
508
+ employee_id = match_data.get("employeeId") or match_data.get(
509
+ "embeddingId"
510
+ )
511
+
512
+ min_conf = float(
513
+ min_confidence
514
+ if min_confidence is not None
515
+ else self.default_min_confidence
516
+ )
517
+ if confidence < min_conf:
518
+ self.logger.debug(
519
+ "[SYNC] Redis match confidence %.3f below threshold %.3f, rejecting",
520
+ confidence,
521
+ min_conf,
522
+ )
523
+ return None
524
+
525
+ result = RedisFaceMatchResult(
526
+ staff_id=str(staff_id),
527
+ person_name=person_name,
528
+ confidence=round(confidence, 3),
529
+ employee_id=str(employee_id) if employee_id else None,
530
+ raw=match_data,
531
+ )
532
+
533
+ poll_time = (time.monotonic() - start_poll_time) * 1000.0
534
+ self.logger.info(
535
+ "[SYNC] Redis match result created (polls=%d, poll_time=%.2fms): staff_id=%s, name=%s, conf=%.3f",
536
+ poll_count,
537
+ poll_time,
538
+ result.staff_id,
539
+ result.person_name,
540
+ result.confidence,
541
+ )
542
+ return result
543
+
544
+ time.sleep(self.poll_interval)
545
+
546
+ poll_time = (time.monotonic() - start_poll_time) * 1000.0
547
+ self.logger.warning(
548
+ "[SYNC] Timed out waiting for Redis face match result for key %s (timeout=%.3fs, polls=%d, poll_time=%.2fms)",
549
+ result_key,
550
+ self.response_timeout,
551
+ poll_count,
552
+ poll_time,
553
+ )
554
+ return None
555
+
556
+ # ============================================================
557
+ # Fallback: ASYNC redis client (kept for environments where
558
+ # sync redis is unavailable).
559
+ # ============================================================
560
+ if not HAS_AIREDIS:
561
+ return None
562
+
563
+ redis_client = await self._ensure_redis_client()
564
+ if redis_client is None:
565
+ return None
566
+
213
567
  try:
214
568
  self.logger.debug(
215
569
  f"Sending embedding to Redis stream {self.stream_name} with search_id={resolved_search_id}, "
@@ -222,6 +576,44 @@ class RedisFaceMatcher:
222
576
  approximate=True,
223
577
  )
224
578
  self.logger.debug(f"Successfully sent embedding to Redis stream for search_id={resolved_search_id}")
579
+ except RuntimeError as exc:
580
+ # Handle event loop closed/mismatch errors - invalidate client and retry once
581
+ exc_str = str(exc).lower()
582
+ if "event loop" in exc_str or "different loop" in exc_str or "closed" in exc_str:
583
+ self.logger.warning(
584
+ "Redis client event loop error detected, invalidating client and retrying: %s", exc
585
+ )
586
+ # Invalidate the client so next call recreates it
587
+ self._redis_client = None
588
+ self._redis_client_loop_id = None
589
+ # Retry once with fresh client
590
+ try:
591
+ redis_client = await self._ensure_redis_client()
592
+ if redis_client:
593
+ await redis_client.xadd(
594
+ self.stream_name,
595
+ {"data": json.dumps(payload, separators=(",", ":"))},
596
+ maxlen=self.stream_maxlen,
597
+ approximate=True,
598
+ )
599
+ self.logger.info(f"Successfully sent embedding after client refresh for search_id={resolved_search_id}")
600
+ else:
601
+ self.logger.error("Failed to recreate Redis client after event loop error")
602
+ return None
603
+ except Exception as retry_exc:
604
+ self.logger.error(
605
+ "Retry also failed after Redis client refresh: %s", retry_exc, exc_info=True
606
+ )
607
+ return None
608
+ else:
609
+ self.logger.error(
610
+ "Failed to enqueue face embedding to Redis stream %s: %s",
611
+ self.stream_name,
612
+ exc,
613
+ exc_info=True,
614
+ )
615
+ print(f"ERROR: Failed to send to Redis stream {self.stream_name}: {exc}")
616
+ return None
225
617
  except Exception as exc:
226
618
  self.logger.error(
227
619
  "Failed to enqueue face embedding to Redis stream %s: %s",
@@ -244,6 +636,24 @@ class RedisFaceMatcher:
244
636
  try:
245
637
  raw_value = await redis_client.get(result_key)
246
638
  poll_count += 1
639
+ except RuntimeError as exc:
640
+ # Handle event loop errors - invalidate client and return
641
+ exc_str = str(exc).lower()
642
+ if "event loop" in exc_str or "different loop" in exc_str or "closed" in exc_str:
643
+ self.logger.warning(
644
+ "Redis client event loop error in poll loop, invalidating client: %s", exc
645
+ )
646
+ self._redis_client = None
647
+ self._redis_client_loop_id = None
648
+ return None
649
+ self.logger.error(
650
+ "Failed to read Redis result for key %s: %s",
651
+ result_key,
652
+ exc,
653
+ exc_info=True,
654
+ )
655
+ print(f"ERROR: Failed to read Redis result for key {result_key}: {exc}")
656
+ return None
247
657
  except Exception as exc:
248
658
  self.logger.error(
249
659
  "Failed to read Redis result for key %s: %s",
@@ -374,6 +784,9 @@ class RedisFaceMatcher:
374
784
  if self._app_deployment_id:
375
785
  return self._app_deployment_id
376
786
 
787
+ # Ensure locks are valid for the current event loop
788
+ self._ensure_locks_for_current_loop()
789
+
377
790
  async with self._app_dep_lock:
378
791
  if self._app_deployment_id:
379
792
  return self._app_deployment_id
@@ -420,6 +833,10 @@ class RedisFaceMatcher:
420
833
  return None
421
834
 
422
835
  self._app_deployment_id = str(app_dep_id)
836
+ # Also cache at class level for sync path to use
837
+ with self.__class__._shared_app_dep_id_lock:
838
+ if self.__class__._shared_app_dep_id is None:
839
+ self.__class__._shared_app_dep_id = self._app_deployment_id
423
840
  if redis_server_id:
424
841
  self._redis_server_id = str(redis_server_id)
425
842
  self.logger.info(
@@ -437,6 +854,9 @@ class RedisFaceMatcher:
437
854
  )
438
855
  return self._session
439
856
 
857
+ # Ensure locks are valid for the current event loop
858
+ self._ensure_locks_for_current_loop()
859
+
440
860
  async with self._session_lock:
441
861
  if self._session:
442
862
  return self._session
@@ -469,12 +889,42 @@ class RedisFaceMatcher:
469
889
  return self._session
470
890
 
471
891
  async def _ensure_redis_client(self):
892
+ # Ensure locks are valid for the current event loop
893
+ self._ensure_locks_for_current_loop()
894
+
895
+ current_loop_id = self._get_current_loop_id()
896
+
897
+ # Check if we have a client but it was created in a different event loop
898
+ if self._redis_client is not None and self._redis_client_loop_id != current_loop_id:
899
+ self.logger.warning(
900
+ "Redis client was created in a different event loop (old=%s, current=%s). Recreating client.",
901
+ self._redis_client_loop_id,
902
+ current_loop_id,
903
+ )
904
+ # Close old client safely (ignore errors since the loop may be closed)
905
+ try:
906
+ await self._redis_client.close()
907
+ except Exception:
908
+ pass
909
+ self._redis_client = None
910
+ self._redis_client_loop_id = None
911
+
472
912
  if self._redis_client:
473
913
  return self._redis_client
474
914
 
475
915
  async with self._redis_lock:
476
- if self._redis_client:
916
+ # Double-check after acquiring lock
917
+ if self._redis_client is not None and self._redis_client_loop_id == current_loop_id:
477
918
  return self._redis_client
919
+
920
+ # Reset client if loop mismatch detected inside lock
921
+ if self._redis_client is not None and self._redis_client_loop_id != current_loop_id:
922
+ try:
923
+ await self._redis_client.close()
924
+ except Exception:
925
+ pass
926
+ self._redis_client = None
927
+ self._redis_client_loop_id = None
478
928
 
479
929
  if not self.redis_url:
480
930
  host = os.getenv("FACE_RECOG_REDIS_HOST")
@@ -489,10 +939,12 @@ class RedisFaceMatcher:
489
939
  decode_responses=True,
490
940
  health_check_interval=30,
491
941
  )
942
+ self._redis_client_loop_id = current_loop_id
492
943
  self.logger.info(
493
- "Connected Redis face matcher client to %s (stream=%s)",
944
+ "Connected Redis face matcher client to %s (stream=%s, loop_id=%s)",
494
945
  self.redis_url,
495
946
  self.stream_name,
947
+ current_loop_id,
496
948
  )
497
949
  return self._redis_client
498
950
  except Exception as exc:
@@ -503,6 +955,7 @@ class RedisFaceMatcher:
503
955
  exc_info=True,
504
956
  )
505
957
  self._redis_client = None
958
+ self._redis_client_loop_id = None
506
959
 
507
960
  conn_params = await self._ensure_redis_connection_params()
508
961
  if not conn_params:
@@ -525,12 +978,14 @@ class RedisFaceMatcher:
525
978
  retry_on_timeout=True,
526
979
  health_check_interval=30,
527
980
  )
981
+ self._redis_client_loop_id = current_loop_id
528
982
  self.logger.info(
529
- "Connected Redis face matcher client to %s:%s (db=%s, stream=%s)",
983
+ "Connected Redis face matcher client to %s:%s (db=%s, stream=%s, loop_id=%s)",
530
984
  conn_params.get("host"),
531
985
  conn_params.get("port"),
532
986
  conn_params.get("db"),
533
987
  self.stream_name,
988
+ current_loop_id,
534
989
  )
535
990
  except Exception as exc:
536
991
  self.logger.error(
@@ -539,6 +994,7 @@ class RedisFaceMatcher:
539
994
  exc_info=True,
540
995
  )
541
996
  self._redis_client = None
997
+ self._redis_client_loop_id = None
542
998
 
543
999
  return self._redis_client
544
1000
 
@@ -659,7 +1115,7 @@ class TemporalIdentityManager:
659
1115
  face_client: FacialRecognitionClient,
660
1116
  embedding_manager=None,
661
1117
  redis_matcher: Optional[RedisFaceMatcher] = None,
662
- recognition_threshold: float = 0.3,
1118
+ recognition_threshold: float = 0.15,
663
1119
  history_size: int = 20,
664
1120
  unknown_patience: int = 7,
665
1121
  switch_patience: int = 5,
@@ -1081,9 +1537,9 @@ class FaceRecognitionEmbeddingConfig(BaseConfig):
1081
1537
  smoothing_confidence_range_factor: float = 0.5
1082
1538
 
1083
1539
  # Base confidence threshold (separate from embedding similarity threshold)
1084
- similarity_threshold: float = 0.3 # Lowered to match local code - 0.45 was too conservative
1540
+ similarity_threshold: float = 0.45 # 0.3 Lowered to match local code - 0.45 was too conservative
1085
1541
  # Base confidence threshold (separate from embedding similarity threshold)
1086
- confidence_threshold: float = 0.06 # Detection confidence threshold
1542
+ confidence_threshold: float = 0.1 # 0.06 Detection confidence threshold
1087
1543
 
1088
1544
  # Face recognition optional features
1089
1545
  enable_face_tracking: bool = True # Enable BYTE TRACKER advanced face tracking -- KEEP IT TRUE ALWAYS
@@ -1345,6 +1801,16 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
1345
1801
  """
1346
1802
  Extract camera_name, camera_id, and location_id from stream_info.
1347
1803
 
1804
+ Handles multiple sources and shapes for camera_id/camera_name/location_id to ensure
1805
+ correct extraction even when a single container is connected to multiple camera streams.
1806
+
1807
+ Supported stream_info shapes (seen across pipeline/components):
1808
+ - stream_info["camera_info"]
1809
+ - stream_info["input_settings"]["camera_info"]
1810
+ - stream_info["input_settings"]["input_stream"]["camera_info"]
1811
+ - stream_info["input_streams"][i]["input_stream"]["camera_info"]
1812
+ - camera_id derived from stream_info["topic"] suffix markers ("_input_topic" or "_input-topic")
1813
+
1348
1814
  Args:
1349
1815
  stream_info: Stream information dictionary
1350
1816
 
@@ -1355,22 +1821,201 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
1355
1821
  camera_id = ""
1356
1822
  location_id = ""
1357
1823
 
1358
- if not stream_info:
1824
+ if not stream_info or not isinstance(stream_info, dict):
1825
+ self.logger.debug("stream_info is None/invalid, returning empty camera info")
1359
1826
  return {"camera_name": camera_name, "camera_id": camera_id, "location_id": location_id}
1360
-
1361
- # Extract camera_name from camera_info
1362
- camera_info = stream_info.get("camera_info", {})
1363
- if camera_info:
1364
- camera_name = camera_info.get("camera_name", "")
1365
- location_id = camera_info.get("location", "")
1366
-
1367
- # Extract camera_id from topic (format: {camera_id}_input_topic)
1368
- topic = stream_info.get("topic", "")
1369
- if topic and "_input_topic" in topic:
1370
- camera_id = topic.replace("_input_topic", "")
1371
-
1372
- self.logger.debug(f"Extracted camera info - camera_name: '{camera_name}', camera_id: '{camera_id}', location_id: '{location_id}'")
1373
-
1827
+
1828
+ def _to_str(value: Any) -> str:
1829
+ """Convert common stream-info values to a safe string."""
1830
+ if value is None:
1831
+ return ""
1832
+ if isinstance(value, str):
1833
+ return value.strip()
1834
+ if isinstance(value, (int, float)):
1835
+ return str(value)
1836
+ if isinstance(value, dict):
1837
+ return ""
1838
+ if isinstance(value, (list, tuple, set)):
1839
+ for item in value:
1840
+ s = _to_str(item)
1841
+ if s:
1842
+ return s
1843
+ return ""
1844
+ try:
1845
+ return str(value).strip()
1846
+ except Exception:
1847
+ return ""
1848
+
1849
+ def _dict_get_str(d: Any, *keys: str) -> str:
1850
+ """Get first non-empty key from dict as string."""
1851
+ if not isinstance(d, dict):
1852
+ return ""
1853
+ for k in keys:
1854
+ val = _to_str(d.get(k))
1855
+ if val:
1856
+ return val
1857
+ return ""
1858
+
1859
+ def _extract_camera_id_from_topic(topic_val: Any) -> str:
1860
+ """Extract camera_id from topic formats like '{camera_id}_input_topic' or '{camera_id}_input-topic'."""
1861
+ topic = _to_str(topic_val)
1862
+ if not topic:
1863
+ return ""
1864
+ for suffix in ("_input_topic", "_input-topic"):
1865
+ if topic.endswith(suffix):
1866
+ return topic[: -len(suffix)].strip()
1867
+ for marker in ("_input_topic", "_input-topic"):
1868
+ if marker in topic:
1869
+ return topic.split(marker)[0].strip()
1870
+ return ""
1871
+
1872
+ def _extract_camera_id_from_frame_id(frame_id_val: Any) -> str:
1873
+ """
1874
+ Best-effort fallback: extract a stable camera/stream identifier from frame_id.
1875
+
1876
+ Observed upstream format (py_inference legacy mode):
1877
+ - 'legacy_{hexId}_{suffix}'
1878
+ Example:
1879
+ - 'legacy_694e7603a086e13d9c95dd3d_51b30e93'
1880
+
1881
+ We ONLY accept the middle segment if it looks like a hex identifier (>= 8 chars)
1882
+ to avoid mis-parsing arbitrary frame_id formats.
1883
+ """
1884
+ fid = _to_str(frame_id_val)
1885
+ if not fid:
1886
+ return ""
1887
+ if fid.startswith("legacy_"):
1888
+ parts = fid.split("_")
1889
+ if len(parts) >= 3:
1890
+ candidate = parts[1].strip()
1891
+ if candidate and re.fullmatch(r"[0-9a-f]{8,}", candidate, re.IGNORECASE):
1892
+ return candidate
1893
+ return ""
1894
+
1895
+ input_settings = stream_info.get("input_settings") or {}
1896
+ if not isinstance(input_settings, dict):
1897
+ input_settings = {}
1898
+
1899
+ camera_info_root = stream_info.get("camera_info") or {}
1900
+ if not isinstance(camera_info_root, dict):
1901
+ camera_info_root = {}
1902
+
1903
+ camera_info_input_settings = input_settings.get("camera_info") or {}
1904
+ if not isinstance(camera_info_input_settings, dict):
1905
+ camera_info_input_settings = {}
1906
+
1907
+ input_stream = input_settings.get("input_stream") or {}
1908
+ if not isinstance(input_stream, dict):
1909
+ input_stream = {}
1910
+
1911
+ camera_info_input_stream = input_stream.get("camera_info") or {}
1912
+ if not isinstance(camera_info_input_stream, dict):
1913
+ camera_info_input_stream = {}
1914
+
1915
+ input_streams = stream_info.get("input_streams") or []
1916
+ input_stream_candidates: List[Dict[str, Any]] = []
1917
+ camera_info_input_streams: List[Dict[str, Any]] = []
1918
+ if isinstance(input_streams, list):
1919
+ for item in input_streams:
1920
+ if not isinstance(item, dict):
1921
+ continue
1922
+ inner = item.get("input_stream", item)
1923
+ if not isinstance(inner, dict):
1924
+ continue
1925
+ input_stream_candidates.append(inner)
1926
+ ci = inner.get("camera_info") or {}
1927
+ if isinstance(ci, dict) and ci:
1928
+ camera_info_input_streams.append(ci)
1929
+
1930
+ topic_camera_id = (
1931
+ _extract_camera_id_from_topic(stream_info.get("topic"))
1932
+ or _extract_camera_id_from_topic(input_settings.get("topic"))
1933
+ )
1934
+ if not topic_camera_id:
1935
+ topics_val = stream_info.get("topics")
1936
+ if isinstance(topics_val, (list, tuple, set)):
1937
+ for t in topics_val:
1938
+ topic_camera_id = _extract_camera_id_from_topic(t)
1939
+ if topic_camera_id:
1940
+ break
1941
+
1942
+ camera_info_sources: List[Dict[str, Any]] = [
1943
+ camera_info_root,
1944
+ camera_info_input_settings,
1945
+ camera_info_input_stream,
1946
+ ] + camera_info_input_streams
1947
+
1948
+ def _camera_id_from_camera_info(ci: Dict[str, Any]) -> str:
1949
+ return _dict_get_str(ci, "camera_id", "cameraId", "_id", "id")
1950
+
1951
+ matched_ci: Dict[str, Any] = {}
1952
+ if topic_camera_id:
1953
+ camera_id = topic_camera_id
1954
+ for ci in camera_info_sources:
1955
+ if _camera_id_from_camera_info(ci) == camera_id:
1956
+ matched_ci = ci
1957
+ break
1958
+
1959
+ if not camera_id:
1960
+ camera_id = (
1961
+ _dict_get_str(stream_info, "camera_id", "cameraId")
1962
+ or _dict_get_str(input_settings, "camera_id", "cameraId")
1963
+ or _camera_id_from_camera_info(camera_info_root)
1964
+ or _camera_id_from_camera_info(camera_info_input_settings)
1965
+ or _camera_id_from_camera_info(camera_info_input_stream)
1966
+ )
1967
+ if not camera_id:
1968
+ for candidate in input_stream_candidates:
1969
+ camera_id = _dict_get_str(candidate, "camera_id", "cameraId", "_id", "id")
1970
+ if camera_id:
1971
+ break
1972
+ if not camera_id:
1973
+ camera_id = topic_camera_id
1974
+
1975
+ # Final fallback: derive camera_id from frame_id if stream_info doesn't include camera_info/topic
1976
+ if not camera_id:
1977
+ camera_id = _extract_camera_id_from_frame_id(stream_info.get("frame_id"))
1978
+
1979
+ # Use matched camera_info (if found) to set camera_name/location_id
1980
+ if matched_ci:
1981
+ camera_name = _dict_get_str(matched_ci, "camera_name", "cameraName", "name")
1982
+ location_id = _dict_get_str(matched_ci, "location", "location_id", "locationId")
1983
+
1984
+ if not camera_name:
1985
+ camera_name = (
1986
+ _dict_get_str(camera_info_root, "camera_name", "cameraName", "name")
1987
+ or _dict_get_str(camera_info_input_settings, "camera_name", "cameraName", "name")
1988
+ or _dict_get_str(camera_info_input_stream, "camera_name", "cameraName", "name")
1989
+ or _dict_get_str(stream_info, "camera_name", "cameraName")
1990
+ or _dict_get_str(input_settings, "camera_name", "cameraName")
1991
+ )
1992
+ if not camera_name:
1993
+ for ci in camera_info_input_streams:
1994
+ camera_name = _dict_get_str(ci, "camera_name", "cameraName", "name")
1995
+ if camera_name:
1996
+ break
1997
+
1998
+ if not location_id:
1999
+ location_id = (
2000
+ _dict_get_str(camera_info_root, "location", "location_id", "locationId")
2001
+ or _dict_get_str(camera_info_input_settings, "location", "location_id", "locationId")
2002
+ or _dict_get_str(camera_info_input_stream, "location", "location_id", "locationId")
2003
+ or _dict_get_str(stream_info, "location_id", "location", "locationId")
2004
+ or _dict_get_str(input_settings, "location_id", "location", "locationId")
2005
+ )
2006
+ if not location_id:
2007
+ for ci in camera_info_input_streams:
2008
+ location_id = _dict_get_str(ci, "location", "location_id", "locationId")
2009
+ if location_id:
2010
+ break
2011
+
2012
+ self.logger.debug(
2013
+ "Extracted camera info - camera_name: '%s', camera_id: '%s', location_id: '%s'",
2014
+ camera_name,
2015
+ camera_id,
2016
+ location_id,
2017
+ )
2018
+
1374
2019
  return {"camera_name": camera_name, "camera_id": camera_id, "location_id": location_id}
1375
2020
 
1376
2021
  async def _fetch_location_name(self, location_id: str) -> str:
@@ -2111,7 +2756,7 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
2111
2756
 
2112
2757
  if not is_truly_unknown and detection_type == "known":
2113
2758
  # Mark recognized and ensure it is not counted as unknown anymore
2114
- self._track_person(staff_id)
2759
+ self._track_person(staff_id, camera_id=camera_id)
2115
2760
  with self._tracking_lock:
2116
2761
  if internal_tid is not None:
2117
2762
  self._unknown_track_ids.discard(internal_tid)
@@ -2178,14 +2823,14 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
2178
2823
  detection["error"] = error
2179
2824
  return detection
2180
2825
 
2181
- def _track_person(self, person_id: str) -> None:
2826
+ def _track_person(self, person_id: str, camera_id: str = "") -> None:
2182
2827
  """Track person with camera ID and UTC timestamp"""
2183
2828
  if person_id not in self.person_tracking:
2184
2829
  self.person_tracking[person_id] = []
2185
2830
 
2186
- # Add current detection
2831
+ # Add current detection with actual camera_id
2187
2832
  detection_record = {
2188
- "camera_id": "test_camera_001", # TODO: Get from stream_info in production
2833
+ "camera_id": camera_id or "",
2189
2834
  "timestamp": datetime.utcnow().isoformat() + "Z",
2190
2835
  }
2191
2836
  self.person_tracking[person_id].append(detection_record)
@@ -2382,44 +3027,32 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
2382
3027
  stream_info, precision=True
2383
3028
  )
2384
3029
 
2385
- # Build total_counts array in expected format
2386
- total_counts = []
2387
- for cat, count in total_counts_dict.items():
2388
- if count > 0:
2389
- total_counts.append({"category": cat, "count": count})
2390
-
2391
- # Add face recognition specific total counts
3030
+ # Build total_counts array - only "Recognized Faces" and "Unknown Faces"
3031
+ # Note: We exclude generic "face" category to avoid duplicate/confusing fields
2392
3032
  session_totals = face_summary.get("session_totals", {})
2393
- total_counts.extend(
2394
- [
2395
- {
2396
- "category": "recognized_faces",
2397
- "count": session_totals.get("total_recognized", 0),
2398
- },
2399
- {
2400
- "category": "unknown_faces",
2401
- "count": session_totals.get("total_unknown", 0),
2402
- },
2403
- ]
2404
- )
2405
-
2406
- # Build current_counts array in expected format
2407
- current_counts = []
2408
- # for cat, count in per_category_count.items():
2409
- # if count > 0 or total_detections > 0:
2410
- # current_counts.append({"category": cat, "count": count})
3033
+ total_counts = [
3034
+ {
3035
+ "category": "Recognized Faces",
3036
+ "count": session_totals.get("total_recognized", 0),
3037
+ },
3038
+ {
3039
+ "category": "Unknown Faces",
3040
+ "count": session_totals.get("total_unknown", 0),
3041
+ },
3042
+ ]
2411
3043
 
2412
- # Add face recognition specific current counts
3044
+ # Build current_counts array - only "Recognized Faces" and "Unknown Faces"
2413
3045
  current_frame = face_summary.get("current_frame", {})
2414
- current_counts.extend(
2415
- [
2416
- {
2417
- "category": "Recognized Faces",
2418
- "count": current_frame.get("recognized", 0),
2419
- },
2420
- {"category": "Unknown Faces", "count": current_frame.get("unknown", 0)},
2421
- ]
2422
- )
3046
+ current_counts = [
3047
+ {
3048
+ "category": "Recognized Faces",
3049
+ "count": current_frame.get("recognized", 0),
3050
+ },
3051
+ {
3052
+ "category": "Unknown Faces",
3053
+ "count": current_frame.get("unknown", 0),
3054
+ },
3055
+ ]
2423
3056
 
2424
3057
  # Prepare detections with face recognition info
2425
3058
  detections = []