matrice-analytics 0.1.70__py3-none-any.whl → 0.1.96__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. matrice_analytics/post_processing/__init__.py +8 -2
  2. matrice_analytics/post_processing/config.py +4 -2
  3. matrice_analytics/post_processing/core/base.py +1 -1
  4. matrice_analytics/post_processing/core/config.py +40 -3
  5. matrice_analytics/post_processing/face_reg/face_recognition.py +1014 -201
  6. matrice_analytics/post_processing/face_reg/face_recognition_client.py +171 -29
  7. matrice_analytics/post_processing/face_reg/people_activity_logging.py +19 -0
  8. matrice_analytics/post_processing/post_processor.py +4 -0
  9. matrice_analytics/post_processing/usecases/__init__.py +4 -1
  10. matrice_analytics/post_processing/usecases/advanced_customer_service.py +913 -500
  11. matrice_analytics/post_processing/usecases/color_detection.py +19 -18
  12. matrice_analytics/post_processing/usecases/customer_service.py +356 -9
  13. matrice_analytics/post_processing/usecases/fire_detection.py +241 -23
  14. matrice_analytics/post_processing/usecases/footfall.py +750 -0
  15. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +638 -40
  16. matrice_analytics/post_processing/usecases/people_counting.py +66 -33
  17. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +35 -34
  18. matrice_analytics/post_processing/usecases/weapon_detection.py +2 -1
  19. matrice_analytics/post_processing/utils/alert_instance_utils.py +1018 -0
  20. matrice_analytics/post_processing/utils/business_metrics_manager_utils.py +1338 -0
  21. matrice_analytics/post_processing/utils/incident_manager_utils.py +1754 -0
  22. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/METADATA +1 -1
  23. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/RECORD +26 -22
  24. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/WHEEL +0 -0
  25. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/licenses/LICENSE.txt +0 -0
  26. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/top_level.txt +0 -0
@@ -26,7 +26,10 @@ Configuration options:
26
26
  import subprocess
27
27
  import logging
28
28
  import asyncio
29
+ import json
29
30
  import os
31
+ import re
32
+ from pathlib import Path
30
33
  log_file = open("pip_jetson_btii.log", "w")
31
34
  cmd = ["pip", "install", "httpx"]
32
35
  subprocess.run(
@@ -37,7 +40,7 @@ subprocess.run(
37
40
  )
38
41
  log_file.close()
39
42
 
40
- from typing import Any, Dict, List, Optional, Tuple
43
+ from typing import Any, Dict, List, Optional, Tuple, NamedTuple
41
44
  import time
42
45
  import base64
43
46
  import cv2
@@ -46,6 +49,20 @@ import threading
46
49
  from datetime import datetime, timezone
47
50
  from collections import deque
48
51
 
52
+ try:
53
+ from matrice_common.session import Session
54
+ HAS_MATRICE_SESSION = True
55
+ except ImportError:
56
+ Session = None
57
+ HAS_MATRICE_SESSION = False
58
+
59
+ try:
60
+ import redis.asyncio as aioredis
61
+ HAS_AIREDIS = True
62
+ except ImportError:
63
+ aioredis = None
64
+ HAS_AIREDIS = False
65
+
49
66
  from ..core.base import (
50
67
  BaseProcessor,
51
68
  ProcessingContext,
@@ -65,9 +82,13 @@ from .face_recognition_client import FacialRecognitionClient
65
82
  from .people_activity_logging import PeopleActivityLogging
66
83
  from .embedding_manager import EmbeddingManager, EmbeddingConfig
67
84
 
85
+ # Cache for location names to avoid repeated API calls
86
+ _location_name_cache: Dict[str, str] = {}
87
+
68
88
 
69
89
  # ---- Lightweight identity tracking and temporal smoothing (adapted from compare_similarity.py) ---- #
70
90
  from collections import deque, defaultdict
91
+ from matrice_common.session import Session
71
92
 
72
93
 
73
94
 
@@ -83,6 +104,545 @@ def _normalize_embedding(vec: List[float]) -> List[float]:
83
104
  return arr.tolist()
84
105
 
85
106
 
107
+ class RedisFaceMatchResult(NamedTuple):
108
+ staff_id: Optional[str]
109
+ person_name: str
110
+ confidence: float
111
+ employee_id: Optional[str]
112
+ raw: Dict[str, Any]
113
+
114
+
115
+ class RedisFaceMatcher:
116
+ """Handles Redis-based face similarity search."""
117
+
118
+ ACTION_ID_PATTERN = re.compile(r"^[0-9a-f]{8,}$", re.IGNORECASE)
119
+
120
+ def __init__(
121
+ self,
122
+ session=None,
123
+ logger: Optional[logging.Logger] = None,
124
+ redis_url: Optional[str] = None,
125
+ face_client=None,
126
+ ) -> None:
127
+ self.logger = logger or logging.getLogger(__name__)
128
+ self._session = session
129
+ self.face_client = face_client
130
+ self.redis_url = (
131
+ redis_url
132
+ or os.getenv("FACE_RECOG_REDIS_URL")
133
+ or os.getenv("REDIS_URL")
134
+ )
135
+ self.stream_name = os.getenv(
136
+ "FACE_RECOG_REDIS_STREAM", "facial_detection_stream"
137
+ )
138
+ self.default_min_confidence = float(
139
+ os.getenv("FACE_RECOG_REDIS_MIN_CONFIDENCE", "0.01")
140
+ )
141
+ self.response_timeout = (
142
+ float(os.getenv("FACE_RECOG_REDIS_RESPONSE_TIMEOUT_MS", "200")) / 1000.0 # Reduced from 600ms to 200ms for faster failure
143
+ )
144
+ self.poll_interval = (
145
+ float(os.getenv("FACE_RECOG_REDIS_POLL_INTERVAL_MS", "5")) / 1000.0 # Reduced from 20ms to 5ms for faster polling
146
+ )
147
+ self.stream_maxlen = int(
148
+ os.getenv("FACE_RECOG_REDIS_STREAM_MAXLEN", "5000")
149
+ )
150
+ self._redis_client = None # type: ignore[assignment]
151
+ self._redis_connection_params: Optional[Dict[str, Any]] = None
152
+ self._app_deployment_id = os.getenv("APP_DEPLOYMENT_ID")
153
+ self._action_id = (
154
+ os.getenv("ACTION_ID")
155
+ or os.getenv("MATRISE_ACTION_ID")
156
+ or self._discover_action_id()
157
+ )
158
+ self._redis_server_id = os.getenv("REDIS_SERVER_ID")
159
+ self._app_dep_lock = asyncio.Lock()
160
+ self._session_lock = asyncio.Lock()
161
+ self._redis_lock = asyncio.Lock()
162
+ self._redis_warning_logged = False
163
+
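# Editor's note (illustrative, not part of the released code): the matcher above is
# configured entirely through the environment variables read in __init__. One possible
# configuration, with placeholder values for the URL and deployment id:
#
#   FACE_RECOG_REDIS_URL=redis://localhost:6379/0      (or FACE_RECOG_REDIS_HOST / FACE_RECOG_REDIS_PORT)
#   FACE_RECOG_REDIS_STREAM=facial_detection_stream    (request stream; this is the default)
#   FACE_RECOG_REDIS_MIN_CONFIDENCE=0.01               (default match floor)
#   FACE_RECOG_REDIS_RESPONSE_TIMEOUT_MS=200           (how long to poll for a result)
#   FACE_RECOG_REDIS_POLL_INTERVAL_MS=5                (delay between result polls)
#   FACE_RECOG_REDIS_STREAM_MAXLEN=5000                (approximate stream trim length)
#   APP_DEPLOYMENT_ID=<deployment id>                  (otherwise resolved from ACTION_ID via the API)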
164
+ def is_available(self) -> bool:
165
+ return HAS_AIREDIS
166
+
167
+ async def match_embedding(
168
+ self,
169
+ embedding: List[float],
170
+ search_id: Optional[str],
171
+ location: str = "",
172
+ min_confidence: Optional[float] = None,
173
+ ) -> Optional[RedisFaceMatchResult]:
174
+ """Send embedding to Redis stream and wait for match result."""
175
+ if not HAS_AIREDIS:
176
+ if not self._redis_warning_logged:
177
+ self.logger.warning(
178
+ "redis.asyncio not available; skipping Redis face matcher flow"
179
+ )
180
+ self._redis_warning_logged = True
181
+ return None
182
+
183
+ embedding_list = self._prepare_embedding_list(embedding)
184
+ if not embedding_list:
185
+ self.logger.warning(f"Empty embedding list for search_id={search_id}, cannot send to Redis")
186
+ print(f"WARNING: Empty embedding list for search_id={search_id}, cannot send to Redis")
187
+ return None
188
+
189
+ if len(embedding_list) == 0:
190
+ self.logger.warning(f"Embedding list has zero length for search_id={search_id}")
191
+ print(f"WARNING: Embedding list has zero length for search_id={search_id}")
192
+ return None
193
+
194
+ app_dep_id = await self._ensure_app_deployment_id()
195
+ if not app_dep_id:
196
+ return None
197
+
198
+ redis_client = await self._ensure_redis_client()
199
+ if redis_client is None:
200
+ return None
201
+
202
+ resolved_search_id = str(search_id or self._generate_search_id())
203
+ payload = {
204
+ "appDepId": app_dep_id,
205
+ "searchId": resolved_search_id,
206
+ "embedding": embedding_list,
207
+ "location": location or "",
208
+ "minConfidence": float(
209
+ min_confidence if min_confidence is not None else self.default_min_confidence
210
+ ),
211
+ }
212
+
213
+ try:
214
+ self.logger.debug(
215
+ f"Sending embedding to Redis stream {self.stream_name} with search_id={resolved_search_id}, "
216
+ f"embedding_len={len(embedding_list)}, minConfidence={payload.get('minConfidence')}"
217
+ )
218
+ await redis_client.xadd(
219
+ self.stream_name,
220
+ {"data": json.dumps(payload, separators=(",", ":"))},
221
+ maxlen=self.stream_maxlen,
222
+ approximate=True,
223
+ )
224
+ self.logger.debug(f"Successfully sent embedding to Redis stream for search_id={resolved_search_id}")
225
+ except Exception as exc:
226
+ self.logger.error(
227
+ "Failed to enqueue face embedding to Redis stream %s: %s",
228
+ self.stream_name,
229
+ exc,
230
+ exc_info=True,
231
+ )
232
+ print(f"ERROR: Failed to send to Redis stream {self.stream_name}: {exc}")
233
+ return None
234
+
235
+ result_key = f"{resolved_search_id}_{app_dep_id}"
236
+ deadline = time.monotonic() + self.response_timeout
237
+ poll_count = 0
238
+ start_poll_time = time.monotonic()
239
+
240
+ self.logger.debug(f"Waiting for Redis response with key={result_key}, timeout={self.response_timeout:.3f}s")
241
+
242
+ # Poll loop - check immediately first, then with intervals
243
+ while time.monotonic() < deadline:
244
+ try:
245
+ raw_value = await redis_client.get(result_key)
246
+ poll_count += 1
247
+ except Exception as exc:
248
+ self.logger.error(
249
+ "Failed to read Redis result for key %s: %s",
250
+ result_key,
251
+ exc,
252
+ exc_info=True,
253
+ )
254
+ print(f"ERROR: Failed to read Redis result for key {result_key}: {exc}")
255
+ return None
256
+
257
+ if raw_value:
258
+ await redis_client.delete(result_key)
259
+ try:
260
+ parsed = json.loads(raw_value)
261
+ except Exception as exc:
262
+ parsed = json.loads(raw_value)
263
+ self.logger.error(
264
+ "Unable to parse Redis face match response: %s",
265
+ exc,
266
+ exc_info=True,
267
+ )
268
+ print(f"ERROR: Unable to parse Redis face match response: {exc}")
269
+ #return None
270
+
271
+ # Log and print the raw Redis response for debugging
272
+ self.logger.info(f"Redis raw response for search_id={resolved_search_id}: {parsed}")
273
+ print(f"Redis raw response for search_id={resolved_search_id}: {parsed}")
274
+
275
+ match_data = None
276
+ if isinstance(parsed, list) and parsed:
277
+ match_data = parsed[0]
278
+ self.logger.info(f"Redis response is array, extracted first element: {match_data}")
279
+ print(f"Redis response is array, extracted first element: {match_data}")
280
+ elif isinstance(parsed, dict):
281
+ match_data = parsed
282
+ self.logger.info(f"Redis response is dict: {match_data}")
283
+ print(f"Redis response is dict: {match_data}")
284
+ else:
285
+ self.logger.warning(f"Redis response is neither list nor dict: {type(parsed)}, value: {parsed}")
286
+ print(f"WARNING: Redis response is neither list nor dict: {type(parsed)}, value: {parsed}")
287
+
288
+ if not isinstance(match_data, dict):
289
+ self.logger.warning(f"match_data is not a dict after extraction: {type(match_data)}, value: {match_data}")
290
+ print(f"WARNING: match_data is not a dict after extraction: {type(match_data)}, value: {match_data}")
291
+ return None
292
+
293
+ staff_id = match_data.get("staffId") or match_data.get("staff_id")
294
+ if not staff_id:
295
+ self.logger.warning(f"No staffId found in match_data: {match_data}")
296
+ print(f"WARNING: No staffId found in match_data: {match_data}")
297
+ return None
298
+ person_name = str(match_data.get("name") or "Unknown")
299
+ confidence = float(match_data.get("conf") or match_data.get("confidence") or 0.0)
300
+ employee_id = match_data.get("employeeId") or match_data.get("embeddingId")
301
+
302
+ # Log the extracted values
303
+ self.logger.info(
304
+ f"Redis match extracted - staff_id={staff_id}, person_name={person_name}, "
305
+ f"confidence={confidence}, employee_id={employee_id}"
306
+ )
307
+ print(
308
+ f"Redis match extracted - staff_id={staff_id}, person_name={person_name}, "
309
+ f"confidence={confidence}, employee_id={employee_id}"
310
+ )
311
+
312
+ # Check confidence threshold before returning
313
+ min_conf = float(min_confidence if min_confidence is not None else self.default_min_confidence)
314
+ if confidence < min_conf:
315
+ self.logger.debug(
316
+ f"Redis match confidence {confidence:.3f} below threshold {min_conf:.3f}, rejecting"
317
+ )
318
+ print(f"Redis match confidence {confidence:.3f} below threshold {min_conf:.3f}, rejecting")
319
+ return None
320
+
321
+ result = RedisFaceMatchResult(
322
+ staff_id=str(staff_id),
323
+ person_name=person_name,
324
+ confidence=round(confidence, 3),
325
+ employee_id=str(employee_id) if employee_id else None,
326
+ raw=match_data,
327
+ )
328
+
329
+ poll_time = (time.monotonic() - start_poll_time) * 1000.0
330
+ self.logger.info(
331
+ f"Redis match result created (polls={poll_count}, poll_time={poll_time:.2f}ms): "
332
+ f"staff_id={result.staff_id}, name={result.person_name}, conf={result.confidence}"
333
+ )
334
+ print(
335
+ f"Redis match result created (polls={poll_count}, poll_time={poll_time:.2f}ms): "
336
+ f"staff_id={result.staff_id}, name={result.person_name}, conf={result.confidence}"
337
+ )
338
+
339
+ return result
340
+
341
+ # Use shorter sleep for faster response (already reduced poll_interval to 5ms)
342
+ await asyncio.sleep(self.poll_interval)
343
+
344
+ poll_time = (time.monotonic() - start_poll_time) * 1000.0
345
+ self.logger.warning(
346
+ "Timed out waiting for Redis face match result for key %s (timeout=%.3fs, polls=%d, poll_time=%.2fms)",
347
+ result_key,
348
+ self.response_timeout,
349
+ poll_count,
350
+ poll_time,
351
+ )
352
+ print(
353
+ f"WARNING: Redis timeout for search_id={resolved_search_id} "
354
+ f"(timeout={self.response_timeout:.3f}s, polls={poll_count}, poll_time={poll_time:.2f}ms)"
355
+ )
356
+ return None
357
+
358
+ def _prepare_embedding_list(self, embedding: List[float]) -> List[float]:
359
+ if isinstance(embedding, np.ndarray):
360
+ return embedding.astype(np.float32).tolist()
361
+ prepared = []
362
+ try:
363
+ for value in embedding:
364
+ prepared.append(float(value))
365
+ except Exception:
366
+ self.logger.debug("Failed to convert embedding to float list", exc_info=True)
367
+ return []
368
+ return prepared
369
+
370
+ def _generate_search_id(self) -> str:
371
+ return f"face_{int(time.time() * 1000)}"
372
+
373
+ async def _ensure_app_deployment_id(self) -> Optional[str]:
374
+ if self._app_deployment_id:
375
+ return self._app_deployment_id
376
+
377
+ async with self._app_dep_lock:
378
+ if self._app_deployment_id:
379
+ return self._app_deployment_id
380
+
381
+ action_id = self._action_id or self._discover_action_id()
382
+ if not action_id:
383
+ self.logger.warning(
384
+ "Unable to determine action_id for Redis face matcher"
385
+ )
386
+ return None
387
+
388
+ session = await self._ensure_session()
389
+ if session is None:
390
+ return None
391
+
392
+ # Use run_in_executor for Python 3.8 compatibility (asyncio.to_thread requires 3.9+)
393
+ loop = asyncio.get_running_loop()
394
+ response = await loop.run_in_executor(
395
+ None, self._fetch_action_details_sync, session, action_id
396
+ )
397
+ if not response or not response.get("success", False):
398
+ self.logger.warning(
399
+ "Failed to fetch action details for action_id=%s", action_id
400
+ )
401
+ return None
402
+
403
+ action_doc = response.get("data", {})
404
+ action_details = action_doc.get("actionDetails", {})
405
+ app_dep_id = (
406
+ action_details.get("app_deployment_id")
407
+ or action_details.get("appDepId")
408
+ )
409
+ redis_server_id = (
410
+ action_details.get("redis_server_id")
411
+ or action_details.get("redisServerId")
412
+ or action_details.get("redis_serverid")
413
+ or action_details.get("redisServerID")
414
+ )
415
+ if not app_dep_id:
416
+ self.logger.warning(
417
+ "app_deployment_id missing in action details for action_id=%s",
418
+ action_id,
419
+ )
420
+ return None
421
+
422
+ self._app_deployment_id = str(app_dep_id)
423
+ if redis_server_id:
424
+ self._redis_server_id = str(redis_server_id)
425
+ self.logger.info(
426
+ "Resolved app deployment id %s for action_id=%s",
427
+ self._app_deployment_id,
428
+ action_id,
429
+ )
430
+ return self._app_deployment_id
431
+
432
+ async def _ensure_session(self):
433
+ if self._session or not HAS_MATRICE_SESSION:
434
+ if not self._session and not HAS_MATRICE_SESSION:
435
+ self.logger.warning(
436
+ "matrice_common.session unavailable; cannot create RPC session for Redis matcher"
437
+ )
438
+ return self._session
439
+
440
+ async with self._session_lock:
441
+ if self._session:
442
+ return self._session
443
+
444
+ access_key = os.getenv("MATRICE_ACCESS_KEY_ID")
445
+ secret_key = os.getenv("MATRICE_SECRET_ACCESS_KEY")
446
+ account_number = os.getenv("MATRICE_ACCOUNT_NUMBER", "")
447
+
448
+ if not access_key or not secret_key:
449
+ self.logger.warning(
450
+ "Missing Matrice credentials; cannot initialize session for Redis matcher"
451
+ )
452
+ return None
453
+
454
+ try:
455
+ self._session = Session(
456
+ account_number=account_number,
457
+ access_key=access_key,
458
+ secret_key=secret_key,
459
+ )
460
+ self.logger.info("Initialized Matrice session for Redis face matcher")
461
+ except Exception as exc:
462
+ self.logger.error(
463
+ "Failed to initialize Matrice session for Redis matcher: %s",
464
+ exc,
465
+ exc_info=True,
466
+ )
467
+ self._session = None
468
+
469
+ return self._session
470
+
471
+ async def _ensure_redis_client(self):
472
+ if self._redis_client:
473
+ return self._redis_client
474
+
475
+ async with self._redis_lock:
476
+ if self._redis_client:
477
+ return self._redis_client
478
+
479
+ if not self.redis_url:
480
+ host = os.getenv("FACE_RECOG_REDIS_HOST")
481
+ port = os.getenv("FACE_RECOG_REDIS_PORT")
482
+ if host and port:
483
+ self.redis_url = f"redis://{host}:{port}/0"
484
+
485
+ if self.redis_url:
486
+ try:
487
+ self._redis_client = aioredis.from_url(
488
+ self.redis_url,
489
+ decode_responses=True,
490
+ health_check_interval=30,
491
+ )
492
+ self.logger.info(
493
+ "Connected Redis face matcher client to %s (stream=%s)",
494
+ self.redis_url,
495
+ self.stream_name,
496
+ )
497
+ return self._redis_client
498
+ except Exception as exc:
499
+ self.logger.error(
500
+ "Failed to connect to Redis at %s: %s",
501
+ self.redis_url,
502
+ exc,
503
+ exc_info=True,
504
+ )
505
+ self._redis_client = None
506
+
507
+ conn_params = await self._ensure_redis_connection_params()
508
+ if not conn_params:
509
+ self.logger.error(
510
+ "Redis connection parameters unavailable. Configure FACE_RECOG_REDIS_URL or ensure redis_server_id is set."
511
+ )
512
+ return None
513
+
514
+ try:
515
+ self._redis_client = aioredis.Redis(
516
+ host=conn_params.get("host"),
517
+ port=conn_params.get("port", 6379),
518
+ username=conn_params.get("username"),
519
+ password=conn_params.get("password") or None,
520
+ db=conn_params.get("db", 0),
521
+ ssl=conn_params.get("ssl", False),
522
+ decode_responses=True,
523
+ socket_connect_timeout=conn_params.get("connection_timeout", 120),
524
+ socket_timeout=conn_params.get("socket_timeout", 120),
525
+ retry_on_timeout=True,
526
+ health_check_interval=30,
527
+ )
528
+ self.logger.info(
529
+ "Connected Redis face matcher client to %s:%s (db=%s, stream=%s)",
530
+ conn_params.get("host"),
531
+ conn_params.get("port"),
532
+ conn_params.get("db"),
533
+ self.stream_name,
534
+ )
535
+ except Exception as exc:
536
+ self.logger.error(
537
+ "Failed to create Redis client with fetched parameters: %s",
538
+ exc,
539
+ exc_info=True,
540
+ )
541
+ self._redis_client = None
542
+
543
+ return self._redis_client
544
+
545
+ async def _ensure_redis_connection_params(self) -> Optional[Dict[str, Any]]:
546
+ if self._redis_connection_params:
547
+ return self._redis_connection_params
548
+
549
+ if not self.face_client:
550
+ self.logger.warning(
551
+ "Cannot fetch Redis connection parameters without face_client"
552
+ )
553
+ return None
554
+
555
+ await self._ensure_app_deployment_id()
556
+
557
+ try:
558
+ response = await self.face_client.get_redis_details()
559
+ except Exception as exc:
560
+ self.logger.error(
561
+ "Failed to fetch Redis details from facial recognition server: %s",
562
+ exc,
563
+ exc_info=True,
564
+ )
565
+ return None
566
+
567
+ if not response or not response.get("success", False):
568
+ self.logger.warning(
569
+ "Redis details API returned failure: %s",
570
+ response,
571
+ )
572
+ return None
573
+
574
+ data = response.get("data", {})
575
+ host = data.get("REDIS_IP")
576
+ port = data.get("REDIS_PORT")
577
+ password = data.get("REDIS_PASSWORD")
578
+
579
+ if not host or not port:
580
+ self.logger.warning(
581
+ "Redis details missing REDIS_IP or REDIS_PORT"
582
+ )
583
+ return None
584
+
585
+ try:
586
+ params = {
587
+ "host": host,
588
+ "port": int(port),
589
+ "password": password or None,
590
+ "username": None,
591
+ "db": 0,
592
+ "connection_timeout": 120,
593
+ "socket_timeout": 120,
594
+ "ssl": False,
595
+ }
596
+ except Exception as exc:
597
+ self.logger.error(
598
+ "Invalid Redis connection config: %s",
599
+ exc,
600
+ exc_info=True,
601
+ )
602
+ return None
603
+
604
+ self._redis_connection_params = params
605
+ return self._redis_connection_params
606
+
607
+ @classmethod
608
+ def _discover_action_id(cls) -> Optional[str]:
609
+ candidates: List[str] = []
610
+ try:
611
+ cwd = Path.cwd()
612
+ candidates.append(cwd.name)
613
+ for parent in cwd.parents:
614
+ candidates.append(parent.name)
615
+ except Exception:
616
+ pass
617
+
618
+ try:
619
+ usr_src = Path("/usr/src")
620
+ if usr_src.exists():
621
+ for child in usr_src.iterdir():
622
+ if child.is_dir():
623
+ candidates.append(child.name)
624
+ except Exception:
625
+ pass
626
+
627
+ for candidate in candidates:
628
+ if candidate and len(candidate) >= 8 and cls.ACTION_ID_PATTERN.match(candidate):
629
+ return candidate
630
+ return None
631
+
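# Editor's note (illustrative): _discover_action_id scans the current working directory,
# its parents, and the children of /usr/src for a name that satisfies ACTION_ID_PATTERN
# (at least 8 hex characters, case-insensitive). A hypothetical /usr/src/66f3a1b2c4d5e6f7
# would match, while a directory named my_project would not.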
632
+ def _fetch_action_details_sync(self, session, action_id: str) -> Optional[Dict[str, Any]]:
633
+ url = f"/v1/actions/action/{action_id}/details"
634
+ try:
635
+ return session.rpc.get(url)
636
+ except Exception as exc:
637
+ self.logger.error(
638
+ "Failed to fetch action details for action_id=%s: %s",
639
+ action_id,
640
+ exc,
641
+ exc_info=True,
642
+ )
643
+ return None
644
+
645
+
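Editor's note: the class above implements a request/response handshake over Redis. Each embedding is XADD-ed to the request stream as a single JSON field, and the matcher then polls a plain key named {searchId}_{appDepId} that a separate matching service is expected to write. A minimal usage sketch under stated assumptions (redis.asyncio installed, FACE_RECOG_REDIS_URL and APP_DEPLOYMENT_ID set, a worker consuming the stream); the helper name, search id, zero-vector embedding, and location string below are placeholders, not part of the released code:

async def _demo_redis_match() -> None:
    # Build a matcher purely from environment variables and request one match.
    matcher = RedisFaceMatcher(logger=logging.getLogger("redis_matcher_demo"))
    if not matcher.is_available():
        return  # redis.asyncio is not installed
    result = await matcher.match_embedding(
        embedding=[0.0] * 512,          # stand-in for a real face embedding
        search_id="face_demo_001",      # placeholder search id
        location="Entry Reception",     # placeholder location
        min_confidence=0.3,
    )
    if result is not None:
        print(result.staff_id, result.person_name, result.confidence)

# asyncio.run(_demo_redis_match())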
86
646
  ## Removed FaceTracker fallback (using AdvancedTracker only)
87
647
 
88
648
 
@@ -97,8 +657,9 @@ class TemporalIdentityManager:
97
657
  def __init__(
98
658
  self,
99
659
  face_client: FacialRecognitionClient,
100
- embedding_manager = None,
101
- recognition_threshold: float = 0.35,
660
+ embedding_manager=None,
661
+ redis_matcher: Optional[RedisFaceMatcher] = None,
662
+ recognition_threshold: float = 0.3,
102
663
  history_size: int = 20,
103
664
  unknown_patience: int = 7,
104
665
  switch_patience: int = 5,
@@ -107,12 +668,14 @@ class TemporalIdentityManager:
107
668
  self.logger = logging.getLogger(__name__)
108
669
  self.face_client = face_client
109
670
  self.embedding_manager = embedding_manager
671
+ self.redis_matcher = redis_matcher
110
672
  self.threshold = float(recognition_threshold)
111
673
  self.history_size = int(history_size)
112
674
  self.unknown_patience = int(unknown_patience)
113
675
  self.switch_patience = int(switch_patience)
114
676
  self.fallback_margin = float(fallback_margin)
115
677
  self.tracks: Dict[Any, Dict[str, object]] = {}
678
+ self.emb_run=False
116
679
 
117
680
  def _ensure_track(self, track_id: Any) -> None:
118
681
  if track_id not in self.tracks:
@@ -128,7 +691,13 @@ class TemporalIdentityManager:
128
691
  "streaks": defaultdict(int), # staff_id -> consecutive frames
129
692
  }
130
693
 
131
- async def _compute_best_identity(self, emb: List[float], location: str = "", timestamp: str = "") -> Tuple[Optional[str], str, float, Optional[str], Dict[str, Any], str]:
694
+ async def _compute_best_identity(
695
+ self,
696
+ emb: List[float],
697
+ location: str = "",
698
+ timestamp: str = "",
699
+ search_id: Optional[str] = None,
700
+ ) -> Tuple[Optional[str], str, float, Optional[str], Dict[str, Any], str]:
132
701
  """
133
702
  Find best identity match using local similarity search (fast) with optional API fallback.
134
703
  Returns (staff_id, person_name, score, employee_id, staff_details, detection_type).
@@ -138,11 +707,86 @@ class TemporalIdentityManager:
138
707
  """
139
708
  if not emb or not isinstance(emb, list):
140
709
  return None, "Unknown", 0.0, None, {}, "unknown"
141
-
142
- st10 = time.time()
143
-
710
+
711
+ #-------------- New Redis API Fast Call Start------------------------------------------------------------------------------------------------------------------------------
712
+ # ALWAYS attempt Redis match for every detection (required for every frame)
713
+ if self.redis_matcher:
714
+ try:
715
+ self.logger.debug(f"Attempting Redis match for search_id={search_id}, embedding_len={len(emb) if emb else 0}")
716
+ redis_start_time = time.time()
717
+ redis_match = await self.redis_matcher.match_embedding(
718
+ embedding=emb,
719
+ search_id=search_id,
720
+ location=location or "",
721
+ min_confidence=self.threshold, # Use recognition threshold instead of default_min_confidence
722
+ )
723
+ redis_latency_ms = (time.time() - redis_start_time) * 1000.0
724
+
725
+ if redis_match:
726
+ self.logger.info(
727
+ f"Redis match found in {redis_latency_ms:.2f}ms - staff_id={redis_match.staff_id}, "
728
+ f"person_name={redis_match.person_name}, confidence={redis_match.confidence:.3f}"
729
+ )
730
+ print(
731
+ f"Redis match found in {redis_latency_ms:.2f}ms - staff_id={redis_match.staff_id}, "
732
+ f"person_name={redis_match.person_name}, confidence={redis_match.confidence:.3f}"
733
+ )
734
+
735
+ if redis_match.staff_id:
736
+ staff_details = (
737
+ dict(redis_match.raw) if isinstance(redis_match.raw, dict) else {}
738
+ )
739
+ if redis_match.person_name and not staff_details.get("name"):
740
+ staff_details["name"] = redis_match.person_name
741
+
742
+ # Check if confidence meets threshold
743
+ if float(redis_match.confidence) >= self.threshold:
744
+ self.logger.info(
745
+ "Redis embedding match ACCEPTED - staff_id=%s, person_name=%s, score=%.3f (threshold=%.3f)",
746
+ redis_match.staff_id,
747
+ redis_match.person_name,
748
+ float(redis_match.confidence),
749
+ self.threshold,
750
+ )
751
+ print(
752
+ f"Redis embedding match ACCEPTED - staff_id={redis_match.staff_id}, "
753
+ f"person_name={redis_match.person_name}, score={redis_match.confidence:.3f} "
754
+ f"(threshold={self.threshold:.3f})"
755
+ )
756
+ return (
757
+ str(redis_match.staff_id),
758
+ redis_match.person_name or "Unknown",
759
+ float(redis_match.confidence),
760
+ redis_match.employee_id,
761
+ staff_details,
762
+ "known",
763
+ )
764
+ else:
765
+ self.logger.debug(
766
+ "Redis embedding match REJECTED - confidence %.3f below threshold %.3f",
767
+ float(redis_match.confidence),
768
+ self.threshold,
769
+ )
770
+ print(
771
+ f"Redis embedding match REJECTED - confidence {redis_match.confidence:.3f} "
772
+ f"below threshold {self.threshold:.3f}"
773
+ )
774
+ else:
775
+ self.logger.warning("Redis match returned but staff_id is None/empty")
776
+ print("WARNING: Redis match returned but staff_id is None/empty")
777
+ else:
778
+ self.logger.debug(f"No Redis match found for search_id={search_id} (took {redis_latency_ms:.2f}ms)")
779
+ print(f"No Redis match found for search_id={search_id} (took {redis_latency_ms:.2f}ms)")
780
+ except Exception as exc:
781
+ self.logger.warning(
782
+ "Redis face match flow failed; falling back to local search: %s",
783
+ exc,
784
+ exc_info=True,
785
+ )
786
+ print(f"Redis face match flow failed: {exc}")
787
+ #-------------- New Redis API Fast Call END------------------------------------------------------------------------------------------------------------------------------
144
788
  # PRIMARY PATH: Local similarity search using EmbeddingManager (FAST - ~1-5ms)
145
- if self.embedding_manager:
789
+ if self.embedding_manager and self.emb_run:
146
790
  # Defensive check: ensure embeddings are loaded before attempting search
147
791
  if not self.embedding_manager.is_ready():
148
792
  status = self.embedding_manager.get_status()
@@ -169,10 +813,6 @@ class TemporalIdentityManager:
169
813
  elif first_name or last_name:
170
814
  person_name = f"{first_name or ''} {last_name or ''}".strip() or "Unknown"
171
815
 
172
- # print("------------------FACE RECOG TEMPORAL IDENTITY MANAGER UPDATE - COMPUTE BEST IDENTITY (LOCAL)----------------------------")
173
- # print("LATENCY:",(time.time() - st10)*1000,"| Throughput fps:",(1.0 / (time.time() - st10)) if (time.time() - st10) > 0 else None)
174
- # print(f"LOCAL MATCH: staff_id={staff_embedding.staff_id}, similarity={similarity_score:.3f}")
175
- # print("------------------FACE RECOG TEMPORAL IDENTITY MANAGER UPDATE - COMPUTE BEST IDENTITY (LOCAL)----------------------------")
176
816
 
177
817
  self.logger.info(f"Local embedding match - staff_id={staff_embedding.staff_id}, person_name={person_name}, score={similarity_score:.3f}")
178
818
 
@@ -192,10 +832,6 @@ class TemporalIdentityManager:
192
832
  except Exception:
193
833
  pass
194
834
  self.logger.debug(f"No local match found - best_similarity={best_sim:.3f}, threshold={self.threshold:.3f}")
195
- # print("------------------FACE RECOG TEMPORAL IDENTITY MANAGER UPDATE - COMPUTE BEST IDENTITY (LOCAL - NO MATCH)----------------------------")
196
- # print("LATENCY:",(time.time() - st10)*1000,"| Throughput fps:",(1.0 / (time.time() - st10)) if (time.time() - st10) > 0 else None)
197
- # print(f"BEST_SIM={best_sim:.3f} THRESH={self.threshold:.3f}")
198
- # print("------------------FACE RECOG TEMPORAL IDENTITY MANAGER UPDATE - COMPUTE BEST IDENTITY (LOCAL - NO MATCH)----------------------------")
199
835
 
200
836
  return None, "Unknown", 0.0, None, {}, "unknown"
201
837
 
@@ -203,74 +839,83 @@ class TemporalIdentityManager:
203
839
  self.logger.warning(f"Local similarity search failed, falling back to API: {e}")
204
840
  # Fall through to API call below
205
841
 
842
+ #---------------------------------BACKUP MONGODB API SLOW CALL--------------------------------------------------------------------------------------
206
843
  # FALLBACK PATH: API call (SLOW - ~2000ms) - only if embedding manager not available
207
844
  # This path should rarely be used in production
208
- try:
209
- self.logger.warning("Using slow API fallback for identity search - consider checking embedding manager initialization")
210
- resp = await self.face_client.search_similar_faces(
211
- face_embedding=emb,
212
- threshold=0.01, # low threshold to always get top-1
213
- limit=1,
214
- collection="staff_enrollment",
215
- location=location,
216
- timestamp=timestamp,
217
- )
218
- # print("------------------FACE RECOG TEMPORAL IDENTITY MANAGER UPDATE - COMPUTE BEST IDENTITY (API FALLBACK)----------------------------")
219
- # print("LATENCY:",(time.time() - st10)*1000,"| Throughput fps:",(1.0 / (time.time() - st10)) if (time.time() - st10) > 0 else None)
220
- # print("WARNING: Using slow API fallback!")
221
- # print("------------------FACE RECOG TEMPORAL IDENTITY MANAGER UPDATE - COMPUTE BEST IDENTITY (API FALLBACK)----------------------------")
222
-
223
- except Exception as e:
224
- self.logger.error(f"API ERROR: Failed to search similar faces in _compute_best_identity: {e}", exc_info=True)
225
- return None, "Unknown", 0.0, None, {}, "unknown"
845
+ # try:
846
+ # self.logger.warning("Using slow API fallback for identity search - consider checking embedding manager initialization")
847
+ # resp = await self.face_client.search_similar_faces(
848
+ # face_embedding=emb,
849
+ # threshold=0.01, # low threshold to always get top-1
850
+ # limit=1,
851
+ # collection="staff_enrollment",
852
+ # location=location,
853
+ # timestamp=timestamp,
854
+ # )
855
+
226
856
 
227
- try:
228
- results: List[Any] = []
229
- self.logger.debug('API Response received for identity search')
230
- if isinstance(resp, dict):
231
- if isinstance(resp.get("data"), list):
232
- results = resp.get("data", [])
233
- elif isinstance(resp.get("results"), list):
234
- results = resp.get("results", [])
235
- elif isinstance(resp.get("items"), list):
236
- results = resp.get("items", [])
237
- elif isinstance(resp, list):
238
- results = resp
239
-
240
- if not results:
241
- self.logger.debug("No identity match found from API")
242
- return None, "Unknown", 0.0, None, {}, "unknown"
857
+ # except Exception as e:
858
+ # self.logger.error(f"API ERROR: Failed to search similar faces in _compute_best_identity: {e}", exc_info=True)
859
+ # return None, "Unknown", 0.0, None, {}, "unknown"
243
860
 
244
- item = results[0] if isinstance(results, list) else results
245
- self.logger.debug(f'Top-1 match from API: {item}')
246
- # Be defensive with keys and types
247
- staff_id = item.get("staffId") if isinstance(item, dict) else None
248
- employee_id = str(item.get("_id")) if isinstance(item, dict) and item.get("_id") is not None else None
249
- score = float(item.get("score", 0.0)) if isinstance(item, dict) else 0.0
250
- detection_type = str(item.get("detectionType", "unknown")) if isinstance(item, dict) else "unknown"
251
- staff_details = item.get("staffDetails", {}) if isinstance(item, dict) else {}
252
- # Extract a person name from staff_details
253
- person_name = "Unknown"
254
- if isinstance(staff_details, dict) and staff_details:
255
- first_name = staff_details.get("firstName")
256
- last_name = staff_details.get("lastName")
257
- name = staff_details.get("name")
258
- if name:
259
- person_name = str(name)
260
- else:
261
- if first_name or last_name:
262
- person_name = f"{first_name or ''} {last_name or ''}".strip() or "UnknowNN" #TODO:ebugging change to normal once done
263
- # If API says unknown or missing staff_id, treat as unknown
264
- if not staff_id: #or detection_type == "unknown"
265
- self.logger.debug(f"API returned unknown or missing staff_id - score={score}, employee_id={employee_id}")
266
- return None, "Unknown", float(score), employee_id, staff_details if isinstance(staff_details, dict) else {}, "unknown"
267
- self.logger.info(f"API identified face - staff_id={staff_id}, person_name={person_name}, score={score:.3f}")
268
- return str(staff_id), person_name, float(score), employee_id, staff_details if isinstance(staff_details, dict) else {}, "known"
269
- except Exception as e:
270
- self.logger.error(f"Error parsing API response in _compute_best_identity: {e}", exc_info=True)
271
- return None, "Unknown", 0.0, None, {}, "unknown"
861
+ # try:
862
+ # results: List[Any] = []
863
+ # self.logger.debug('API Response received for identity search')
864
+ # if isinstance(resp, dict):
865
+ # if isinstance(resp.get("data"), list):
866
+ # results = resp.get("data", [])
867
+ # elif isinstance(resp.get("results"), list):
868
+ # results = resp.get("results", [])
869
+ # elif isinstance(resp.get("items"), list):
870
+ # results = resp.get("items", [])
871
+ # elif isinstance(resp, list):
872
+ # results = resp
873
+
874
+ # if not results:
875
+ # self.logger.debug("No identity match found from API")
876
+ # return None, "Unknown", 0.0, None, {}, "unknown"
877
+
878
+ # item = results[0] if isinstance(results, list) else results
879
+ # self.logger.debug(f'Top-1 match from API: {item}')
880
+ # # Be defensive with keys and types
881
+ # staff_id = item.get("staffId") if isinstance(item, dict) else None
882
+ # employee_id = str(item.get("_id")) if isinstance(item, dict) and item.get("_id") is not None else None
883
+ # score = float(item.get("score", 0.0)) if isinstance(item, dict) else 0.0
884
+ # detection_type = str(item.get("detectionType", "unknown")) if isinstance(item, dict) else "unknown"
885
+ # staff_details = item.get("staffDetails", {}) if isinstance(item, dict) else {}
886
+ # # Extract a person name from staff_details
887
+ # person_name = "Unknown"
888
+ # if isinstance(staff_details, dict) and staff_details:
889
+ # first_name = staff_details.get("firstName")
890
+ # last_name = staff_details.get("lastName")
891
+ # name = staff_details.get("name")
892
+ # if name:
893
+ # person_name = str(name)
894
+ # else:
895
+ # if first_name or last_name:
896
+ # person_name = f"{first_name or ''} {last_name or ''}".strip() or "UnknowNN"  # TODO: debugging value - change back to normal once done
897
+ # # If API says unknown or missing staff_id, treat as unknown
898
+ # if not staff_id: #or detection_type == "unknown"
899
+ # self.logger.debug(f"API returned unknown or missing staff_id - score={score}, employee_id={employee_id}")
900
+ # return None, "Unknown", float(score), employee_id, staff_details if isinstance(staff_details, dict) else {}, "unknown"
901
+ # self.logger.info(f"API identified face - staff_id={staff_id}, person_name={person_name}, score={score:.3f}")
902
+ # return str(staff_id), person_name, float(score), employee_id, staff_details if isinstance(staff_details, dict) else {}, "known"
903
+ # except Exception as e:
904
+ # self.logger.error(f"Error parsing API response in _compute_best_identity: {e}", exc_info=True)
905
+ # return None, "Unknown", 0.0, None, {}, "unknown"
906
+ #---------------------------------BACKUP MONGODB API SLOW CALL--------------------------------------------------------------------------------------
907
+
908
+ # If we reach here, no match was found through any method
909
+ self.logger.debug("No identity match found - returning unknown")
910
+ return None, "Unknown", 0.0, None, {}, "unknown"
272
911
 
273
- async def _compute_best_identity_from_history(self, track_state: Dict[str, object], location: str = "", timestamp: str = "") -> Tuple[Optional[str], str, float, Optional[str], Dict[str, Any], str]:
912
+ async def _compute_best_identity_from_history(
913
+ self,
914
+ track_state: Dict[str, object],
915
+ location: str = "",
916
+ timestamp: str = "",
917
+ search_id: Optional[str] = None,
918
+ ) -> Tuple[Optional[str], str, float, Optional[str], Dict[str, Any], str]:
274
919
  hist: deque = track_state.get("embedding_history", deque()) # type: ignore
275
920
  if not hist:
276
921
  return None, "Unknown", 0.0, None, {}, "unknown"
@@ -281,7 +926,12 @@ class TemporalIdentityManager:
281
926
  except Exception as e:
282
927
  self.logger.error(f"Error computing prototype from history: {e}", exc_info=True)
283
928
  proto_list = []
284
- return await self._compute_best_identity(proto_list, location=location, timestamp=timestamp)
929
+ return await self._compute_best_identity(
930
+ proto_list,
931
+ location=location,
932
+ timestamp=timestamp,
933
+ search_id=search_id,
934
+ )
285
935
 
286
936
  async def update(
287
937
  self,
@@ -290,6 +940,7 @@ class TemporalIdentityManager:
290
940
  eligible_for_recognition: bool,
291
941
  location: str = "",
292
942
  timestamp: str = "",
943
+ search_id: Optional[str] = None,
293
944
  ) -> Tuple[Optional[str], str, float, Optional[str], Dict[str, Any], str]:
294
945
  """
295
946
  Update temporal identity state for a track and return a stabilized identity.
@@ -321,7 +972,7 @@ class TemporalIdentityManager:
321
972
  if eligible_for_recognition and emb:
322
973
  st8=time.time()
323
974
  staff_id, person_name, inst_score, employee_id, staff_details, det_type = await self._compute_best_identity(
324
- emb, location=location, timestamp=timestamp
975
+ emb, location=location, timestamp=timestamp, search_id=search_id
325
976
  )
326
977
  # print("------------------FACE RECOG TEMPORAL IDENTITY MANAGER UPDATE - COMPUTE BEST IDENTITY_I----------------------------")
327
978
  # print("LATENCY:",(time.time() - st8)*1000,"| Throughput fps:",(1.0 / (time.time() - st8)) if (time.time() - st8) > 0 else None)
@@ -386,7 +1037,10 @@ class TemporalIdentityManager:
386
1037
 
387
1038
  # Fallback: use prototype from history
388
1039
  st9=time.time()
389
- fb_staff_id, fb_name, fb_score, fb_employee_id, fb_details, fb_type = await self._compute_best_identity_from_history(s, location=location, timestamp=timestamp)
1040
+ history_search_id = f"{search_id}_hist" if search_id else None
1041
+ fb_staff_id, fb_name, fb_score, fb_employee_id, fb_details, fb_type = await self._compute_best_identity_from_history(
1042
+ s, location=location, timestamp=timestamp, search_id=history_search_id
1043
+ )
390
1044
  # print("------------------FACE RECOG TEMPORAL IDENTITY MANAGER UPDATE - COMPUTE BEST IDENTITY FROM HISTORY----------------------------")
391
1045
  # print("LATENCY:",(time.time() - st9)*1000,"| Throughput fps:",(1.0 / (time.time() - st9)) if (time.time() - st9) > 0 else None)
392
1046
  # print("------------------FACE RECOG TEMPORAL IDENTITY MANAGER UPDATE - COMPUTE BEST IDENTITY FROM HISTORY----------------------------")
@@ -427,9 +1081,9 @@ class FaceRecognitionEmbeddingConfig(BaseConfig):
427
1081
  smoothing_confidence_range_factor: float = 0.5
428
1082
 
429
1083
  # Base confidence threshold (separate from embedding similarity threshold)
430
- similarity_threshold: float = 0.2 # Lowered to match local code - 0.45 was too conservative
1084
+ similarity_threshold: float = 0.3 # Lowered to match local code - 0.45 was too conservative
431
1085
  # Base confidence threshold (separate from embedding similarity threshold)
432
- confidence_threshold: float = 0.1 # Detection confidence threshold
1086
+ confidence_threshold: float = 0.06 # Detection confidence threshold
433
1087
 
434
1088
  # Face recognition optional features
435
1089
  enable_face_tracking: bool = True # Enable BYTE TRACKER advanced face tracking -- KEEP IT TRUE ALWAYS
@@ -523,6 +1177,7 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
523
1177
 
524
1178
  # Initialize EmbeddingManager - will be configured in process method
525
1179
  self.embedding_manager = None
1180
+ self.redis_face_matcher = None
526
1181
  # Temporal identity manager for API-based top-1 identity smoothing
527
1182
  self.temporal_identity_manager = None
528
1183
  # Removed lightweight face tracker fallback; we always use AdvancedTracker
@@ -542,7 +1197,7 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
542
1197
  # Initialization must be done by calling await initialize(config) after instantiation
543
1198
  # This is handled in PostProcessor._get_use_case_instance()
544
1199
 
545
- async def initialize(self, config: Optional[FaceRecognitionEmbeddingConfig] = None) -> None:
1200
+ async def initialize(self, config: Optional[FaceRecognitionEmbeddingConfig] = None, emb:bool=False) -> None:
546
1201
  """
547
1202
  Async initialization method to set up face client and all components.
548
1203
  Must be called after __init__ before process() can be called.
@@ -563,7 +1218,7 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
563
1218
 
564
1219
  Args:
565
1220
  config: Optional config to use. If not provided, uses config from __init__.
566
-
1221
+ emb: Optional boolean; if True, staff embeddings are preloaded into the embedding manager during initialization.
567
1222
  Raises:
568
1223
  RuntimeError: If embeddings fail to load or verification fails
569
1224
  """
@@ -584,69 +1239,80 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
584
1239
  raise TypeError(f"Invalid config type for initialization: {type(init_config)}, expected FaceRecognitionEmbeddingConfig")
585
1240
 
586
1241
  self.logger.info("Initializing face recognition use case with provided config")
587
-
1242
+ # print("=============== STEP 1: INITIALIZING FACE CLIENT ===============")
588
1243
 
589
1244
  # Initialize face client (includes deployment update)
590
1245
  try:
591
1246
  self.face_client = await self._get_facial_recognition_client(init_config)
592
-
1247
+ # print(f"=============== FACE CLIENT INITIALIZED: {self.face_client is not None} ===============")
593
1248
 
594
1249
  # Initialize People activity logging if enabled
595
1250
  if init_config.enable_people_activity_logging:
596
1251
  self.people_activity_logging = PeopleActivityLogging(self.face_client)
597
1252
  # PeopleActivityLogging starts its background thread in __init__
598
1253
  self.logger.info("People activity logging enabled and started")
1254
+
1255
+ # Initialize Redis face matcher for fast remote similarity search
1256
+ try:
1257
+ redis_session = getattr(self.face_client, "session", None)
1258
+ except Exception:
1259
+ redis_session = None
1260
+ self.redis_face_matcher = RedisFaceMatcher(
1261
+ session=redis_session,
1262
+ logger=self.logger,
1263
+ face_client=self.face_client,
1264
+ )
599
1265
 
600
1266
  # Initialize EmbeddingManager
601
-
602
1267
  if not init_config.embedding_config:
603
-
604
- init_config.embedding_config = EmbeddingConfig(
605
- similarity_threshold=init_config.similarity_threshold,
606
- confidence_threshold=init_config.confidence_threshold,
607
- enable_track_id_cache=init_config.enable_track_id_cache,
608
- cache_max_size=init_config.cache_max_size,
609
- cache_ttl=3600,
610
- background_refresh_interval=43200,
611
- staff_embeddings_cache_ttl=43200,
612
- )
1268
+
1269
+ init_config.embedding_config = EmbeddingConfig(
1270
+ similarity_threshold=init_config.similarity_threshold,
1271
+ confidence_threshold=init_config.confidence_threshold,
1272
+ enable_track_id_cache=init_config.enable_track_id_cache,
1273
+ cache_max_size=init_config.cache_max_size,
1274
+ cache_ttl=3600,
1275
+ background_refresh_interval=43200,
1276
+ staff_embeddings_cache_ttl=43200,
1277
+ )
613
1278
  self.embedding_manager = EmbeddingManager(init_config.embedding_config, self.face_client)
614
-
1279
+
615
1280
  self.logger.info("Embedding manager initialized")
1281
+ if emb:
1282
+
1283
+ # Load staff embeddings immediately for fast startup (avoid race conditions)
1284
+ # This MUST succeed before we can proceed - fail fast if it doesn't
1285
+
1286
+ embeddings_loaded = await self.embedding_manager._load_staff_embeddings()
1287
+
1288
+ if not embeddings_loaded:
1289
+ error_msg = "CRITICAL: Failed to load staff embeddings at initialization - cannot proceed without embeddings"
1290
+ print(f"=============== {error_msg} ===============")
1291
+ self.logger.error(error_msg)
1292
+ raise RuntimeError(error_msg)
1293
+
1294
+ # Verify embeddings are actually loaded using is_ready() method
1295
+ if not self.embedding_manager.is_ready():
1296
+ status = self.embedding_manager.get_status()
1297
+ error_msg = f"CRITICAL: Embeddings not ready after load - status: {status}"
1298
+ print(f"=============== {error_msg} ===============")
1299
+ self.logger.error(error_msg)
1300
+ raise RuntimeError(error_msg)
1301
+
1302
+ self.logger.info(f"Successfully loaded {len(self.embedding_manager.staff_embeddings)} staff embeddings at initialization")
616
1303
 
617
- embeddings_loaded = await self.embedding_manager._load_staff_embeddings()
618
-
619
-
620
- if not embeddings_loaded:
621
- error_msg = "CRITICAL: Failed to load staff embeddings at initialization - cannot proceed without embeddings"
622
- print(f"=============== {error_msg} ===============")
623
- self.logger.error(error_msg)
624
- raise RuntimeError(error_msg)
625
-
626
- # Verify embeddings are actually loaded using is_ready() method
627
- if not self.embedding_manager.is_ready():
628
- status = self.embedding_manager.get_status()
629
- error_msg = f"CRITICAL: Embeddings not ready after load - status: {status}"
630
- print(f"=============== {error_msg} ===============")
631
- self.logger.error(error_msg)
632
- raise RuntimeError(error_msg)
633
-
634
- # print(f"=============== STAFF EMBEDDINGS COUNT: {len(self.embedding_manager.staff_embeddings)} ===============")
635
- # print(f"=============== EMBEDDINGS MATRIX SHAPE: {self.embedding_manager.embeddings_matrix.shape} ===============")
636
- # print(f"=============== EMBEDDINGS LOADED FLAG: {self.embedding_manager._embeddings_loaded} ===============")
637
- self.logger.info(f"Successfully loaded {len(self.embedding_manager.staff_embeddings)} staff embeddings at initialization")
638
-
639
- # NOW start background refresh after successful initial load (prevents race conditions)
640
- if init_config.embedding_config.enable_background_refresh:
641
- # print("=============== STEP 4: STARTING BACKGROUND REFRESH ===============")
642
- self.embedding_manager.start_background_refresh()
643
- self.logger.info("Background embedding refresh started after successful initial load")
1304
+ # NOW start background refresh after successful initial load (prevents race conditions)
1305
+ if init_config.embedding_config.enable_background_refresh:
1306
+ # print("=============== STEP 4: STARTING BACKGROUND REFRESH ===============")
1307
+ self.embedding_manager.start_background_refresh()
1308
+ self.logger.info("Background embedding refresh started after successful initial load")
644
1309
 
645
1310
  # Initialize TemporalIdentityManager with EmbeddingManager for fast local search
646
1311
  # print("=============== STEP 5: INITIALIZING TEMPORAL IDENTITY MANAGER ===============")
647
1312
  self.temporal_identity_manager = TemporalIdentityManager(
648
1313
  face_client=self.face_client,
649
1314
  embedding_manager=self.embedding_manager,
1315
+ redis_matcher=self.redis_face_matcher,
650
1316
  recognition_threshold=float(init_config.similarity_threshold),
651
1317
  history_size=20,
652
1318
  unknown_patience=7,
@@ -656,24 +1322,117 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
656
1322
  self.logger.info("Temporal identity manager initialized with embedding manager for local similarity search")
657
1323
 
658
1324
  # Final verification before marking as initialized
659
-
660
- if not self.embedding_manager.is_ready():
661
- status = self.embedding_manager.get_status()
662
- error_msg = f"CRITICAL: Final verification failed - embeddings not ready. Status: {status}"
663
- print(f"=============== {error_msg} ===============")
664
- self.logger.error(error_msg)
665
- raise RuntimeError(error_msg)
666
1325
 
667
- # Log detailed status for debugging
668
- status = self.embedding_manager.get_status()
1326
+ # if not self.embedding_manager.is_ready():
1327
+ # status = self.embedding_manager.get_status()
1328
+ # error_msg = f"CRITICAL: Final verification failed - embeddings not ready. Status: {status}"
1329
+ # print(f"=============== {error_msg} ===============")
1330
+ # self.logger.error(error_msg)
1331
+ # raise RuntimeError(error_msg)
1332
+
1333
+ # # Log detailed status for debugging
1334
+ # status = self.embedding_manager.get_status()
1335
+
669
1336
 
670
1337
  self._initialized = True
671
1338
  self.logger.info("Face recognition use case fully initialized and verified")
672
-
1339
+
673
1340
  except Exception as e:
674
1341
  self.logger.error(f"Error during use case initialization: {e}", exc_info=True)
675
1342
  raise RuntimeError(f"Failed to initialize face recognition use case: {e}") from e
676
1343
 
1344
+ def _extract_camera_info_from_stream(self, stream_info: Optional[Dict[str, Any]]) -> Dict[str, str]:
1345
+ """
1346
+ Extract camera_name, camera_id, and location_id from stream_info.
1347
+
1348
+ Args:
1349
+ stream_info: Stream information dictionary
1350
+
1351
+ Returns:
1352
+ Dict with camera_name, camera_id, location_id
1353
+ """
1354
+ camera_name = ""
1355
+ camera_id = ""
1356
+ location_id = ""
1357
+
1358
+ if not stream_info:
1359
+ return {"camera_name": camera_name, "camera_id": camera_id, "location_id": location_id}
1360
+
1361
+ # Extract camera_name from camera_info
1362
+ camera_info = stream_info.get("camera_info", {})
1363
+ if camera_info:
1364
+ camera_name = camera_info.get("camera_name", "")
1365
+ location_id = camera_info.get("location", "")
1366
+
1367
+ # Extract camera_id from topic (format: {camera_id}_input_topic)
1368
+ topic = stream_info.get("topic", "")
1369
+ if topic and "_input_topic" in topic:
1370
+ camera_id = topic.replace("_input_topic", "")
1371
+
1372
+ self.logger.debug(f"Extracted camera info - camera_name: '{camera_name}', camera_id: '{camera_id}', location_id: '{location_id}'")
1373
+
1374
+ return {"camera_name": camera_name, "camera_id": camera_id, "location_id": location_id}
1375
+
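# Editor's note (illustrative values, not from the diff): given
#   stream_info = {"camera_info": {"camera_name": "Lobby Cam", "location": "loc_123"},
#                  "topic": "cam_42_input_topic"}
# the helper above returns
#   {"camera_name": "Lobby Cam", "camera_id": "cam_42", "location_id": "loc_123"}
# because camera_id is recovered by stripping the "_input_topic" suffix from the topic.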
1376
+ async def _fetch_location_name(self, location_id: str) -> str:
1377
+ """
1378
+ Fetch location name from API using location_id.
1379
+
1380
+ Args:
1381
+ location_id: The location ID to look up
1382
+
1383
+ Returns:
1384
+ Location name string, or 'Entry Reception' as default if API fails
1385
+ """
1386
+ global _location_name_cache
1387
+ default_location = "Entry Reception"
1388
+
1389
+ if not location_id:
1390
+ self.logger.debug(f"[LOCATION] No location_id provided, using default: '{default_location}'")
1391
+ return default_location
1392
+
1393
+ # Check cache first
1394
+ if location_id in _location_name_cache:
1395
+ cached_name = _location_name_cache[location_id]
1396
+ self.logger.debug(f"[LOCATION] Using cached location name for '{location_id}': '{cached_name}'")
1397
+ return cached_name
1398
+
1399
+ # Need a session to make API call
1400
+ if not self.face_client or not hasattr(self.face_client, 'session') or not self.face_client.session:
1401
+ self.logger.warning(f"[LOCATION] No session available, using default: '{default_location}'")
1402
+ return default_location
1403
+
1404
+ try:
1405
+ endpoint = f"/v1/inference/get_location/{location_id}"
1406
+ self.logger.info(f"[LOCATION] Fetching location name from API: {endpoint}")
1407
+
1408
+ response = self.face_client.session.rpc.get(endpoint)
1409
+
1410
+ if response and isinstance(response, dict):
1411
+ success = response.get("success", False)
1412
+ if success:
1413
+ data = response.get("data", {})
1414
+ location_name = data.get("locationName", default_location)
1415
+ self.logger.info(f"[LOCATION] ✓ Fetched location name: '{location_name}' for location_id: '{location_id}'")
1416
+
1417
+ # Cache the result
1418
+ _location_name_cache[location_id] = location_name
1419
+ return location_name
1420
+ else:
1421
+ self.logger.warning(
1422
+ f"[LOCATION] API returned success=false for location_id '{location_id}': "
1423
+ f"{response.get('message', 'Unknown error')}"
1424
+ )
1425
+ else:
1426
+ self.logger.warning(f"[LOCATION] Invalid response format from API: {response}")
1427
+
1428
+ except Exception as e:
1429
+ self.logger.error(f"[LOCATION] Error fetching location name for '{location_id}': {e}", exc_info=True)
1430
+
1431
+ # Use default on any failure
1432
+ self.logger.info(f"[LOCATION] Using default location name: '{default_location}'")
1433
+ _location_name_cache[location_id] = default_location
1434
+ return default_location
1435
+
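# Editor's note (illustrative, not part of the released code): _fetch_location_name expects
# a response shaped like {"success": True, "data": {"locationName": "..."}} from
# /v1/inference/get_location/{location_id}; the resolved name (or the "Entry Reception"
# default on any failure) is stored in _location_name_cache, so a second call with the
# same location_id returns from the cache without another RPC.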
677
1436
  async def _get_facial_recognition_client(
678
1437
  self, config: FaceRecognitionEmbeddingConfig
679
1438
  ) -> FacialRecognitionClient:
@@ -683,21 +1442,61 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
683
1442
  self.logger.info(
684
1443
  f"Initializing face recognition client with server ID: {config.facial_recognition_server_id}"
685
1444
  )
1445
+ print(f"=============== CONFIG: {config} ===============")
1446
+ print(f"=============== CONFIG.SESSION: {config.session} ===============")
1447
+ account_number = os.getenv("MATRICE_ACCOUNT_NUMBER", "")
1448
+ access_key_id = os.getenv("MATRICE_ACCESS_KEY_ID", "")
1449
+ secret_key = os.getenv("MATRICE_SECRET_ACCESS_KEY", "")
1450
+ project_id = os.getenv("MATRICE_PROJECT_ID", "")
1451
+
1452
+ self.logger.info(f"[PROJECT_ID] Initial project_id from env: '{project_id}'")
1453
+
1454
+ self.session1 = Session(
1455
+ account_number=account_number,
1456
+ access_key=access_key_id,
1457
+ secret_key=secret_key,
1458
+ project_id=project_id,
1459
+ )
686
1460
  self.face_client = FacialRecognitionClient(
687
- server_id=config.facial_recognition_server_id, session=config.session
1461
+ server_id=config.facial_recognition_server_id, session=self.session1
688
1462
  )
689
1463
  self.logger.info("Face recognition client initialized")
690
1464
 
1465
+ # After FacialRecognitionClient initialization, it may have fetched project_id from action details
1466
+ # and updated MATRICE_PROJECT_ID env var. Update session1 with the correct project_id.
1467
+ updated_project_id = self.face_client.project_id or os.getenv("MATRICE_PROJECT_ID", "")
1468
+ if updated_project_id and updated_project_id != project_id:
1469
+ self.logger.info(f"[PROJECT_ID] Project ID updated by FacialRecognitionClient: '{updated_project_id}'")
1470
+ try:
1471
+ self.session1.update(updated_project_id)
1472
+ self.logger.info(f"[PROJECT_ID] Updated session1 with project_id: '{updated_project_id}'")
1473
+ except Exception as e:
1474
+ self.logger.warning(f"[PROJECT_ID] Failed to update session1 with project_id: {e}")
1475
+ elif updated_project_id:
1476
+ self.logger.info(f"[PROJECT_ID] Using project_id: '{updated_project_id}'")
1477
+
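
The bootstrap above reads credentials from the MATRICE_* environment variables, builds a Session, and then re-syncs the project id once the FacialRecognitionClient has resolved it. A minimal sketch of that pattern, assuming only the Session constructor and Session.update(project_id) calls used in this diff (the standalone helpers are illustrative):

    import os

    def build_session(session_cls):
        # Credentials come from the MATRICE_* environment variables.
        return session_cls(
            account_number=os.getenv("MATRICE_ACCOUNT_NUMBER", ""),
            access_key=os.getenv("MATRICE_ACCESS_KEY_ID", ""),
            secret_key=os.getenv("MATRICE_SECRET_ACCESS_KEY", ""),
            project_id=os.getenv("MATRICE_PROJECT_ID", ""),
        )

    def sync_project_id(session, face_client, original_project_id: str) -> None:
        # The client may have resolved the project id from action details and
        # refreshed MATRICE_PROJECT_ID; mirror that back into the session.
        updated = getattr(face_client, "project_id", None) or os.getenv("MATRICE_PROJECT_ID", "")
        if updated and updated != original_project_id:
            session.update(updated)
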
691
1478
  # Call update_deployment if deployment_id is provided
692
1479
  if config.deployment_id:
693
1480
  try:
694
- self.logger.info(f"Updating deployment action with ID: {config.deployment_id}")
695
- response = await self.face_client.update_deployment_action(config.deployment_id)
696
- if response:
697
- self.logger.info(f"Successfully updated deployment action {config.deployment_id}")
1481
+ # Create temporary RedisFaceMatcher to get app_deployment_id using verified method
1482
+ redis_session = getattr(self.face_client, "session", None) or config.session
1483
+ temp_redis_matcher = RedisFaceMatcher(
1484
+ session=redis_session,
1485
+ logger=self.logger,
1486
+ face_client=self.face_client,
1487
+ )
1488
+ app_deployment_id = await temp_redis_matcher._ensure_app_deployment_id()
1489
+
1490
+ if app_deployment_id:
1491
+ self.logger.info(f"Updating deployment action with app_deployment_id: {app_deployment_id}")
1492
+ response = await self.face_client.update_deployment_action(app_deployment_id)
1493
+ if response:
1494
+ self.logger.info(f"Successfully updated deployment action {app_deployment_id}")
1495
+ else:
1496
+ self.logger.warning(f"Failed to update deployment: {response.get('error', 'Unknown error')}")
698
1497
  else:
699
- self.logger.warning(f"Failed to update deployment: {response.get('error', 'Unknown error')}")
700
-
1498
+ self.logger.warning("Could not resolve app_deployment_id, skipping deployment action update")
1499
+
701
1500
  self.logger.info(f"Updating deployment with ID: {config.deployment_id}")
702
1501
  response = await self.face_client.update_deployment(config.deployment_id)
703
1502
  if response:
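
The hunk above replaces the direct update_deployment_action(config.deployment_id) call with a lookup of the app-level deployment id via a temporary RedisFaceMatcher, while update_deployment still receives the configured deployment_id. A condensed sketch of the resulting flow (logging and error handling trimmed; the standalone coroutine is illustrative, not part of the module):

    async def update_deployment_records(face_client, redis_matcher, deployment_id, logger):
        # Resolve the app-level deployment id first; only then update the action.
        app_deployment_id = await redis_matcher._ensure_app_deployment_id()
        if app_deployment_id:
            await face_client.update_deployment_action(app_deployment_id)
        else:
            logger.warning("Could not resolve app_deployment_id, skipping deployment action update")
        # The deployment record itself is still updated with the configured deployment_id.
        await face_client.update_deployment(deployment_id)
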
@@ -728,6 +1527,9 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
728
1527
  """
729
1528
  processing_start = time.time()
730
1529
  # Ensure config is correct type
1530
+ self.logger.info(f"[CONFIG-PRINT]-------------------------- {config} --------------------------")
1531
+ self.logger.info(f"[STREAM-PRINT]-------------------------- {stream_info} --------------------------")
1532
+
731
1533
  if not isinstance(config, FaceRecognitionEmbeddingConfig):
732
1534
  return self.create_error_result(
733
1535
  "Invalid config type",
@@ -818,9 +1620,7 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
818
1620
  )
819
1621
  self.logger.debug("Applied category filtering")
820
1622
 
821
- # print("------------------TILL TRACKER MS----------------------------")
822
- # print(self._initialized,"LATENCY:",(time.time() - processing_start)*1000,"| Throughput fps:",(1.0 / (time.time() - processing_start)) if (time.time() - processing_start) > 0 else None)
823
- # print("------------------TILL TRACKER MS----------------------------")
1623
+
824
1624
  # Advanced tracking (BYTETracker-like) - only if enabled
825
1625
  if config.enable_face_tracking:
826
1626
  from ..advanced_tracker import AdvancedTracker
@@ -860,27 +1660,35 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
860
1660
  recognized_persons = {}
861
1661
  current_frame_staff_details = {}
862
1662
 
863
- # print("------------------TRACKER INIT END----------------------------")
864
- # print("LATENCY:",(time.time() - processing_start)*1000,"| Throughput fps:",(1.0 / (time.time() - processing_start)) if (time.time() - processing_start) > 0 else None)
865
- # print("------------------TRACKER INIT END----------------------------")
1663
+
1664
+ # Extract camera info and fetch location name
1665
+ camera_info_extracted = self._extract_camera_info_from_stream(stream_info)
1666
+ camera_name = camera_info_extracted.get("camera_name", "")
1667
+ camera_id = camera_info_extracted.get("camera_id", "")
1668
+ location_id = camera_info_extracted.get("location_id", "")
1669
+
1670
+ # Fetch actual location name from API
1671
+ location_name = await self._fetch_location_name(location_id)
1672
+ self.logger.debug(f"Using location_name: '{location_name}', camera_name: '{camera_name}', camera_id: '{camera_id}'")
866
1673
 
867
1674
  # Process face recognition for each detection (if enabled)
868
1675
  if config.enable_face_recognition:
869
1676
  # Additional safety check: verify embeddings are still loaded and ready
870
- if not self.embedding_manager or not self.embedding_manager.is_ready():
871
- status = self.embedding_manager.get_status() if self.embedding_manager else {}
872
- error_msg = f"CRITICAL: Cannot process face recognition - embeddings not ready. Status: {status}"
873
- self.logger.error(error_msg)
874
- print(f"ERROR: {error_msg}")
875
- return self.create_error_result(
876
- error_msg,
877
- usecase=self.name,
878
- category=self.category,
879
- context=context,
880
- )
1677
+ # if not self.embedding_manager or not self.embedding_manager.is_ready():
1678
+ # status = self.embedding_manager.get_status() if self.embedding_manager else {}
1679
+ # error_msg = f"CRITICAL: Cannot process face recognition - embeddings not ready. Status: {status}"
1680
+ # self.logger.error(error_msg)
1681
+ # print(f"ERROR: {error_msg}")
1682
+ # return self.create_error_result(
1683
+ # error_msg,
1684
+ # usecase=self.name,
1685
+ # category=self.category,
1686
+ # context=context,
1687
+ # )
881
1688
 
882
1689
  face_recognition_result = await self._process_face_recognition(
883
- processed_data, config, stream_info, input_bytes
1690
+ processed_data, config, stream_info, input_bytes,
1691
+ camera_name=camera_name, camera_id=camera_id, location_name=location_name
884
1692
  )
885
1693
  processed_data, current_recognized_count, current_unknown_count, recognized_persons, current_frame_staff_details = face_recognition_result
886
1694
  else:
@@ -891,9 +1699,6 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
891
1699
  detection["recognition_status"] = "disabled"
892
1700
  detection["enrolled"] = False
893
1701
 
894
- # print("------------------FACE RECONG CONFIG ENABLED----------------------------")
895
- # print("LATENCY:",(time.time() - processing_start)*1000,"| Throughput fps:",(1.0 / (time.time() - processing_start)) if (time.time() - processing_start) > 0 else None)
896
- # print("------------------FACE RECONG CONFIG ENABLED----------------------------")
897
1702
 
898
1703
  # Update tracking state for total count per label
899
1704
  self._update_tracking_state(processed_data)
@@ -927,9 +1732,6 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
927
1732
  current_recognized_count, current_unknown_count, recognized_persons
928
1733
  ))
929
1734
 
930
- # print("------------------TILL FACE RECOG SUMMARY----------------------------")
931
- # print("LATENCY:",(time.time() - processing_start)*1000,"| Throughput fps:",(1.0 / (time.time() - processing_start)) if (time.time() - processing_start) > 0 else None)
932
- # print("------------------TILL FACE RECOG SUMMARY----------------------------")
933
1735
 
934
1736
  # Add detections to the counting summary (standard pattern for detection use cases)
935
1737
  # Ensure display label is present for UI (does not affect logic/counters)
@@ -962,10 +1764,6 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
962
1764
  )
963
1765
  summary = summary_list[0] if summary_list else {}
964
1766
 
965
- # print("------------------TILL TRACKING STATS----------------------------")
966
- # print("LATENCY:",(time.time() - processing_start)*1000,"| Throughput fps:",(1.0 / (time.time() - processing_start)) if (time.time() - processing_start) > 0 else None)
967
- # print("------------------TILL TRACKING STATS----------------------------")
968
-
969
1767
 
970
1768
  agg_summary = {
971
1769
  str(frame_number): {
@@ -1039,6 +1837,12 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
1039
1837
 
1040
1838
  return processed_data
1041
1839
 
1840
+ def _build_search_id(self, track_key: Any, frame_id: Optional[Any]) -> str:
1841
+ """Generate a deterministic Redis search identifier per detection."""
1842
+ base_frame = frame_id if frame_id is not None else self._total_frame_counter
1843
+ safe_track = str(track_key if track_key is not None else "na").replace(" ", "_")
1844
+ return f"face_{base_frame}_{safe_track}"
1845
+
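
A standalone rendering of the identifier scheme above, useful for seeing the exact strings it produces (the free function and the total_frame_counter argument are illustrative stand-ins for the method and self._total_frame_counter):

    from typing import Any, Optional

    def build_search_id(track_key: Any, frame_id: Optional[Any], total_frame_counter: int = 0) -> str:
        # Deterministic per-detection id: frame number first, then a space-safe track key.
        base_frame = frame_id if frame_id is not None else total_frame_counter
        safe_track = str(track_key if track_key is not None else "na").replace(" ", "_")
        return f"face_{base_frame}_{safe_track}"

    assert build_search_id(7, 128) == "face_128_7"
    assert build_search_id("person 7", 128) == "face_128_person_7"
    assert build_search_id(None, None, total_frame_counter=42) == "face_42_na"
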
1042
1846
  def _extract_frame_from_data(self, input_bytes: bytes) -> Optional[np.ndarray]:
1043
1847
  """
1044
1848
  Extract frame from original model data
@@ -1075,6 +1879,9 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
1075
1879
  config: FaceRecognitionEmbeddingConfig,
1076
1880
  stream_info: Optional[Dict[str, Any]] = None,
1077
1881
  input_bytes: Optional[bytes] = None,
1882
+ camera_name: str = "",
1883
+ camera_id: str = "",
1884
+ location_name: str = "",
1078
1885
  ) -> List[Dict]:
1079
1886
  """Process face recognition for each detection with embeddings"""
1080
1887
 
@@ -1111,10 +1918,8 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
1111
1918
  )
1112
1919
  self._frame_warning_logged = True
1113
1920
 
1114
- # Get location from stream_info
1115
- location = (
1116
- stream_info.get("camera_location", "unknown") if stream_info else "unknown"
1117
- )
1921
+ # Use the location_name passed from process() (fetched from API)
1922
+ location = location_name if location_name else "Entry Reception"
1118
1923
 
1119
1924
  # Generate current timestamp
1120
1925
  current_timestamp = datetime.now(timezone.utc).isoformat()
@@ -1128,7 +1933,8 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
1128
1933
  processed_detection = await self._process_face(
1129
1934
  detection, current_frame, location, current_timestamp, config,
1130
1935
  current_recognized_count, current_unknown_count,
1131
- recognized_persons, current_frame_staff_details
1936
+ recognized_persons, current_frame_staff_details,
1937
+ camera_name=camera_name, camera_id=camera_id
1132
1938
  )
1133
1939
  # print("------------------WHOLE FACE RECOG PROCESSING DETECTION----------------------------")
1134
1940
  # print("LATENCY:",(time.time() - st1)*1000,"| Throughput fps:",(1.0 / (time.time() - st1)) if (time.time() - st1) > 0 else None)
@@ -1160,6 +1966,8 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
1160
1966
  current_unknown_count: int = 0,
1161
1967
  recognized_persons: Dict = None,
1162
1968
  current_frame_staff_details: Dict = None,
1969
+ camera_name: str = "",
1970
+ camera_id: str = "",
1163
1971
  ) -> Dict:
1164
1972
 
1165
1973
  # Extract and validate embedding using EmbeddingManager
@@ -1184,6 +1992,9 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
1184
1992
  h_box = max(1, y2 - y1)
1185
1993
  frame_id = detection.get("frame_id", None) #TODO: Maybe replace this with stream_info frame_id
1186
1994
 
1995
+ track_key = track_id if track_id is not None else f"no_track_{id(detection)}"
1996
+ search_id = self._build_search_id(track_key, frame_id)
1997
+
1187
1998
  # Track probation age strictly by internal tracker id
1188
1999
  if track_id is not None:
1189
2000
  if track_id not in self._track_first_seen:
@@ -1207,7 +2018,6 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
1207
2018
  detection_type = "unknown"
1208
2019
  try:
1209
2020
  if self.temporal_identity_manager:
1210
- track_key = track_id if track_id is not None else f"no_track_{id(detection)}"
1211
2021
  if not eligible_for_recognition:
1212
2022
  # Mirror compare_similarity: when not eligible, keep stable label if present
1213
2023
  s = self.temporal_identity_manager.tracks.get(track_key, {})
@@ -1243,6 +2053,7 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
1243
2053
  eligible_for_recognition=True,
1244
2054
  location=location,
1245
2055
  timestamp=current_timestamp,
2056
+ search_id=search_id,
1246
2057
  )
1247
2058
  # print("------------------FACE RECOG TEMPORAL IDENTITY MANAGER UPDATE----------------------------")
1248
2059
  # print("LATENCY:",(time.time() - st3)*1000,"| Throughput fps:",(1.0 / (time.time() - st3)) if (time.time() - st3) > 0 else None)
@@ -1330,6 +2141,8 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
1330
2141
  detection=detection,
1331
2142
  current_frame=current_frame,
1332
2143
  location=location,
2144
+ camera_name=camera_name,
2145
+ camera_id=camera_id,
1333
2146
  )
1334
2147
  # print("------------------FACE RECOG ENQUEUEING DETECTION FOR ACTIVITY LOGGING----------------------------")
1335
2148
  # print("LATENCY:",(time.time() - st4)*1000,"| Throughput fps:",(1.0 / (time.time() - st4)) if (time.time() - st4) > 0 else None)
@@ -1592,19 +2405,19 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
1592
2405
 
1593
2406
  # Build current_counts array in expected format
1594
2407
  current_counts = []
1595
- for cat, count in per_category_count.items():
1596
- if count > 0 or total_detections > 0:
1597
- current_counts.append({"category": cat, "count": count})
2408
+ # for cat, count in per_category_count.items():
2409
+ # if count > 0 or total_detections > 0:
2410
+ # current_counts.append({"category": cat, "count": count})
1598
2411
 
1599
2412
  # Add face recognition specific current counts
1600
2413
  current_frame = face_summary.get("current_frame", {})
1601
2414
  current_counts.extend(
1602
2415
  [
1603
2416
  {
1604
- "category": "recognized_faces",
2417
+ "category": "Recognized Faces",
1605
2418
  "count": current_frame.get("recognized", 0),
1606
2419
  },
1607
- {"category": "unknown_faces", "count": current_frame.get("unknown", 0)},
2420
+ {"category": "Unknown Faces", "count": current_frame.get("unknown", 0)},
1608
2421
  ]
1609
2422
  )
1610
2423
 
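
With the per-category loop above commented out, current_counts now carries only the two face-recognition entries appended via extend(); illustrative values:

    current_counts = [
        {"category": "Recognized Faces", "count": 3},
        {"category": "Unknown Faces", "count": 1},
    ]
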
@@ -1667,7 +2480,7 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
1667
2480
  )
1668
2481
 
1669
2482
 
1670
- human_text_lines = [f"CURRENT FRAME @ {current_timestamp}"]
2483
+ human_text_lines = [f"CURRENT FRAME @ {current_timestamp}:"]
1671
2484
 
1672
2485
  current_recognized = current_frame.get("recognized", 0)
1673
2486
  current_unknown = current_frame.get("unknown", 0)
@@ -1675,8 +2488,8 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
1675
2488
  total_current = current_recognized + current_unknown
1676
2489
 
1677
2490
  # Show staff names and IDs being recognized in current frame (with tabs)
1678
- human_text_lines.append(f"\tCurrent Total Faces: {total_current}")
1679
- human_text_lines.append(f"\tCurrent Recognized: {current_recognized}")
2491
+ human_text_lines.append(f"\t- Current Total Faces: {total_current}")
2492
+ human_text_lines.append(f"\t- Current Recognized: {current_recognized}")
1680
2493
 
1681
2494
  if recognized_persons:
1682
2495
  for person_id in recognized_persons.keys():
@@ -1684,15 +2497,15 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
1684
2497
  staff_name = (current_frame_staff_details or {}).get(
1685
2498
  person_id, f"Staff {person_id}"
1686
2499
  )
1687
- human_text_lines.append(f"\tName: {staff_name} (ID: {person_id})")
1688
- human_text_lines.append(f"\tCurrent Unknown: {current_unknown}")
2500
+ human_text_lines.append(f"\t\t- Name: {staff_name} (ID: {person_id})")
2501
+ human_text_lines.append(f"\t- Current Unknown: {current_unknown}")
1689
2502
 
1690
2503
  # Show current frame counts only (with tabs)
1691
2504
  human_text_lines.append("")
1692
- human_text_lines.append(f"TOTAL SINCE @ {start_timestamp}")
1693
- human_text_lines.append(f"\tTotal Faces: {cumulative_total}")
1694
- human_text_lines.append(f"\tRecognized: {face_summary.get('session_totals',{}).get('total_recognized', 0)}")
1695
- human_text_lines.append(f"\tUnknown: {face_summary.get('session_totals',{}).get('total_unknown', 0)}")
2505
+ # human_text_lines.append(f"TOTAL SINCE @ {start_timestamp}")
2506
+ # human_text_lines.append(f"\tTotal Faces: {cumulative_total}")
2507
+ # human_text_lines.append(f"\tRecognized: {face_summary.get('session_totals',{}).get('total_recognized', 0)}")
2508
+ # human_text_lines.append(f"\tUnknown: {face_summary.get('session_totals',{}).get('total_unknown', 0)}")
1696
2509
  # Additional counts similar to compare_similarity HUD
1697
2510
  # try:
1698
2511
  # human_text_lines.append(f"\tCurrent Faces (detections): {total_detections}")
@@ -1702,15 +2515,15 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
1702
2515
 
1703
2516
  human_text = "\n".join(human_text_lines)
1704
2517
 
1705
- if alerts:
1706
- for alert in alerts:
1707
- human_text_lines.append(
1708
- f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}"
1709
- )
1710
- else:
1711
- human_text_lines.append("Alerts: None")
2518
+ # if alerts:
2519
+ # for alert in alerts:
2520
+ # human_text_lines.append(
2521
+ # f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}"
2522
+ # )
2523
+ # else:
2524
+ # human_text_lines.append("Alerts: None")
1712
2525
 
1713
- human_text = "\n".join(human_text_lines)
2526
+ # human_text = "\n".join(human_text_lines)
1714
2527
  reset_settings = [
1715
2528
  {"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}
1716
2529
  ]
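
With the session-total and alert lines commented out, human_text now reduces to the current-frame block built above. An illustrative rendering with made-up values (tabs shown as indentation; one recognized and one unknown face):

    CURRENT FRAME @ 2025-01-01T09:00:00+00:00:
        - Current Total Faces: 2
        - Current Recognized: 1
            - Name: Jane Doe (ID: 1024)
        - Current Unknown: 1
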
@@ -1727,7 +2540,7 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
1727
2540
  start_time=high_precision_start_timestamp,
1728
2541
  reset_time=high_precision_reset_timestamp,
1729
2542
  )
1730
-
2543
+ tracking_stat['target_categories'] = ['Recognized Faces', 'Unknown Faces']
1731
2544
  tracking_stats.append(tracking_stat)
1732
2545
  return tracking_stats
1733
2546