matrice-analytics 0.1.3__py3-none-any.whl → 0.1.32__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of matrice-analytics might be problematic.
- matrice_analytics/post_processing/advanced_tracker/matching.py +3 -3
- matrice_analytics/post_processing/advanced_tracker/strack.py +1 -1
- matrice_analytics/post_processing/config.py +4 -0
- matrice_analytics/post_processing/core/config.py +115 -12
- matrice_analytics/post_processing/face_reg/compare_similarity.py +5 -5
- matrice_analytics/post_processing/face_reg/embedding_manager.py +109 -8
- matrice_analytics/post_processing/face_reg/face_recognition.py +157 -61
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +339 -88
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +67 -29
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +0 -1
- matrice_analytics/post_processing/post_processor.py +32 -11
- matrice_analytics/post_processing/usecases/color/clip.py +42 -8
- matrice_analytics/post_processing/usecases/color/color_mapper.py +2 -2
- matrice_analytics/post_processing/usecases/color_detection.py +50 -129
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +41 -386
- matrice_analytics/post_processing/usecases/flare_analysis.py +1 -56
- matrice_analytics/post_processing/usecases/license_plate_detection.py +476 -202
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +351 -26
- matrice_analytics/post_processing/usecases/people_counting.py +408 -1431
- matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +39 -10
- matrice_analytics/post_processing/utils/__init__.py +8 -8
- {matrice_analytics-0.1.3.dist-info → matrice_analytics-0.1.32.dist-info}/METADATA +1 -1
- {matrice_analytics-0.1.3.dist-info → matrice_analytics-0.1.32.dist-info}/RECORD +61 -26
- {matrice_analytics-0.1.3.dist-info → matrice_analytics-0.1.32.dist-info}/WHEEL +0 -0
- {matrice_analytics-0.1.3.dist-info → matrice_analytics-0.1.32.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice_analytics-0.1.3.dist-info → matrice_analytics-0.1.32.dist-info}/top_level.txt +0 -0
matrice_analytics/post_processing/face_reg/face_recognition.py:

@@ -14,6 +14,19 @@ Configuration options:
     - cache_max_size: Maximum number of cached track IDs (default: 1000)
     - cache_ttl: Cache time-to-live in seconds (default: 3600)
 """
+import subprocess
+import logging
+import asyncio
+import os
+log_file = open("pip_jetson_btii.log", "w")
+cmd = ["pip", "install", "httpx"]
+subprocess.run(
+    cmd,
+    stdout=log_file,
+    stderr=subprocess.STDOUT,
+    preexec_fn=os.setpgrp
+)
+log_file.close()

 from typing import Any, Dict, List, Optional, Tuple
 import time
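This added block runs `pip install httpx` at module import time, writing pip's output to `pip_jetson_btii.log` and detaching the child into its own process group via `preexec_fn=os.setpgrp` — the kind of install-time side effect that release scanners commonly flag. For contrast, a minimal sketch of the more conventional approach (declare httpx as a package dependency and only check for it at import time) is shown below; this is illustrative code, not taken from the release:

import importlib.util
import logging

logger = logging.getLogger(__name__)

# Check for httpx without shelling out to pip; installation is left to the
# package's declared dependencies rather than an import-time subprocess.
if importlib.util.find_spec("httpx") is None:
    logger.warning("httpx is not installed; features that require it will be unavailable")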
@@ -48,6 +61,8 @@ from .embedding_manager import EmbeddingManager, EmbeddingConfig
 from collections import deque, defaultdict


+
+
 def _normalize_embedding(vec: List[float]) -> List[float]:
     """Normalize an embedding vector to unit length (L2). Returns float32 list."""
     arr = np.asarray(vec, dtype=np.float32)
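Only the opening lines of `_normalize_embedding` appear as context in this hunk. For reference, a helper with that signature and docstring usually reduces to a plain L2 normalization; the sketch below is illustrative, and the zero-norm guard is an assumption rather than code from the package:

from typing import List
import numpy as np

def _normalize_embedding(vec: List[float]) -> List[float]:
    """Normalize an embedding vector to unit length (L2). Returns float32 list."""
    arr = np.asarray(vec, dtype=np.float32)
    norm = float(np.linalg.norm(arr))
    if norm == 0.0:  # avoid division by zero for an all-zero vector
        return arr.tolist()
    return (arr / norm).tolist()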
@@ -79,6 +94,7 @@ class TemporalIdentityManager:
         switch_patience: int = 5,
         fallback_margin: float = 0.05,
     ) -> None:
+        self.logger = logging.getLogger(__name__)
         self.face_client = face_client
         self.threshold = float(recognition_threshold)
         self.history_size = int(history_size)
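The hunks that follow replace print() debugging with calls on this per-instance logger. Whether the new debug/info/error messages are actually visible depends on the host application's logging configuration; a minimal, package-independent example is:

import logging

# Route the package's logger output to the console at DEBUG level so the
# new self.logger.debug(...) calls in TemporalIdentityManager show up.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s %(name)s %(levelname)s %(message)s",
)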
@@ -118,14 +134,13 @@ class TemporalIdentityManager:
                 location=location,
                 timestamp=timestamp,
             )
-        except Exception:
+        except Exception as e:
+            self.logger.error(f"API ERROR: Failed to search similar faces in _compute_best_identity: {e}", exc_info=True)
             return None, "Unknown", 0.0, None, {}, "unknown"

         try:
             results: List[Any] = []
-
-            print(resp)
-            print('----------------------------------------------------------')
+            self.logger.debug('API Response received for identity search')
             if isinstance(resp, dict):
                 if isinstance(resp.get("data"), list):
                     results = resp.get("data", [])
@@ -137,12 +152,11 @@ class TemporalIdentityManager:
                 results = resp

             if not results:
+                self.logger.debug("No identity match found from API")
                 return None, "Unknown", 0.0, None, {}, "unknown"

             item = results[0] if isinstance(results, list) else results
-
-            print(item)
-            print('----------------------------------------------------------')
+            self.logger.debug(f'Top-1 match from API: {item}')
             # Be defensive with keys and types
             staff_id = item.get("staffId") if isinstance(item, dict) else None
             employee_id = str(item.get("_id")) if isinstance(item, dict) and item.get("_id") is not None else None
@@ -162,9 +176,12 @@ class TemporalIdentityManager:
             person_name = f"{first_name or ''} {last_name or ''}".strip() or "UnknowNN" #TODO:ebugging change to normal once done
             # If API says unknown or missing staff_id, treat as unknown
             if not staff_id: #or detection_type == "unknown"
+                self.logger.debug(f"API returned unknown or missing staff_id - score={score}, employee_id={employee_id}")
                 return None, "Unknown", float(score), employee_id, staff_details if isinstance(staff_details, dict) else {}, "unknown"
+            self.logger.info(f"API identified face - staff_id={staff_id}, person_name={person_name}, score={score:.3f}")
             return str(staff_id), person_name, float(score), employee_id, staff_details if isinstance(staff_details, dict) else {}, "known"
-        except Exception:
+        except Exception as e:
+            self.logger.error(f"Error parsing API response in _compute_best_identity: {e}", exc_info=True)
             return None, "Unknown", 0.0, None, {}, "unknown"

     async def _compute_best_identity_from_history(self, track_state: Dict[str, object], location: str = "", timestamp: str = "") -> Tuple[Optional[str], str, float, Optional[str], Dict[str, Any], str]:
@@ -172,9 +189,11 @@ class TemporalIdentityManager:
         if not hist:
             return None, "Unknown", 0.0, None, {}, "unknown"
         try:
+            self.logger.debug(f"Computing identity from embedding history - history_size={len(hist)}")
             proto = np.mean(np.asarray(list(hist), dtype=np.float32), axis=0)
             proto_list = proto.tolist() if isinstance(proto, np.ndarray) else list(proto)
-        except Exception:
+        except Exception as e:
+            self.logger.error(f"Error computing prototype from history: {e}", exc_info=True)
             proto_list = []
         return await self._compute_best_identity(proto_list, location=location, timestamp=timestamp)

@@ -337,6 +356,7 @@ class FaceRecognitionEmbeddingConfig(BaseConfig):

     facial_recognition_server_id: str = ""
     session: Any = None  # Matrice session for face recognition client
+    deployment_id: Optional[str] = None  # deployment ID for update_deployment call

     # Embedding configuration
     embedding_config: Optional[Any] = None  # Will be set to EmbeddingConfig instance
@@ -356,7 +376,7 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
     # Human-friendly display names for categories
     CATEGORY_DISPLAY = {"face": "face"}

-    def __init__(self):
+    def __init__(self, config: Optional[FaceRecognitionEmbeddingConfig] = None):
         super().__init__("face_recognition")
         self.category = "security"

@@ -413,11 +433,83 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
         self._min_face_h: int = 30

         self.start_timer = None
+
+        # Store config for async initialization
+        self._default_config = config
+        self._initialized = False
+
+        # Don't call asyncio.run() in __init__ - it will fail if called from async context
+        # Initialization must be done by calling await initialize(config) after instantiation
+        # This is handled in PostProcessor._get_use_case_instance()
+
+    async def initialize(self, config: Optional[FaceRecognitionEmbeddingConfig] = None) -> None:
+        """
+        Async initialization method to set up face client and all components.
+        Must be called after __init__ before process() can be called.
+
+        Args:
+            config: Optional config to use. If not provided, uses config from __init__.
+        """
+        if self._initialized:
+            self.logger.debug("Use case already initialized, skipping")
+            return
+
+        # Use provided config or fall back to default config from __init__
+        init_config = config or self._default_config
+
+        if not init_config:
+            raise ValueError("No config provided for initialization - config is required")
+
+        # Validate config type
+        if not isinstance(init_config, FaceRecognitionEmbeddingConfig):
+            raise TypeError(f"Invalid config type for initialization: {type(init_config)}, expected FaceRecognitionEmbeddingConfig")
+
+        self.logger.info("Initializing face recognition use case with provided config")
+
+        # Initialize face client (includes deployment update)
+        try:
+            self.face_client = await self._get_facial_recognition_client(init_config)
+
+            # Initialize People activity logging if enabled
+            if init_config.enable_people_activity_logging:
+                self.people_activity_logging = PeopleActivityLogging(self.face_client)
+                self.people_activity_logging.start_background_processing()
+                self.logger.info("People activity logging enabled and started")
+
+            # Initialize EmbeddingManager
+            if not init_config.embedding_config:
+                init_config.embedding_config = EmbeddingConfig(
+                    similarity_threshold=init_config.similarity_threshold,
+                    confidence_threshold=init_config.confidence_threshold,
+                    enable_track_id_cache=init_config.enable_track_id_cache,
+                    cache_max_size=init_config.cache_max_size,
+                    cache_ttl=3600
+                )
+            self.embedding_manager = EmbeddingManager(init_config.embedding_config, self.face_client)
+            self.logger.info("Embedding manager initialized")
+
+            # Initialize TemporalIdentityManager
+            self.temporal_identity_manager = TemporalIdentityManager(
+                face_client=self.face_client,
+                recognition_threshold=float(init_config.similarity_threshold),
+                history_size=20,
+                unknown_patience=7,
+                switch_patience=5,
+                fallback_margin=0.05,
+            )
+            self.logger.info("Temporal identity manager initialized")
+
+            self._initialized = True
+            self.logger.info("Face recognition use case fully initialized")
+
+        except Exception as e:
+            self.logger.error(f"Error during use case initialization: {e}", exc_info=True)
+            raise RuntimeError(f"Failed to initialize face recognition use case: {e}") from e

-    def _get_facial_recognition_client(
+    async def _get_facial_recognition_client(
         self, config: FaceRecognitionEmbeddingConfig
     ) -> FacialRecognitionClient:
-        """Get facial recognition client"""
+        """Get facial recognition client and update deployment"""
         # Initialize face recognition client if not already done
         if self.face_client is None:
             self.logger.info(
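The new code path separates construction from setup: __init__ now only stores the config, and the async initialize() must be awaited before process() is used (per the comments above, PostProcessor._get_use_case_instance() is expected to do this). A hypothetical standalone sketch, with placeholder server, session, and deployment values, would look like:

import asyncio

async def main():
    config = FaceRecognitionEmbeddingConfig(
        facial_recognition_server_id="srv-123",  # placeholder value
        session=None,                            # a Matrice session object in practice
        deployment_id="dep-456",                 # placeholder; triggers update_deployment()
    )
    use_case = FaceRecognitionEmbeddingUseCase(config)
    await use_case.initialize()  # sets up the client, managers, and activity logging
    # use_case.process(...) may only be called after initialize() completes

asyncio.run(main())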
@@ -427,6 +519,20 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
                 server_id=config.facial_recognition_server_id, session=config.session
             )
             self.logger.info("Face recognition client initialized")
+
+        # Call update_deployment if deployment_id is provided
+        if config.deployment_id:
+            try:
+                self.logger.info(f"Updating deployment with ID: {config.deployment_id}")
+                response = await self.face_client.update_deployment(config.deployment_id)
+                if response.get('success', False):
+                    self.logger.info(f"Successfully updated deployment {config.deployment_id}")
+                else:
+                    self.logger.warning(f"Failed to update deployment: {response.get('error', 'Unknown error')}")
+            except Exception as e:
+                self.logger.error(f"Exception while updating deployment: {e}", exc_info=True)
+        else:
+            self.logger.debug("No deployment_id provided, skipping deployment update")

         return self.face_client

@@ -454,44 +560,28 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
             category=self.category,
             context=context,
         )
-
-
+        if context is None:
+            context = ProcessingContext()
+
+        # Defensive check: Ensure context is ProcessingContext object (production safety)
+        # This handles edge cases where parameter mismatch might pass a dict as context
+        if not isinstance(context, ProcessingContext):
+            self.logger.warning(
+                f"Context parameter is not ProcessingContext (got {type(context).__name__}, {context}). "
+                "Creating new ProcessingContext. This may indicate a parameter mismatch in the caller."
+            )
+            context = ProcessingContext()

-
-
+        # Ensure use case is initialized (should be done in _get_use_case_instance, not lazy loaded)
+        if not self._initialized:
+            raise RuntimeError(
+                "Face recognition use case not initialized. "
+                "This should be initialized eagerly in PostProcessor._get_use_case_instance()"
+            )

+        # Ensure confidence threshold is set
         if not config.confidence_threshold:
             config.confidence_threshold = 0.35
-
-        # Initialize People activity logging if enabled
-        if config.enable_people_activity_logging and not self.people_activity_logging:
-            self.people_activity_logging = PeopleActivityLogging(self.face_client)
-            self.people_activity_logging.start_background_processing()
-            self.logger.info("People activity logging enabled and started")
-
-        # Initialize EmbeddingManager if not already done
-        if not self.embedding_manager:
-            # Create default embedding config if not provided
-            if not config.embedding_config:
-                config.embedding_config = EmbeddingConfig(
-                    similarity_threshold=config.similarity_threshold,
-                    confidence_threshold=config.confidence_threshold,
-                    enable_track_id_cache=config.enable_track_id_cache,
-                    cache_max_size=config.cache_max_size,
-                    cache_ttl=3600
-                )
-            self.embedding_manager = EmbeddingManager(config.embedding_config, self.face_client)
-
-        # Initialize TemporalIdentityManager (top-1 via API with smoothing)
-        if not self.temporal_identity_manager:
-            self.temporal_identity_manager = TemporalIdentityManager(
-                face_client=self.face_client,
-                recognition_threshold=float(config.similarity_threshold),
-                history_size=20,
-                unknown_patience=7,
-                switch_patience=5,
-                fallback_margin=0.05,
-            )


         # Detect input format and store in context
@@ -1285,23 +1375,23 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
         detections = []
         for detection in counting_summary.get("detections", []):
             bbox = detection.get("bounding_box", {})
-            category = detection.get("display_name", "")
-
+            category = detection.get("display_name", "")
+
             detection_obj = self.create_detection_object(category, bbox)
             # Add face recognition specific fields
-
-
-
-
-
-
-
-
-
-
-
-
-
+            detection_obj.update(
+                {
+                    "person_id": detection.get("person_id"),
+                    # Use display_name for front-end label suppression policy
+                    "person_name": detection.get("display_name", ""),
+                    # Explicit label field for UI overlays
+                    "label": detection.get("display_name", ""),
+                    "recognition_status": detection.get(
+                        "recognition_status", "unknown"
+                    ),
+                    "enrolled": detection.get("enrolled", False),
+                }
+            )
             detections.append(detection_obj)

         # Build alert_settings array in expected format
@@ -1866,5 +1956,11 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
         try:
             if hasattr(self, "people_activity_logging") and self.people_activity_logging:
                 self.people_activity_logging.stop_background_processing()
+        except:
+            pass
+
+        try:
+            if hasattr(self, "embedding_manager") and self.embedding_manager:
+                self.embedding_manager.stop_background_refresh()
         except:
             pass
|