matrice-analytics 0.1.89__py3-none-any.whl → 0.1.97__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/post_processing/__init__.py +21 -2
- matrice_analytics/post_processing/config.py +6 -0
- matrice_analytics/post_processing/core/config.py +102 -3
- matrice_analytics/post_processing/face_reg/face_recognition.py +146 -14
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +116 -4
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +19 -0
- matrice_analytics/post_processing/post_processor.py +12 -0
- matrice_analytics/post_processing/usecases/__init__.py +9 -0
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +5 -2
- matrice_analytics/post_processing/usecases/color_detection.py +1 -0
- matrice_analytics/post_processing/usecases/fire_detection.py +94 -14
- matrice_analytics/post_processing/usecases/footfall.py +750 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +91 -1
- matrice_analytics/post_processing/usecases/people_counting.py +55 -22
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +15 -32
- matrice_analytics/post_processing/usecases/vehicle_monitoring_drone_view.py +1007 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring_parking_lot.py +1011 -0
- matrice_analytics/post_processing/usecases/weapon_detection.py +2 -1
- matrice_analytics/post_processing/utils/alert_instance_utils.py +94 -26
- matrice_analytics/post_processing/utils/business_metrics_manager_utils.py +97 -4
- matrice_analytics/post_processing/utils/incident_manager_utils.py +103 -6
- {matrice_analytics-0.1.89.dist-info → matrice_analytics-0.1.97.dist-info}/METADATA +1 -1
- {matrice_analytics-0.1.89.dist-info → matrice_analytics-0.1.97.dist-info}/RECORD +26 -23
- {matrice_analytics-0.1.89.dist-info → matrice_analytics-0.1.97.dist-info}/WHEEL +0 -0
- {matrice_analytics-0.1.89.dist-info → matrice_analytics-0.1.97.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice_analytics-0.1.89.dist-info → matrice_analytics-0.1.97.dist-info}/top_level.txt +0 -0

matrice_analytics/post_processing/__init__.py

@@ -84,7 +84,8 @@ from .usecases.field_mapping import FieldMappingConfig, FieldMappingUseCase
 from .usecases.leaf_disease import LeafDiseaseDetectionConfig, LeafDiseaseDetectionUseCase
 from .usecases.parking import ParkingConfig
 from .usecases.abandoned_object_detection import AbandonedObjectConfig
-
+from .usecases.footfall import FootFallConfig
+from .usecases.vehicle_monitoring import VehicleMonitoringConfig
 
 from .usecases.weld_defect_detection import WeldDefectConfig
 from .usecases.weapon_detection import WeaponDetectionConfig

@@ -128,6 +129,9 @@ from .usecases.pcb_defect_detection import PCBDefectConfig, PCBDefectUseCase
 from .usecases.underground_pipeline_defect_detection import UndergroundPipelineDefectConfig,UndergroundPipelineDefectUseCase
 from .usecases.suspicious_activity_detection import SusActivityConfig, SusActivityUseCase
 from .usecases.natural_disaster import NaturalDisasterConfig, NaturalDisasterUseCase
+from .usecases.footfall import FootFallUseCase
+from .usecases.vehicle_monitoring_parking_lot import VehicleMonitoringParkingLotUseCase, VehicleMonitoringParkingLotConfig
+from .usecases.vehicle_monitoring_drone_view import VehicleMonitoringDroneViewUseCase, VehicleMonitoringDroneViewConfig
 
 #Put all IMAGE based usecases here
 from .usecases.blood_cancer_detection_img import BloodCancerDetectionConfig, BloodCancerDetectionUseCase

@@ -205,6 +209,9 @@ from .usecases import (
 
     SusActivityUseCase,
     NaturalDisasterUseCase,
+    FootFallUseCase,
+    VehicleMonitoringParkingLotUseCase,
+    VehicleMonitoringDroneViewUseCase,
 
     #Put all IMAGE based usecases here
     BloodCancerDetectionUseCase,

@@ -286,6 +293,9 @@ _pcb_defect_detection = PCBDefectUseCase()
 _underground_pipeline_defect = UndergroundPipelineDefectUseCase()
 _suspicious_activity_detection = SusActivityUseCase()
 _natural_disaster = NaturalDisasterUseCase()
+_footfall = FootFallUseCase()
+_vehicle_monitoring_parking_lot = VehicleMonitoringParkingLotUseCase()
+_vehicle_monitoring_drone_view = VehicleMonitoringDroneViewUseCase()
 
 # Face recognition with embeddings
 _face_recognition = FaceRecognitionEmbeddingUseCase()

@@ -370,6 +380,9 @@ registry.register_use_case(_pcb_defect_detection.category, _pcb_defect_detection
 registry.register_use_case(_underground_pipeline_defect.category, _underground_pipeline_defect.name, UndergroundPipelineDefectUseCase)
 registry.register_use_case(_suspicious_activity_detection.category, _suspicious_activity_detection.name, SusActivityUseCase)
 registry.register_use_case(_natural_disaster.category, _natural_disaster.name, NaturalDisasterUseCase)
+registry.register_use_case(_footfall.category, _footfall.name, FaceEmotionUseCase)
+registry.register_use_case(_vehicle_monitoring_parking_lot.category, _vehicle_monitoring_parking_lot.name, VehicleMonitoringParkingLotUseCase)
+registry.register_use_case(_vehicle_monitoring_drone_view.category, _vehicle_monitoring_drone_view.name, VehicleMonitoringDroneViewUseCase)
 
 #Put all IMAGE based usecases here
 registry.register_use_case(_blood_cancer_detection.category, _blood_cancer_detection.name, BloodCancerDetectionUseCase)
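
Registration keys off attributes of a throwaway instance (`.category` and `.name`) while the class itself is what gets stored. A minimal sketch of that registry pattern with a toy registry standing in for the package's real one; the `category`/`name` values shown are assumptions, not read from the released code.

```python
from typing import Dict, Tuple, Type

class UseCaseRegistry:
    """Toy registry illustrating the (category, name) -> class lookup pattern."""
    def __init__(self) -> None:
        self._use_cases: Dict[Tuple[str, str], Type] = {}

    def register_use_case(self, category: str, name: str, use_case_cls: Type) -> None:
        self._use_cases[(category, name)] = use_case_cls

    def get_use_case(self, category: str, name: str) -> Type:
        return self._use_cases[(category, name)]

class FootFallUseCase:
    category = "retail"   # assumed values for illustration only
    name = "footfall"

registry = UseCaseRegistry()
_footfall = FootFallUseCase()
registry.register_use_case(_footfall.category, _footfall.name, FootFallUseCase)
print(registry.get_use_case("retail", "footfall").__name__)  # FootFallUseCase
```
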

@@ -574,7 +587,10 @@ __all__ = [
     'UndergroundPipelineDefectConfig',
     'SusActivityConfig',
     'NaturalDisasterConfig',
-    'VehiclePeopleDroneMonitoringConfig'
+    'VehiclePeopleDroneMonitoringConfig',
+    'FootFallConfig',
+    'VehicleMonitoringParkingLotConfig',
+    'VehicleMonitoringDroneViewConfig',
     #Put all IMAGE based usecase CONFIGS here
     'BloodCancerDetectionConfig',
     'SkinCancerClassificationConfig',

@@ -648,6 +664,9 @@ __all__ = [
     'UndergroundPipelineDefectUseCase',
     'SusActivityUseCase',
     'NaturalDisasterUseCase',
+    'FootFallUseCase',
+    'VehicleMonitoringParkingLotUseCase',
+    'VehicleMonitoringDroneViewUseCase',
 
     #Put all IMAGE based usecases here
     'BloodCancerDetectionUseCase',

matrice_analytics/post_processing/config.py

@@ -65,6 +65,9 @@ APP_NAME_TO_USECASE = {
     "underground_pipeline_defect" : "underground_pipeline_defect",
     "suspicious_activity_detection": "suspicious_activity_detection",
     "natural_disaster_detection": "natural_disaster_detection",
+    "Foot Fall": "footfall",
+    "vehicle_monitoring_parking_lot": "vehicle_monitoring_parking_lot",
+    "vehicle_monitoring_drone_view": "vehicle_monitoring_drone_view",
 }
 
 APP_NAME_TO_CATEGORY = {

@@ -135,6 +138,9 @@ APP_NAME_TO_CATEGORY = {
     "underground_pipeline_defect" : "general",
     "suspicious_activity_detection": "security",
     "natural_disaster_detection": "environmental",
+    "Foot Fall": "retail",
+    "vehicle_monitoring_parking_lot": "traffic",
+    "vehicle_monitoring_drone_view": "traffic",
 }
 
 def get_usecase_from_app_name(app_name: str) -> str:
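
The footfall key is the display name "Foot Fall" (with a space and capitals) rather than a snake_case identifier like the other two entries, so lookups must pass that exact string. A minimal sketch of how these entries resolve, assuming plain dict lookups; the helper below is a hypothetical stand-in, not the package's `get_usecase_from_app_name`.

```python
from typing import Dict, Tuple

# Entries mirror the additions in this diff.
APP_NAME_TO_USECASE: Dict[str, str] = {
    "Foot Fall": "footfall",
    "vehicle_monitoring_parking_lot": "vehicle_monitoring_parking_lot",
    "vehicle_monitoring_drone_view": "vehicle_monitoring_drone_view",
}
APP_NAME_TO_CATEGORY: Dict[str, str] = {
    "Foot Fall": "retail",
    "vehicle_monitoring_parking_lot": "traffic",
    "vehicle_monitoring_drone_view": "traffic",
}

def resolve_app_name(app_name: str) -> Tuple[str, str]:
    """Hypothetical helper: map an app display name to (usecase, category)."""
    usecase = APP_NAME_TO_USECASE.get(app_name, app_name)
    category = APP_NAME_TO_CATEGORY.get(app_name, "general")
    return usecase, category

print(resolve_app_name("Foot Fall"))  # ('footfall', 'retail')
print(resolve_app_name("footfall"))   # ('footfall', 'general') -- no reverse mapping
```
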

matrice_analytics/post_processing/core/config.py

@@ -352,14 +352,19 @@ class AlertConfig:
 @dataclass
 class PeopleCountingConfig(BaseConfig):
     """Configuration for people counting use case."""
-
+
     # Smoothing configuration
     enable_smoothing: bool = True
     smoothing_algorithm: str = "observability" # "window" or "observability"
     smoothing_window_size: int = 20
     smoothing_cooldown_frames: int = 5
     smoothing_confidence_range_factor: float = 0.5
-
+
+    # ====== PERFORMANCE: Tracker selection (both disabled by default for max throughput) ======
+    enable_advanced_tracker: bool = False # Heavy O(n³) tracker - enable only when tracking quality is critical
+    enable_simple_tracker: bool = False # Lightweight O(n) tracker - fast but no cross-frame persistence
+    # ====== END PERFORMANCE CONFIG ======
+
     # Zone configuration
     zone_config: Optional[ZoneConfig] = None
 
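
The two new flags on PeopleCountingConfig gate which tracker, if any, the use case builds; with both left at their defaults no tracker runs, which is the throughput-oriented default the comments describe. A small sketch of the intended selection logic, assuming the flags are simply read off the config; the tracker names are placeholders, not classes from this package.

```python
from dataclasses import dataclass

@dataclass
class TrackerFlags:
    # Field names match the diff; defaults keep both trackers off.
    enable_advanced_tracker: bool = False
    enable_simple_tracker: bool = False

def select_tracker(flags: TrackerFlags) -> str:
    """Placeholder dispatch illustrating how the flags are expected to be used."""
    if flags.enable_advanced_tracker:
        return "advanced"   # heavier tracker, better identity persistence
    if flags.enable_simple_tracker:
        return "simple"     # O(n) tracker, no cross-frame persistence
    return "none"           # default: raw per-frame detections only

print(select_tracker(TrackerFlags()))                            # none
print(select_tracker(TrackerFlags(enable_simple_tracker=True)))  # simple
```
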

@@ -901,6 +906,9 @@ class ConfigManager:
         'underground_pipeline_defect' : None,
         'suspicious_activity_detection': None,
         'natural_disaster_detection': None,
+        'footfall': None,
+        'vehicle_monitoring_parking_lot': None,
+        'vehicle_monitoring_drone_view': None,
 
         #Put all image based usecases here::
         'blood_cancer_detection_img': None,

@@ -1405,7 +1413,31 @@ class ConfigManager:
             return NaturalDisasterConfig
         except ImportError:
             return None
-
+
+    def footfall_detection_config_class(self):
+        """Register a configuration class for a use case."""
+        try:
+            from ..usecases.footfall import FootFallConfig
+            return FootFallConfig
+        except ImportError:
+            return None
+
+    def vehicle_monitoring_parking_lot_config_class(self):
+        """Register a configuration class for a use case."""
+        try:
+            from ..usecases.vehicle_monitoring_parking_lot import VehicleMonitoringParkingLotConfig
+            return VehicleMonitoringParkingLotConfig
+        except ImportError:
+            return None
+
+    def vehicle_monitoring_drone_view_config_class(self):
+        """Register a configuration class for a use case."""
+        try:
+            from ..usecases.vehicle_monitoring_drone_view import VehicleMonitoringDroneViewConfig
+            return VehicleMonitoringDroneViewConfig
+        except ImportError:
+            return None
+
     #put all image based usecases here::
     def blood_cancer_detection_config_class(self):
         """Register a configuration class for a use case."""
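
Each of the new accessors follows the pattern already used for the other use cases: import the config class lazily and return None when the optional module is missing. A generic sketch of that pattern using importlib; the real ConfigManager hard-codes one method per use case rather than using a helper like this.

```python
import importlib
from typing import Optional, Type

def load_config_class(module_path: str, class_name: str) -> Optional[Type]:
    """Return the named config class, or None when the use case isn't available."""
    try:
        module = importlib.import_module(module_path)
        return getattr(module, class_name)
    except (ImportError, AttributeError):
        return None

FootFallConfig = load_config_class(
    "matrice_analytics.post_processing.usecases.footfall", "FootFallConfig"
)
if FootFallConfig is None:
    print("footfall use case not installed")
```
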

@@ -2684,6 +2716,54 @@ class ConfigManager:
                 **kwargs
             )
 
+        elif usecase == "footfall":
+            # Import here to avoid circular import
+            from ..usecases.footfall import FootFallConfig
+
+            # Handle nested configurations
+            alert_config = kwargs.pop("alert_config", None)
+            if alert_config and isinstance(alert_config, dict):
+                alert_config = AlertConfig(**alert_config)
+
+            config = FootFallConfig(
+                category=category or "retail",
+                usecase=usecase,
+                alert_config=alert_config,
+                **kwargs
+            )
+
+        elif usecase == "vehicle_monitoring_parking_lot":
+            # Import here to avoid circular import
+            from ..usecases.vehicle_monitoring_parking_lot import VehicleMonitoringParkingLotConfig
+
+            # Handle nested configurations
+            alert_config = kwargs.pop("alert_config", None)
+            if alert_config and isinstance(alert_config, dict):
+                alert_config = AlertConfig(**alert_config)
+
+            config = VehicleMonitoringParkingLotConfig(
+                category=category or "traffic",
+                usecase=usecase,
+                alert_config=alert_config,
+                **kwargs
+            )
+
+        elif usecase == "vehicle_monitoring_drone_view":
+            # Import here to avoid circular import
+            from ..usecases.vehicle_monitoring_drone_view import VehicleMonitoringDroneViewConfig
+
+            # Handle nested configurations
+            alert_config = kwargs.pop("alert_config", None)
+            if alert_config and isinstance(alert_config, dict):
+                alert_config = AlertConfig(**alert_config)
+
+            config = VehicleMonitoringDroneViewConfig(
+                category=category or "traffic",
+                usecase=usecase,
+                alert_config=alert_config,
+                **kwargs
+            )
+
         #Add IMAGE based usecases here::
         elif usecase == "blood_cancer_detection_img":
             # Import here to avoid circular import
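
All three new branches share one shape: pop a nested alert_config dict, coerce it into an AlertConfig, then construct the use-case config with a category default ("retail" for footfall, "traffic" for the two vehicle variants). A standalone sketch of that flow for footfall; the module paths are inferred from the file list above, and this is not the package's actual builder method.

```python
def build_footfall_config(category=None, **kwargs):
    # Imports deferred, mirroring the "avoid circular import" comments in the diff.
    from matrice_analytics.post_processing.usecases.footfall import FootFallConfig
    from matrice_analytics.post_processing.core.config import AlertConfig

    alert_config = kwargs.pop("alert_config", None)
    if alert_config and isinstance(alert_config, dict):
        alert_config = AlertConfig(**alert_config)  # nested dict -> dataclass

    return FootFallConfig(
        category=category or "retail",
        usecase="footfall",
        alert_config=alert_config,
        **kwargs,
    )

# config = build_footfall_config(alert_config={...})
```
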

@@ -3234,6 +3314,25 @@ class ConfigManager:
             from ..usecases.natural_disaster import NaturalDisasterConfig
             default_config = NaturalDisasterConfig()
             return default_config.to_dict()
+
+        elif usecase == "footfall":
+            # Import here to avoid circular import
+            from ..usecases.footfall import FootFallConfig
+            default_config = FootFallConfig()
+            return default_config.to_dict()
+
+        elif usecase == "vehicle_monitoring_parking_lot":
+            # Import here to avoid circular import
+            from ..usecases.vehicle_monitoring_parking_lot import VehicleMonitoringParkingLotConfig
+            default_config = VehicleMonitoringParkingLotConfig()
+            return default_config.to_dict()
+
+        elif usecase == "vehicle_monitoring_drone_view":
+            # Import here to avoid circular import
+            from ..usecases.vehicle_monitoring_drone_view import VehicleMonitoringDroneViewConfig
+            default_config = VehicleMonitoringDroneViewConfig()
+            return default_config.to_dict()
+
 
         elif usecase == "underground_pipeline_defect":
             # Import here to avoid circular import

matrice_analytics/post_processing/face_reg/face_recognition.py

@@ -82,6 +82,9 @@ from .face_recognition_client import FacialRecognitionClient
 from .people_activity_logging import PeopleActivityLogging
 from .embedding_manager import EmbeddingManager, EmbeddingConfig
 
+# Cache for location names to avoid repeated API calls
+_location_name_cache: Dict[str, str] = {}
+
 
 # ---- Lightweight identity tracking and temporal smoothing (adapted from compare_similarity.py) ---- #
 from collections import deque, defaultdict
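
The new module-level _location_name_cache means each location_id is resolved against the backend at most once per process. The pattern in isolation, with names matching the diff and a fetch callable standing in for the API call:

```python
from typing import Callable, Dict

_location_name_cache: Dict[str, str] = {}

def cached_location_name(location_id: str, fetch: Callable[[str], str]) -> str:
    """Check the process-wide cache before falling back to the (slow) fetch."""
    if location_id in _location_name_cache:
        return _location_name_cache[location_id]
    name = fetch(location_id)               # e.g. the /v1/inference/get_location call
    _location_name_cache[location_id] = name
    return name

print(cached_location_name("loc-001", lambda _id: "Entry Reception"))  # fetches once
print(cached_location_name("loc-001", lambda _id: "never called"))     # served from cache
```

As written in the diff, the cache is unbounded and never invalidated (failed lookups also cache the default name), which is only a concern if the set of locations is large or changes at runtime.
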

@@ -386,8 +389,10 @@ class RedisFaceMatcher:
         if session is None:
             return None
 
-
-
+        # Use run_in_executor for Python 3.8 compatibility (asyncio.to_thread requires 3.9+)
+        loop = asyncio.get_running_loop()
+        response = await loop.run_in_executor(
+            None, self._fetch_action_details_sync, session, action_id
         )
         if not response or not response.get("success", False):
             self.logger.warning(
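
The replacement offloads the blocking action-details RPC to a thread via loop.run_in_executor because asyncio.to_thread only exists on Python 3.9 and later. A self-contained sketch of the two equivalent forms; fetch_sync stands in for the client's blocking call.

```python
import asyncio
import sys

def fetch_sync(session, action_id):
    # Stand-in for a blocking RPC such as _fetch_action_details_sync.
    return {"success": True, "data": {"actionId": action_id}}

async def fetch(session, action_id):
    if sys.version_info >= (3, 9):
        # Available from Python 3.9: wraps run_in_executor for you.
        return await asyncio.to_thread(fetch_sync, session, action_id)
    # Python 3.8-compatible equivalent: run in the default ThreadPoolExecutor.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, fetch_sync, session, action_id)

print(asyncio.run(fetch(None, "act-123")))
```
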

@@ -1336,6 +1341,98 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
             self.logger.error(f"Error during use case initialization: {e}", exc_info=True)
             raise RuntimeError(f"Failed to initialize face recognition use case: {e}") from e
 
+    def _extract_camera_info_from_stream(self, stream_info: Optional[Dict[str, Any]]) -> Dict[str, str]:
+        """
+        Extract camera_name, camera_id, and location_id from stream_info.
+
+        Args:
+            stream_info: Stream information dictionary
+
+        Returns:
+            Dict with camera_name, camera_id, location_id
+        """
+        camera_name = ""
+        camera_id = ""
+        location_id = ""
+
+        if not stream_info:
+            return {"camera_name": camera_name, "camera_id": camera_id, "location_id": location_id}
+
+        # Extract camera_name from camera_info
+        camera_info = stream_info.get("camera_info", {})
+        if camera_info:
+            camera_name = camera_info.get("camera_name", "")
+            location_id = camera_info.get("location", "")
+
+        # Extract camera_id from topic (format: {camera_id}_input_topic)
+        topic = stream_info.get("topic", "")
+        if topic and "_input_topic" in topic:
+            camera_id = topic.replace("_input_topic", "")
+
+        self.logger.debug(f"Extracted camera info - camera_name: '{camera_name}', camera_id: '{camera_id}', location_id: '{location_id}'")
+
+        return {"camera_name": camera_name, "camera_id": camera_id, "location_id": location_id}
+
+    async def _fetch_location_name(self, location_id: str) -> str:
+        """
+        Fetch location name from API using location_id.
+
+        Args:
+            location_id: The location ID to look up
+
+        Returns:
+            Location name string, or 'Entry Reception' as default if API fails
+        """
+        global _location_name_cache
+        default_location = "Entry Reception"
+
+        if not location_id:
+            self.logger.debug(f"[LOCATION] No location_id provided, using default: '{default_location}'")
+            return default_location
+
+        # Check cache first
+        if location_id in _location_name_cache:
+            cached_name = _location_name_cache[location_id]
+            self.logger.debug(f"[LOCATION] Using cached location name for '{location_id}': '{cached_name}'")
+            return cached_name
+
+        # Need a session to make API call
+        if not self.face_client or not hasattr(self.face_client, 'session') or not self.face_client.session:
+            self.logger.warning(f"[LOCATION] No session available, using default: '{default_location}'")
+            return default_location
+
+        try:
+            endpoint = f"/v1/inference/get_location/{location_id}"
+            self.logger.info(f"[LOCATION] Fetching location name from API: {endpoint}")
+
+            response = self.face_client.session.rpc.get(endpoint)
+
+            if response and isinstance(response, dict):
+                success = response.get("success", False)
+                if success:
+                    data = response.get("data", {})
+                    location_name = data.get("locationName", default_location)
+                    self.logger.info(f"[LOCATION] ✓ Fetched location name: '{location_name}' for location_id: '{location_id}'")
+
+                    # Cache the result
+                    _location_name_cache[location_id] = location_name
+                    return location_name
+                else:
+                    self.logger.warning(
+                        f"[LOCATION] API returned success=false for location_id '{location_id}': "
+                        f"{response.get('message', 'Unknown error')}"
+                    )
+            else:
+                self.logger.warning(f"[LOCATION] Invalid response format from API: {response}")
+
+        except Exception as e:
+            self.logger.error(f"[LOCATION] Error fetching location name for '{location_id}': {e}", exc_info=True)
+
+        # Use default on any failure
+        self.logger.info(f"[LOCATION] Using default location name: '{default_location}'")
+        _location_name_cache[location_id] = default_location
+        return default_location
+
     async def _get_facial_recognition_client(
         self, config: FaceRecognitionEmbeddingConfig
     ) -> FacialRecognitionClient:
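
_extract_camera_info_from_stream relies on two conventions: the camera id is the stream topic name minus its "_input_topic" suffix, and the location id comes from camera_info["location"]. A standalone restatement of that parsing; the example stream_info payload is illustrative, not captured from a real deployment.

```python
from typing import Any, Dict, Optional

def extract_camera_info(stream_info: Optional[Dict[str, Any]]) -> Dict[str, str]:
    """Same logic as the new helper, lifted out of the class for illustration."""
    out = {"camera_name": "", "camera_id": "", "location_id": ""}
    if not stream_info:
        return out
    camera_info = stream_info.get("camera_info", {}) or {}
    out["camera_name"] = camera_info.get("camera_name", "")
    out["location_id"] = camera_info.get("location", "")
    topic = stream_info.get("topic", "")
    if topic and "_input_topic" in topic:
        out["camera_id"] = topic.replace("_input_topic", "")
    return out

print(extract_camera_info({
    "topic": "cam42_input_topic",
    "camera_info": {"camera_name": "Lobby", "location": "loc-001"},
}))
# {'camera_name': 'Lobby', 'camera_id': 'cam42', 'location_id': 'loc-001'}
```
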

@@ -1352,6 +1449,8 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
         secret_key = os.getenv("MATRICE_SECRET_ACCESS_KEY", "")
         project_id = os.getenv("MATRICE_PROJECT_ID", "")
 
+        self.logger.info(f"[PROJECT_ID] Initial project_id from env: '{project_id}'")
+
         self.session1 = Session(
             account_number=account_number,
             access_key=access_key_id,

@@ -1362,6 +1461,19 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
             server_id=config.facial_recognition_server_id, session=self.session1
         )
         self.logger.info("Face recognition client initialized")
+
+        # After FacialRecognitionClient initialization, it may have fetched project_id from action details
+        # and updated MATRICE_PROJECT_ID env var. Update session1 with the correct project_id.
+        updated_project_id = self.face_client.project_id or os.getenv("MATRICE_PROJECT_ID", "")
+        if updated_project_id and updated_project_id != project_id:
+            self.logger.info(f"[PROJECT_ID] Project ID updated by FacialRecognitionClient: '{updated_project_id}'")
+            try:
+                self.session1.update(updated_project_id)
+                self.logger.info(f"[PROJECT_ID] Updated session1 with project_id: '{updated_project_id}'")
+            except Exception as e:
+                self.logger.warning(f"[PROJECT_ID] Failed to update session1 with project_id: {e}")
+        elif updated_project_id:
+            self.logger.info(f"[PROJECT_ID] Using project_id: '{updated_project_id}'")
 
         # Call update_deployment if deployment_id is provided
         if config.deployment_id:

@@ -1415,6 +1527,9 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
         """
         processing_start = time.time()
         # Ensure config is correct type
+        self.logger.info(f"[CONFIG-PRINT]-------------------------- {config} --------------------------")
+        self.logger.info(f"[STREAM-PRINT]-------------------------- {stream_info} --------------------------")
+
         if not isinstance(config, FaceRecognitionEmbeddingConfig):
             return self.create_error_result(
                 "Invalid config type",

@@ -1546,6 +1661,16 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
         current_frame_staff_details = {}
 
 
+        # Extract camera info and fetch location name
+        camera_info_extracted = self._extract_camera_info_from_stream(stream_info)
+        camera_name = camera_info_extracted.get("camera_name", "")
+        camera_id = camera_info_extracted.get("camera_id", "")
+        location_id = camera_info_extracted.get("location_id", "")
+
+        # Fetch actual location name from API
+        location_name = await self._fetch_location_name(location_id)
+        self.logger.debug(f"Using location_name: '{location_name}', camera_name: '{camera_name}', camera_id: '{camera_id}'")
+
         # Process face recognition for each detection (if enabled)
         if config.enable_face_recognition:
             # Additional safety check: verify embeddings are still loaded and ready

@@ -1562,7 +1687,8 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
             # )
 
             face_recognition_result = await self._process_face_recognition(
-                processed_data, config, stream_info, input_bytes
+                processed_data, config, stream_info, input_bytes,
+                camera_name=camera_name, camera_id=camera_id, location_name=location_name
             )
             processed_data, current_recognized_count, current_unknown_count, recognized_persons, current_frame_staff_details = face_recognition_result
         else:

@@ -1753,6 +1879,9 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
         config: FaceRecognitionEmbeddingConfig,
         stream_info: Optional[Dict[str, Any]] = None,
         input_bytes: Optional[bytes] = None,
+        camera_name: str = "",
+        camera_id: str = "",
+        location_name: str = "",
     ) -> List[Dict]:
         """Process face recognition for each detection with embeddings"""
 

@@ -1789,10 +1918,8 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
             )
             self._frame_warning_logged = True
 
-        #
-        location = (
-            stream_info.get("camera_location", "unknown") if stream_info else "unknown"
-        )
+        # Use the location_name passed from process() (fetched from API)
+        location = location_name if location_name else "Entry Reception"
 
         # Generate current timestamp
         current_timestamp = datetime.now(timezone.utc).isoformat()

@@ -1806,7 +1933,8 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
             processed_detection = await self._process_face(
                 detection, current_frame, location, current_timestamp, config,
                 current_recognized_count, current_unknown_count,
-                recognized_persons, current_frame_staff_details
+                recognized_persons, current_frame_staff_details,
+                camera_name=camera_name, camera_id=camera_id
             )
             # print("------------------WHOLE FACE RECOG PROCESSING DETECTION----------------------------")
             # print("LATENCY:",(time.time() - st1)*1000,"| Throughput fps:",(1.0 / (time.time() - st1)) if (time.time() - st1) > 0 else None)

@@ -1838,6 +1966,8 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
         current_unknown_count: int = 0,
         recognized_persons: Dict = None,
         current_frame_staff_details: Dict = None,
+        camera_name: str = "",
+        camera_id: str = "",
     ) -> Dict:
 
         # Extract and validate embedding using EmbeddingManager
|
|
|
2011
2141
|
detection=detection,
|
|
2012
2142
|
current_frame=current_frame,
|
|
2013
2143
|
location=location,
|
|
2144
|
+
camera_name=camera_name,
|
|
2145
|
+
camera_id=camera_id,
|
|
2014
2146
|
)
|
|
2015
2147
|
# print("------------------FACE RECOG ENQUEUEING DETECTION FOR ACTIVITY LOGGING----------------------------")
|
|
2016
2148
|
# print("LATENCY:",(time.time() - st4)*1000,"| Throughput fps:",(1.0 / (time.time() - st4)) if (time.time() - st4) > 0 else None)
|
|

@@ -2273,19 +2405,19 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
 
         # Build current_counts array in expected format
         current_counts = []
-        for cat, count in per_category_count.items():
-
-
+        # for cat, count in per_category_count.items():
+        #     if count > 0 or total_detections > 0:
+        #         current_counts.append({"category": cat, "count": count})
 
         # Add face recognition specific current counts
         current_frame = face_summary.get("current_frame", {})
         current_counts.extend(
             [
                 {
-                    "category": "
+                    "category": "Recognized Faces",
                     "count": current_frame.get("recognized", 0),
                 },
-                {"category": "
+                {"category": "Unknown Faces", "count": current_frame.get("unknown", 0)},
             ]
         )
 
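
With the per-category loop commented out, current_counts now carries only the two face-recognition entries, keyed by the new human-readable category names. The resulting structure, with illustrative counts:

```python
current_frame = {"recognized": 3, "unknown": 1}  # illustrative values

current_counts = [
    {"category": "Recognized Faces", "count": current_frame.get("recognized", 0)},
    {"category": "Unknown Faces", "count": current_frame.get("unknown", 0)},
]
# [{'category': 'Recognized Faces', 'count': 3}, {'category': 'Unknown Faces', 'count': 1}]
```

The same two names are what the final hunk below writes into tracking_stat['target_categories'], so downstream consumers see a consistent pair of labels.
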

@@ -2408,7 +2540,7 @@ class FaceRecognitionEmbeddingUseCase(BaseProcessor):
                 start_time=high_precision_start_timestamp,
                 reset_time=high_precision_reset_timestamp,
             )
-
+            tracking_stat['target_categories'] = ['Recognized Faces', 'Unknown Faces']
             tracking_stats.append(tracking_stat)
         return tracking_stats
 