matrice-analytics 0.1.70__py3-none-any.whl → 0.1.96__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/post_processing/__init__.py +8 -2
- matrice_analytics/post_processing/config.py +4 -2
- matrice_analytics/post_processing/core/base.py +1 -1
- matrice_analytics/post_processing/core/config.py +40 -3
- matrice_analytics/post_processing/face_reg/face_recognition.py +1014 -201
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +171 -29
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +19 -0
- matrice_analytics/post_processing/post_processor.py +4 -0
- matrice_analytics/post_processing/usecases/__init__.py +4 -1
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +913 -500
- matrice_analytics/post_processing/usecases/color_detection.py +19 -18
- matrice_analytics/post_processing/usecases/customer_service.py +356 -9
- matrice_analytics/post_processing/usecases/fire_detection.py +241 -23
- matrice_analytics/post_processing/usecases/footfall.py +750 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +638 -40
- matrice_analytics/post_processing/usecases/people_counting.py +66 -33
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +35 -34
- matrice_analytics/post_processing/usecases/weapon_detection.py +2 -1
- matrice_analytics/post_processing/utils/alert_instance_utils.py +1018 -0
- matrice_analytics/post_processing/utils/business_metrics_manager_utils.py +1338 -0
- matrice_analytics/post_processing/utils/incident_manager_utils.py +1754 -0
- {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/METADATA +1 -1
- {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/RECORD +26 -22
- {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/WHEEL +0 -0
- {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/top_level.txt +0 -0
@@ -18,6 +18,8 @@ from ..utils import (
     BBoxSmoothingConfig,
     BBoxSmoothingTracker
 )
+# Import alert system utilities
+from ..utils.alert_instance_utils import ALERT_INSTANCE
 # External dependencies
 import cv2
 import numpy as np
@@ -31,11 +33,15 @@ import asyncio
 import urllib
 import urllib.request
 import base64
+from pathlib import Path
 # Get the major and minor version numbers
 major_version = sys.version_info.major
 minor_version = sys.version_info.minor
 print(f"Python version: {major_version}.{minor_version}")
 os.environ["ORT_LOG_SEVERITY_LEVEL"] = "3"
+import base64
+from matrice_common.stream.matrice_stream import MatriceStream, StreamType
+from matrice_common.session import Session


 # Lazy import mechanism for LicensePlateRecognizer
@@ -129,7 +135,6 @@ from ..ocr.preprocessing import ImagePreprocessor
 from ..core.config import BaseConfig, AlertConfig, ZoneConfig

 try:
-    from matrice_common.session import Session
     HAS_MATRICE_SESSION = True
 except ImportError:
     HAS_MATRICE_SESSION = False
@@ -156,6 +161,7 @@ class LicensePlateMonitorConfig(BaseConfig):
     ocr_mode:str = field(default_factory=lambda: "numeric") # "alphanumeric" or "numeric" or "alphabetic"
     session: Optional[Session] = None
     lpr_server_id: Optional[str] = None # Optional LPR server ID for remote logging
+    redis_server_id: Optional[str] = None # Optional Redis server ID for instant alerts
     plate_log_cooldown: float = 30.0 # Cooldown period in seconds for logging same plate

     def validate(self) -> List[str]:
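
Note: the new redis_server_id field sits alongside lpr_server_id on LicensePlateMonitorConfig. A minimal sketch of wiring both, assuming the config dataclass is constructed directly and the remaining BaseConfig fields keep their defaults; the IDs below are placeholders, not real servers:

    from matrice_analytics.post_processing.usecases.license_plate_monitoring import LicensePlateMonitorConfig

    # Hypothetical IDs, for illustration only
    config = LicensePlateMonitorConfig(
        ocr_mode="alphanumeric",
        lpr_server_id="lpr-server-id-placeholder",      # enables remote plate logging
        redis_server_id="redis-server-id-placeholder",  # enables instant alerts
        plate_log_cooldown=30.0,
    )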
@@ -237,6 +243,7 @@ class LicensePlateMonitorLogger:
         # Fetch server connection info if lpr_server_id is provided
         if config.lpr_server_id:
             self.lpr_server_id = config.lpr_server_id
+            self.logger.info(f"[LP_LOGGING] CONFIG PRINTTEST: {config}")
             self.logger.info(f"[LP_LOGGING] Fetching LPR server connection info for server ID: {self.lpr_server_id}")
             try:
                 self.server_info = self.get_server_connection_info()
@@ -265,6 +272,7 @@ class LicensePlateMonitorLogger:
                     self.logger.error("[LP_LOGGING] Failed to fetch LPR server connection info - server_info is None")
                     self.logger.error("[LP_LOGGING] This will prevent plate logging from working!")
             except Exception as e:
+                #pass
                 self.logger.error(f"[LP_LOGGING] Error fetching LPR server connection info: {e}", exc_info=True)
                 self.logger.error("[LP_LOGGING] This will prevent plate logging from working!")
         else:
@@ -284,6 +292,17 @@ class LicensePlateMonitorLogger:
             self.logger.error(f"Error fetching external IP: {e}", exc_info=True)
             return "localhost"

+    def _get_backend_base_url(self) -> str:
+        """Resolve backend base URL based on ENV variable: prod/staging/dev."""
+        env = os.getenv("ENV", "prod").strip().lower()
+        if env in ("prod", "production"):
+            host = "prod.backend.app.matrice.ai"
+        elif env in ("dev", "development"):
+            host = "dev.backend.app.matrice.ai"
+        else:
+            host = "staging.backend.app.matrice.ai"
+        return f"https://{host}"
+
     def get_server_connection_info(self) -> Optional[Dict[str, Any]]:
         """Fetch server connection info from RPC."""
         if not self.lpr_server_id:
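
Note: _get_backend_base_url keys off the ENV environment variable, so the same build can target prod, dev, or staging. A small standalone illustration of the mapping the method implements (sketch only, not the class method itself); anything other than prod/production/dev/development falls back to staging:

    import os

    def backend_base_url() -> str:
        env = os.getenv("ENV", "prod").strip().lower()
        if env in ("prod", "production"):
            host = "prod.backend.app.matrice.ai"
        elif env in ("dev", "development"):
            host = "dev.backend.app.matrice.ai"
        else:
            # unknown values (including "staging") resolve to staging
            host = "staging.backend.app.matrice.ai"
        return f"https://{host}"

    os.environ["ENV"] = "dev"
    assert backend_base_url() == "https://dev.backend.app.matrice.ai"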
@@ -406,16 +425,21 @@ class LicensePlateMonitorLogger:
             self.logger.info(f"[LP_LOGGING] ===== PLATE LOG REQUEST END (SKIPPED) =====")
             return False

+        if not stream_info:
+            self.logger.info(f"[LP_LOGGING] Stream info is None, skipping plate log")
+            stream_info = {}
+
         try:
             camera_info = stream_info.get("camera_info", {})
-            camera_name = camera_info.get("camera_name", "")
-            location = camera_info.get("location", "")
+            camera_name = camera_info.get("camera_name", "default_camera")
+            location = camera_info.get("location", "default_location")
             frame_id = stream_info.get("frame_id", "")

             print(f"[LP_LOGGING] Camera: '{camera_name}', Location: '{location}'")
             self.logger.info(f"[LP_LOGGING] Stream Info - Camera: '{camera_name}', Location: '{location}', Frame ID: '{frame_id}'")

             # Get project ID from server_info
+            self.logger.info(f"[LP_LOGGING] SERVER-INFO: '{self.server_info}'")
             project_id = self.server_info.get('projectID', '') if self.server_info else ''
             self.logger.info(f"[LP_LOGGING] Project ID: '{project_id}'")

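
Note: with this change a missing stream_info no longer aborts plate logging; it is replaced with an empty dict and the camera fields fall back to defaults. A tiny sketch of that fallback behaviour (plain dict logic, outside the class):

    stream_info = {}  # e.g. the caller passed None
    camera_info = stream_info.get("camera_info", {})
    camera_name = camera_info.get("camera_name", "default_camera")
    location = camera_info.get("location", "default_location")
    assert (camera_name, location) == ("default_camera", "default_location")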
@@ -438,7 +462,7 @@ class LicensePlateMonitorLogger:
             full_url = f"{self.server_base_url}{endpoint}"
             print(f"[LP_LOGGING] Sending POST to: {full_url}")
             self.logger.info(f"[LP_LOGGING] Sending POST request to: {full_url}")
-            self.logger.info(f"[LP_LOGGING] Payload: licensePlate='{plate_text}', frameId='{frame_id}', location='{location}', camera='{camera_name}'
+            self.logger.info(f"[LP_LOGGING] Payload: licensePlate='{plate_text}', frameId='{frame_id}', location='{location}', camera='{camera_name}'")

             response = await self.session.rpc.post_async(endpoint, payload=payload, base_url=self.server_base_url)

@@ -509,9 +533,397 @@ class LicensePlateMonitorUseCase(BaseProcessor):

         # Initialize plate logger (optional, only used if lpr_server_id is provided)
         self.plate_logger: Optional[LicensePlateMonitorLogger] = None
-        self._logging_enabled = True
+        self._logging_enabled = True # False //ToDo: DISABLED FOR NOW, ENABLED FOR PRODUCTION. ##
         self._plate_logger_initialized = False # Track if plate logger has been initialized
+
+        # Initialize instant alert manager (will be lazily initialized on first process() call)
+        self.alert_manager: Optional[ALERT_INSTANCE] = None
+        self._alert_manager_initialized = False # Track initialization to do it only once
+
+    def set_alert_manager(self, alert_manager: ALERT_INSTANCE) -> None:
+        """
+        Set the alert manager instance for instant alerts.
+
+        Args:
+            alert_manager: ALERT_INSTANCE instance configured with Redis/Kafka clients
+        """
+        self.alert_manager = alert_manager
+        self.logger.info("Alert manager set for license plate monitoring")
+
+    def _discover_action_id(self) -> Optional[str]:
+        """Discover action_id from current working directory name (and parents), similar to face_recognition flow."""
+        try:
+            import re as _re
+            pattern = _re.compile(r"^[0-9a-f]{8,}$", _re.IGNORECASE)
+            candidates: List[str] = []
+            try:
+                cwd = Path.cwd()
+                candidates.append(cwd.name)
+                for parent in cwd.parents:
+                    candidates.append(parent.name)
+            except Exception:
+                pass
+
+            try:
+                usr_src = Path("/usr/src")
+                if usr_src.exists():
+                    for child in usr_src.iterdir():
+                        if child.is_dir():
+                            candidates.append(child.name)
+            except Exception:
+                pass
+
+            for candidate in candidates:
+                if candidate and len(candidate) >= 8 and pattern.match(candidate):
+                    return candidate
+        except Exception:
+            pass
+        return None
+
+    def _get_backend_base_url(self) -> str:
+        """Resolve backend base URL based on ENV variable: prod/staging/dev."""
+        env = os.getenv("ENV", "prod").strip().lower()
+        if env in ("prod", "production"):
+            host = "prod.backend.app.matrice.ai"
+        elif env in ("dev", "development"):
+            host = "dev.backend.app.matrice.ai"
+        else:
+            host = "staging.backend.app.matrice.ai"
+        return f"https://{host}"
+
+    def _mask_value(self, value: Optional[str]) -> str:
+        """Mask sensitive values for logging/printing."""
+        if not value:
+            return ""
+        if len(value) <= 4:
+            return "*" * len(value)
+        return value[:2] + "*" * (len(value) - 4) + value[-2:]
+
+    def _get_public_ip(self) -> str:
+        """Get the public IP address of this machine."""
+        self.logger.info("Fetching public IP address...")
+        try:
+            public_ip = urllib.request.urlopen("https://v4.ident.me", timeout=120).read().decode("utf8").strip()
+            #self.logger.info(f"Successfully fetched external IP: {public_ip}")
+            return public_ip
+        except Exception as e:
+            #self.logger.error(f"Error fetching external IP: {e}", exc_info=True)
+            return "localhost"
+
+    def _fetch_location_name(self, location_id: str, session: Optional[Session] = None) -> str:
+        """
+        Fetch location name from API using location_id.
+
+        Args:
+            location_id: The location ID to look up
+            session: Matrice session for API calls
+
+        Returns:
+            Location name string, or 'Entry Reception' as default if API fails
+        """
+        default_location = "Entry Reception"
+
+        if not location_id:
+            self.logger.debug(f"[LOCATION] No location_id provided, using default: '{default_location}'")
+            return default_location
+
+        # Check cache first
+        if not hasattr(self, '_location_name_cache'):
+            self._location_name_cache: Dict[str, str] = {}
+
+        if location_id in self._location_name_cache:
+            cached_name = self._location_name_cache[location_id]
+            self.logger.debug(f"[LOCATION] Using cached location name for '{location_id}': '{cached_name}'")
+            return cached_name
+
+        if not session:
+            self.logger.warning(f"[LOCATION] No session provided, using default: '{default_location}'")
+            return default_location
+
+        try:
+            endpoint = f"/v1/inference/get_location/{location_id}"
+            self.logger.info(f"[LOCATION] Fetching location name from API: {endpoint}")
+
+            response = session.rpc.get(endpoint)
+
+            if response and isinstance(response, dict):
+                success = response.get("success", False)
+                if success:
+                    data = response.get("data", {})
+                    location_name = data.get("locationName", default_location)
+                    self.logger.info(f"[LOCATION] ✓ Fetched location name: '{location_name}' for location_id: '{location_id}'")
+
+                    # Cache the result
+                    self._location_name_cache[location_id] = location_name
+                    return location_name
+                else:
+                    self.logger.warning(
+                        f"[LOCATION] API returned success=false for location_id '{location_id}': "
+                        f"{response.get('message', 'Unknown error')}"
+                    )
+            else:
+                self.logger.warning(f"[LOCATION] Invalid response format from API: {response}")

+        except Exception as e:
+            self.logger.error(f"[LOCATION] Error fetching location name for '{location_id}': {e}", exc_info=True)

+        # Use default on any failure
+        self.logger.info(f"[LOCATION] Using default location name: '{default_location}'")
+        self._location_name_cache[location_id] = default_location
+        return default_location
+
+    def _initialize_alert_manager_once(self, config: LicensePlateMonitorConfig) -> None:
+        """
+        Initialize alert manager ONCE with Redis OR Kafka clients (Environment based).
+        Called from process() on first invocation.
+        Uses config.session (existing session from pipeline).
+        """
+        if self._alert_manager_initialized:
+            return
+
+        try:
+            # Import required modules
+            import base64
+            from matrice_common.stream.matrice_stream import MatriceStream, StreamType
+
+            # Use existing session from config (same pattern as plate_logger)
+            if not config.session:
+                account_number = os.getenv("MATRICE_ACCOUNT_NUMBER", "")
+                access_key_id = os.getenv("MATRICE_ACCESS_KEY_ID", "")
+                secret_key = os.getenv("MATRICE_SECRET_ACCESS_KEY", "")
+                project_id = os.getenv("MATRICE_PROJECT_ID", "")
+
+                self.session = Session(
+                    account_number=account_number,
+                    access_key=access_key_id,
+                    secret_key=secret_key,
+                    project_id=project_id,
+                )
+                config.session = self.session
+            if not self.session:
+                self.logger.warning("[ALERT] No session in config OR manual, skipping alert manager initialization")
+                self._alert_manager_initialized = True
+                return
+
+            rpc = config.session.rpc
+
+            # Determine environment: Localhost vs Cloud
+            # We use LPR server info to determine if we are local or cloud, similar to face_recognition_client
+            is_localhost = False
+            lpr_server_id = config.lpr_server_id
+            print("--------------------------------CONFIG-PRINT---------------------------")
+            print(config)
+            print("--------------------------------CONFIG-PRINT---------------------------")
+            if lpr_server_id:
+                try:
+                    # Fetch LPR server info to compare IPs
+                    response = rpc.get(f"/v1/actions/lpr_servers/{lpr_server_id}")
+                    if response.get("success", False) and response.get("data"):
+                        server_data = response.get("data", {})
+                        server_host = server_data.get("host", "")
+                        public_ip = self._get_public_ip()
+
+                        # Check if server_host indicates localhost
+                        localhost_indicators = ["localhost", "127.0.0.1", "0.0.0.0"]
+                        if server_host in localhost_indicators or server_host == public_ip:
+                            is_localhost = True
+                            self.logger.info(f"[ALERT] Detected Localhost environment (Public IP={public_ip}, Server IP={server_host})")
+                        else:
+                            is_localhost = False
+                            self.logger.info(f"[ALERT] Detected Cloud environment (Public IP={public_ip}, Server IP={server_host})")
+                    else:
+                        self.logger.warning(f"[ALERT] Failed to fetch LPR server info for environment detection, defaulting to Cloud mode")
+                except Exception as e:
+                    self.logger.warning(f"[ALERT] Error detecting environment: {e}, defaulting to Cloud mode")
+            else:
+                self.logger.info("[ALERT] No LPR server ID, defaulting to Cloud mode")
+
+            # ------------------------------------------------------------------
+            # Discover action_id and fetch action details (STRICT API-DRIVEN)
+            # ------------------------------------------------------------------
+            action_id = self._discover_action_id()
+            if not action_id:
+                self.logger.error("[ALERT] Could not discover action_id from working directory or parents")
+                print("----- ALERT ACTION DISCOVERY -----")
+                print("action_id: NOT FOUND")
+                print("----------------------------------")
+                self._alert_manager_initialized = True
+                return
+
+            try:
+                action_url = f"/v1/actions/action/{action_id}/details"
+                action_resp = rpc.get(action_url)
+                if not (action_resp and action_resp.get("success", False)):
+                    raise RuntimeError(action_resp.get("message", "Unknown error") if isinstance(action_resp, dict) else "Unknown error")
+                action_doc = action_resp.get("data", {}) if isinstance(action_resp, dict) else {}
+                action_details = action_doc.get("actionDetails", {}) if isinstance(action_doc, dict) else {}
+
+                # server id and type extraction (robust to variants)
+                server_id = (
+                    action_details.get("serverId")
+                    or action_details.get("server_id")
+                    or action_details.get("serverID")
+                    or action_details.get("redis_server_id")
+                    or action_details.get("kafka_server_id")
+                )
+                server_type = (
+                    action_details.get("serverType")
+                    or action_details.get("server_type")
+                    or action_details.get("type")
+                )
+
+                # Persist identifiers for future
+                self._action_id = action_id
+                self._deployment_id = action_details.get("_idDeployment") or action_details.get("deployment_id")
+                self._app_deployment_id = action_details.get("app_deployment_id")
+                self._instance_id = action_details.get("instanceID") or action_details.get("instanceId")
+                self._external_ip = action_details.get("externalIP") or action_details.get("externalIp")
+
+                print("----- ALERT ACTION DETAILS -----")
+                print(f"action_id: {action_id}")
+                print(f"server_type: {server_type}")
+                print(f"server_id: {server_id}")
+                print(f"deployment_id: {self._deployment_id}")
+                print(f"app_deployment_id: {self._app_deployment_id}")
+                print(f"instance_id: {self._instance_id}")
+                print(f"external_ip: {self._external_ip}")
+                print("--------------------------------")
+                self.logger.info(f"[ALERT] Action details fetched | action_id={action_id}, server_type={server_type}, server_id={server_id}")
+                self.logger.debug(f"[ALERT] Full action_details: {action_details}")
+            except Exception as e:
+                self.logger.error(f"[ALERT] Failed to fetch action details for action_id={action_id}: {e}", exc_info=True)
+                print("----- ALERT ACTION DETAILS ERROR -----")
+                print(f"action_id: {action_id}")
+                print(f"error: {e}")
+                print("--------------------------------------")
+                self._alert_manager_initialized = True
+                return
+
+            redis_client = None
+            kafka_client = None
+
+            # STRICT SWITCH: Only Redis if localhost, Only Kafka if cloud
+            if is_localhost:
+                # Initialize Redis client (ONLY) using STRICT API by instanceID
+                instance_id = getattr(self, "_instance_id", None)
+                if not instance_id:
+                    self.logger.error("[ALERT] Localhost mode but instance_id missing in action details for Redis initialization")
+                else:
+                    try:
+                        backend_base = self._get_backend_base_url()
+                        url = f"/v1/actions/get_redis_server_by_instance_id/{instance_id}"
+                        self.logger.info(f"[ALERT] Initializing Redis client via API for Localhost mode (instance_id={instance_id})")
+                        response = rpc.get(url)
+                        if isinstance(response, dict) and response.get("success", False):
+                            data = response.get("data", {})
+                            host = data.get("host")
+                            port = data.get("port")
+                            username = data.get("username")
+                            password = data.get("password", "")
+                            db_index = data.get("db", 0)
+                            conn_timeout = data.get("connection_timeout", 120)
+
+                            print("----- REDIS SERVER PARAMS -----")
+                            print(f"server_type: {server_type}")
+                            print(f"instance_id: {instance_id}")
+                            print(f"host: {host}")
+                            print(f"port: {port}")
+                            print(f"username: {username}")
+                            print(f"password: {password}")
+                            print(f"db: {db_index}")
+                            print(f"connection_timeout: {conn_timeout}")
+                            print("--------------------------------")
+
+                            self.logger.info(f"[ALERT] Redis server params | instance_id={instance_id}, host={host}, port={port}, user={username}, db={db_index}")
+
+                            # Initialize without gating on status
+                            redis_client = MatriceStream(
+                                StreamType.REDIS,
+                                host=host,
+                                port=int(port),
+                                password=password,
+                                username=username,
+                                db=db_index,
+                                connection_timeout=conn_timeout
+                            )
+                            redis_client.setup("alert_instant_config_request")
+                            self.logger.info("[ALERT] Redis client initialized successfully")
+                        else:
+                            self.logger.warning(f"[ALERT] Failed to fetch Redis server info: {response.get('message', 'Unknown error') if isinstance(response, dict) else 'Unknown error'}")
+                    except Exception as e:
+                        self.logger.warning(f"[ALERT] Redis initialization failed: {e}")
+
+            else:
+                # Initialize Kafka client (ONLY) using STRICT API (global info endpoint)
+                try:
+                    backend_base = self._get_backend_base_url()
+                    url = f"/v1/actions/get_kafka_info"
+                    self.logger.info("[ALERT] Initializing Kafka client via API for Cloud mode")
+                    response = rpc.get(url)
+                    if isinstance(response, dict) and response.get("success", False):
+                        data = response.get("data", {})
+                        enc_ip = data.get("ip")
+                        enc_port = data.get("port")
+                        ip_addr = None
+                        port = None
+                        try:
+                            ip_addr = base64.b64decode(str(enc_ip)).decode("utf-8")
+                        except Exception:
+                            ip_addr = enc_ip
+                        try:
+                            port = base64.b64decode(str(enc_port)).decode("utf-8")
+                        except Exception:
+                            port = enc_port
+
+                        print("----- KAFKA SERVER PARAMS -----")
+                        print(f"server_type: {server_type}")
+                        print(f"ipAddress: {ip_addr}")
+                        print(f"port: {port}")
+                        print("--------------------------------")
+
+                        self.logger.info(f"[ALERT] Kafka server params | ip={ip_addr}, port={port}")
+
+                        bootstrap_servers = f"{ip_addr}:{port}"
+                        kafka_client = MatriceStream(
+                            StreamType.KAFKA,
+                            bootstrap_servers=bootstrap_servers,
+                            sasl_mechanism="SCRAM-SHA-256",
+                            sasl_username="matrice-sdk-user",
+                            sasl_password="matrice-sdk-password",
+                            security_protocol="SASL_PLAINTEXT"
+                        )
+                        kafka_client.setup("alert_instant_config_request", consumer_group_id="py_analytics_lpr_alerts")
+                        self.logger.info(f"[ALERT] Kafka client initialized successfully (servers={bootstrap_servers})")
+                    else:
+                        self.logger.warning(f"[ALERT] Failed to fetch Kafka server info: {response.get('message', 'Unknown error') if isinstance(response, dict) else 'Unknown error'}")
+                except Exception as e:
+                    self.logger.warning(f"[ALERT] Kafka initialization failed: {e}")
+
+            # Create alert manager if client is available
+            if redis_client or kafka_client:
+                # Get app_deployment_id from action_details for filtering alerts
+                app_deployment_id_for_alert = getattr(self, '_app_deployment_id', None)
+                self.logger.info(f"[ALERT] Using app_deployment_id for alert filtering: {app_deployment_id_for_alert}")
+
+                self.alert_manager = ALERT_INSTANCE(
+                    redis_client=redis_client,
+                    kafka_client=kafka_client,
+                    config_topic="alert_instant_config_request",
+                    trigger_topic="alert_instant_triggered",
+                    polling_interval=10, # Poll every 10 seconds
+                    logger=self.logger,
+                    app_deployment_id=app_deployment_id_for_alert
+                )
+                self.alert_manager.start()
+                transport = "Redis" if redis_client else "Kafka"
+                self.logger.info(f"[ALERT] Alert manager initialized and started with {transport} (polling every 10s)")
+            else:
+                self.logger.warning(f"[ALERT] No {'Redis' if is_localhost else 'Kafka'} client available for {'Localhost' if is_localhost else 'Cloud'} mode, alerts disabled")
+
+        except Exception as e:
+            self.logger.error(f"[ALERT] Alert manager initialization failed: {e}", exc_info=True)
+        finally:
+            self._alert_manager_initialized = True # Mark as initialized (don't retry every frame)

     def reset_tracker(self) -> None:
         """Reset the advanced tracker instance."""
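
Note: besides the lazy _initialize_alert_manager_once path, set_alert_manager lets a caller inject a pre-built ALERT_INSTANCE. A minimal sketch of manual wiring over Redis, reusing only calls that appear in this diff; the host, credentials, and logger name are placeholders, and any constructor defaults beyond the keywords shown are assumptions:

    import logging
    from matrice_common.stream.matrice_stream import MatriceStream, StreamType
    from matrice_analytics.post_processing.utils.alert_instance_utils import ALERT_INSTANCE

    # Placeholder connection parameters for illustration only
    redis_client = MatriceStream(
        StreamType.REDIS,
        host="127.0.0.1",
        port=6379,
        password="",
        username=None,
        db=0,
        connection_timeout=120,
    )
    redis_client.setup("alert_instant_config_request")

    alert_manager = ALERT_INSTANCE(
        redis_client=redis_client,
        kafka_client=None,
        config_topic="alert_instant_config_request",
        trigger_topic="alert_instant_triggered",
        polling_interval=10,
        logger=logging.getLogger("lpr_alerts"),
        app_deployment_id=None,
    )
    alert_manager.start()
    use_case.set_alert_manager(alert_manager)  # use_case: an existing LicensePlateMonitorUseCase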
@@ -537,6 +949,171 @@ class LicensePlateMonitorUseCase(BaseProcessor):
         self.reset_tracker()
         self.reset_plate_tracking()
         self.logger.info("All plate tracking state reset")
+
+    def _send_instant_alerts(
+        self,
+        detections: List[Dict[str, Any]],
+        stream_info: Optional[Dict[str, Any]],
+        config: LicensePlateMonitorConfig
+    ) -> None:
+        """
+        Send detection events to the instant alert system.
+
+        This method processes detections and sends them to the alert manager
+        for evaluation against active alert configurations.
+
+        Args:
+            detections: List of detection dictionaries with plate_text
+            stream_info: Stream information containing camera_id and other metadata
+            config: License plate monitoring configuration
+        """
+        self.logger.info(f"[ALERT_DEBUG] ========== SEND INSTANT ALERTS ==========")
+
+        if not self.alert_manager:
+            self.logger.debug("[ALERT_DEBUG] Alert manager not configured, skipping instant alerts")
+            return
+
+        if not detections:
+            self.logger.debug("[ALERT_DEBUG] No detections to send to alert manager")
+            return
+
+        self.logger.info(f"[ALERT_DEBUG] Processing {len(detections)} detection(s) for alerts")
+
+        # Extract metadata directly from stream_info with empty string defaults
+        # No complex nested checks - if not found, pass empty string (no errors)
+        camera_id = ""
+        app_deployment_id = ""
+        application_id = ""
+        camera_name = ""
+        frame_id = ""
+        location_name = ""
+
+        if stream_info:
+            self.logger.debug(f"[ALERT_DEBUG] stream_info keys: {list(stream_info.keys())}")
+            # Direct extraction with safe defaults
+            camera_id = stream_info.get("camera_id", "")
+            if not camera_id and "camera_info" in stream_info:
+                camera_id = stream_info.get("camera_info", {}).get("camera_id", "")
+
+            camera_name = stream_info.get("camera_name", "")
+            if not camera_name and "camera_info" in stream_info:
+                camera_name = stream_info.get("camera_info", {}).get("camera_name", "")
+
+            app_deployment_id = stream_info.get("app_deployment_id", "")
+            application_id = stream_info.get("application_id", stream_info.get("app_id", ""))
+
+            # Extract frame_id - it's at root level of stream_info
+            frame_id = stream_info.get("frame_id", "")
+
+            # Extract location_id and fetch location_name from API
+            location_id = ""
+            if "camera_info" in stream_info:
+                location_id = stream_info.get("camera_info", {}).get("location", "")
+
+            if location_id:
+                # Fetch location name from API
+                location_name = self._fetch_location_name(location_id, config.session)
+            else:
+                location_name = "Entry Reception" # Default if no location_id
+
+            self.logger.debug(f"[ALERT_DEBUG] Extracted metadata from stream_info:")
+            self.logger.debug(f"[ALERT_DEBUG] - camera_id: '{camera_id}'")
+            self.logger.debug(f"[ALERT_DEBUG] - camera_name: '{camera_name}'")
+            self.logger.debug(f"[ALERT_DEBUG] - app_deployment_id: '{app_deployment_id}'")
+            self.logger.debug(f"[ALERT_DEBUG] - application_id: '{application_id}'")
+            self.logger.debug(f"[ALERT_DEBUG] - frame_id: '{frame_id}'")
+            self.logger.debug(f"[ALERT_DEBUG] - location_id: '{location_id}'")
+            self.logger.debug(f"[ALERT_DEBUG] - location_name: '{location_name}'")
+        else:
+            self.logger.warning("[ALERT_DEBUG] stream_info is None")
+            location_name = "Entry Reception" # Default
+
+        # Process each detection with a valid plate_text
+        sent_count = 0
+        skipped_count = 0
+        for i, detection in enumerate(detections):
+            self.logger.debug(f"[ALERT_DEBUG] --- Processing detection #{i+1} ---")
+            self.logger.debug(f"[ALERT_DEBUG] Detection keys: {list(detection.keys())}")
+
+            plate_text = detection.get('plate_text', '.')
+            if plate_text:
+                plate_text = plate_text.strip()
+            else:
+                plate_text = ''
+            self.logger.debug(f"[ALERT_DEBUG] Plate text: '{plate_text}'")
+
+            if not plate_text or plate_text == '':
+                self.logger.debug(f"[ALERT_DEBUG] Skipping detection #{i+1} - no plate_text")
+                skipped_count += 1
+                continue
+
+            # Extract detection metadata
+            confidence = detection.get('score', detection.get('confidence', 0.0))
+            bbox = detection.get('bbox', detection.get('bounding_box', []))
+
+            self.logger.debug(f"[ALERT_DEBUG] Confidence: {confidence}")
+            self.logger.debug(f"[ALERT_DEBUG] BBox: {bbox}")
+
+            # Build coordinates dict
+            coordinates = {}
+            if isinstance(bbox, dict):
+                # Handle dict format bbox
+                if 'xmin' in bbox:
+                    coordinates = {
+                        "x": int(bbox.get('xmin', 0)),
+                        "y": int(bbox.get('ymin', 0)),
+                        "width": int(bbox.get('xmax', 0) - bbox.get('xmin', 0)),
+                        "height": int(bbox.get('ymax', 0) - bbox.get('ymin', 0))
+                    }
+                elif 'x' in bbox:
+                    coordinates = {
+                        "x": int(bbox.get('x', 0)),
+                        "y": int(bbox.get('y', 0)),
+                        "width": int(bbox.get('width', 0)),
+                        "height": int(bbox.get('height', 0))
+                    }
+            elif isinstance(bbox, list) and len(bbox) >= 4:
+                x1, y1, x2, y2 = bbox[:4]
+                coordinates = {
+                    "x": int(x1),
+                    "y": int(y1),
+                    "width": int(x2 - x1),
+                    "height": int(y2 - y1)
+                }
+
+            self.logger.debug(f"[ALERT_DEBUG] Coordinates: {coordinates}")
+
+            # Build detection event for alert system
+            detection_event = {
+                "camera_id": camera_id,
+                "app_deployment_id": app_deployment_id,
+                "application_id": application_id,
+                "detectionType": "license_plate",
+                "plateNumber": plate_text,
+                "confidence": float(confidence),
+                "frameUrl": "", # Will be filled by analytics publisher if needed
+                "coordinates": coordinates,
+                "cameraName": camera_name,
+                "locationName": location_name,
+                "frame_id": frame_id,
+                "vehicleType": detection.get('vehicle_type', ''),
+                "vehicleColor": detection.get('vehicle_color', ''),
+                "timestamp": datetime.now(timezone.utc).isoformat()
+            }
+
+            self.logger.info(f"[ALERT_DEBUG] Detection event #{i+1} built: {detection_event}")
+
+            # Send to alert manager for evaluation
+            try:
+                self.logger.info(f"[ALERT_DEBUG] Sending detection event #{i+1} to alert manager...")
+                self.alert_manager.process_detection_event(detection_event)
+                self.logger.info(f"[ALERT_DEBUG] ✓ Sent detection event to alert manager: plate={plate_text}, confidence={confidence:.2f}")
+                sent_count += 1
+            except Exception as e:
+                self.logger.error(f"[ALERT_DEBUG] ❌ Error sending detection event to alert manager: {e}", exc_info=True)
+
+        self.logger.info(f"[ALERT_DEBUG] Summary: {sent_count} sent, {skipped_count} skipped")
+        self.logger.info(f"[ALERT_DEBUG] ========== INSTANT ALERTS PROCESSED ==========")

     def _initialize_plate_logger(self, config: LicensePlateMonitorConfig) -> bool:
         """Initialize the plate logger if lpr_server_id is provided. Returns True if successful."""
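
Note: _send_instant_alerts only reads a handful of keys from stream_info and from each detection. A minimal sketch of the inputs it expects and the event it builds; all values are illustrative placeholders:

    stream_info = {
        "camera_id": "cam-01",
        "camera_name": "Gate Camera",
        "frame_id": "frame-000123",
        "app_deployment_id": "",  # optional
        "camera_info": {"location": "location-id-placeholder"},  # resolved via _fetch_location_name
    }
    detection = {
        "plate_text": "AB12CDE",
        "confidence": 0.91,
        "bounding_box": {"xmin": 10, "ymin": 20, "xmax": 110, "ymax": 60},
    }
    # The event passed to ALERT_INSTANCE.process_detection_event() carries:
    #   camera_id, app_deployment_id, application_id, detectionType="license_plate",
    #   plateNumber, confidence, frameUrl, coordinates {x, y, width, height},
    #   cameraName, locationName, frame_id, vehicleType, vehicleColor, timestamp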
@@ -575,7 +1152,8 @@ class LicensePlateMonitorUseCase(BaseProcessor):
         print(f"[LP_LOGGING] Starting plate logging check - detections count: {len(detections)}")
         self.logger.info(f"[LP_LOGGING] Starting plate logging check - detections count: {len(detections)}")
         self.logger.info(f"[LP_LOGGING] Logging enabled: {self._logging_enabled}, Plate logger exists: {self.plate_logger is not None}, Stream info exists: {stream_info is not None}")
-
+
+        #self._logging_enabled = False # ToDo: DISABLED FOR NOW, ENABLED FOR PRODUCTION
         if not self._logging_enabled:
             print("[LP_LOGGING] Plate logging is DISABLED")
             self.logger.warning("[LP_LOGGING] Plate logging is DISABLED - logging_enabled flag is False")
@@ -586,10 +1164,10 @@ class LicensePlateMonitorUseCase(BaseProcessor):
             self.logger.warning("[LP_LOGGING] Plate logging SKIPPED - plate_logger is not initialized (lpr_server_id may not be configured)")
             return

-        if not stream_info:
-
-
-
+        # if not stream_info:
+        #     print("[LP_LOGGING] Plate logging SKIPPED - stream_info is None")
+        #     self.logger.warning("[LP_LOGGING] Plate logging SKIPPED - stream_info is None")
+        #     return

         print("[LP_LOGGING] All pre-conditions met, proceeding with plate logging")
         self.logger.info(f"[LP_LOGGING] All pre-conditions met, proceeding with plate logging")
@@ -617,6 +1195,7 @@ class LicensePlateMonitorUseCase(BaseProcessor):
                 else:
                     self.logger.warning(f"[LP_LOGGING] Failed to decode image bytes")
             except Exception as e:
+                #pass
                 self.logger.error(f"[LP_LOGGING] Exception while encoding frame image: {e}", exc_info=True)
         else:
             self.logger.info(f"[LP_LOGGING] No image_bytes provided, sending without image")
@@ -657,14 +1236,18 @@ class LicensePlateMonitorUseCase(BaseProcessor):
                        print(f"[LP_LOGGING] Plate {plate_text}: {status}")
                        self.logger.info(f"[LP_LOGGING] Plate {plate_text}: {status}")
                    except Exception as e:
+                        #pass
                        print(f"[LP_LOGGING] ERROR - Plate {plate_text} failed: {e}")
                        self.logger.error(f"[LP_LOGGING] Plate {plate_text} raised exception: {e}", exc_info=True)

                print("[LP_LOGGING] Plate logging complete")
                self.logger.info(f"[LP_LOGGING] Plate logging complete")
            except Exception as e:
+                print(f"[LP_LOGGING] CRITICAL ERROR during plate logging: {e}")
+
                print(f"[LP_LOGGING] CRITICAL ERROR during plate logging: {e}")
                self.logger.error(f"[LP_LOGGING] CRITICAL ERROR during plate logging: {e}", exc_info=True)
+                pass
        else:
            print("[LP_LOGGING] No plates to log")
            self.logger.info(f"[LP_LOGGING] No plates to log (plates_to_log is empty)")
@@ -693,11 +1276,16 @@ class LicensePlateMonitorUseCase(BaseProcessor):
            else:
                self.logger.error(f"[LP_LOGGING] Plate logger initialization FAILED - plates will NOT be sent")
        elif self._plate_logger_initialized:
-
+            self.logger.debug(f"[LP_LOGGING] Plate logger already initialized, skipping re-initialization")
        elif not config.lpr_server_id:
-
-
-
+            if self._total_frame_counter == 0: #Only log once at start
+                self.logger.warning(f"[LP_LOGGING] Plate logging will be DISABLED - no lpr_server_id provided in config")
+
+        # Initialize alert manager once (lazy initialization on first call)
+        if not self._alert_manager_initialized:
+            self._initialize_alert_manager_once(config)
+            self.logger.info(f"[ALERT] CONFIG OF ALERT SHOULD BE PRINTED")
+
        # Normalize alert_config if provided as a plain dict (JS JSON)
        if isinstance(getattr(config, 'alert_config', None), dict):
            try:
@@ -718,13 +1306,13 @@ class LicensePlateMonitorUseCase(BaseProcessor):
            # print("---------CONFIDENCE FILTERING",config.confidence_threshold)
            # print("---------DATA1--------------",data)
            processed_data = filter_by_confidence(data, config.confidence_threshold)
-
+            self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")

            # Step 2: Apply category mapping if provided
            if config.index_to_category:
                processed_data = apply_category_mapping(processed_data, config.index_to_category)
                #self.logger.debug("Applied category mapping")
-
+            print("---------DATA2-STREAM--------------",stream_info)
            # Step 3: Filter to target categories (handle dict or list)
            if isinstance(processed_data, dict):
                processed_data = processed_data.get("detections", [])
@@ -775,25 +1363,29 @@ class LicensePlateMonitorUseCase(BaseProcessor):
            #print("---------DATA5--------------",processed_data)
            # Step 8: Perform OCR on media
            ocr_analysis = self._analyze_ocr_in_media(processed_data, input_bytes, config)
-            self.logger.info(f"[LP_LOGGING] OCR analysis completed, found {len(ocr_analysis)} results")
+            #self.logger.info(f"[LP_LOGGING] OCR analysis completed, found {len(ocr_analysis)} results")
            ocr_plates_found = [r.get('plate_text') for r in ocr_analysis if r.get('plate_text')]
-            if ocr_plates_found:
-
-            else:
-
+            # if ocr_plates_found:
+            #     self.logger.info(f"[LP_LOGGING] OCR detected plates: {ocr_plates_found}")
+            # else:
+            #     self.logger.warning(f"[LP_LOGGING] OCR did not detect any valid plate texts")

            # Step 9: Update plate texts
            processed_data = self._update_detections_with_ocr(processed_data, ocr_analysis)
            self._update_plate_texts(processed_data)
-
+            print("[LP_LOGGING]DEBUG -1")
+
            # Log final detection state before sending
            final_plates = [d.get('plate_text') for d in processed_data if d.get('plate_text')]
            self.logger.info(f"[LP_LOGGING] After OCR update, {len(final_plates)} detections have plate_text: {final_plates}")
-
+
            # Step 9.5: Log detected plates to RPC (optional, only if lpr_server_id is provided)
            # Direct await since process is now async
            await self._log_detected_plates(processed_data, config, stream_info, input_bytes)
-
+            print("[LP_LOGGING]DEBUG -2")
+            # Step 9.6: Send detections to instant alert system (if configured)
+            self._send_instant_alerts(processed_data, stream_info, config)
+            print("[LP_LOGGING]DEBUG -3")
            # Step 10: Update frame counter
            self._total_frame_counter += 1

@@ -809,6 +1401,7 @@ class LicensePlateMonitorUseCase(BaseProcessor):
            # Step 12: Calculate summaries
            counting_summary = self._count_categories(processed_data, config)
            counting_summary['total_counts'] = self.get_total_counts()
+            print("[LP_LOGGING]DEBUG -4")

            # Step 13: Generate alerts and summaries
            alerts = self._check_alerts(counting_summary, frame_number, config)
@@ -856,7 +1449,7 @@ class LicensePlateMonitorUseCase(BaseProcessor):
            processing_fps = (1.0 / proc_time) if proc_time > 0 else None
            # Log the performance metrics using the module-level logger
            print("latency in ms:",processing_latency_ms,"| Throughput fps:",processing_fps,"| Frame_Number:",self._total_frame_counter)
-
+            print("[LP_LOGGING]DEBUG -5")
            return result

        except Exception as e:
@@ -1162,35 +1755,39 @@ class LicensePlateMonitorUseCase(BaseProcessor):
        human_text_lines = []
        #print("counting_summary", counting_summary)
        human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
+        sum_of_current_frame_detections = sum(per_category_count.values())
+
        if total_detections > 0:
+            #for cat, count in per_category_count.items():
+            human_text_lines.append(f"\t- License Plates Detected: {sum_of_current_frame_detections}")
            category_counts = [f"{count} {cat}" for cat, count in per_category_count.items()]
            detection_text = category_counts[0] + " detected" if len(category_counts) == 1 else f"{', '.join(category_counts[:-1])}, and {category_counts[-1]} detected"
-            human_text_lines.append(f"\t- {detection_text}")
-            #
+            #human_text_lines.append(f"\t- {detection_text}")
+            #Show dominant per-track license plates for current frame
            seen = set()
            display_texts = []
            for det in counting_summary.get("detections", []):
                t = det.get("track_id")
                dom = det.get("plate_text")
-                if not dom or not (self._min_plate_len <= len(dom) <=
+                if not dom or not (self._min_plate_len <= len(dom) <= 5):
                    continue
                if t in seen:
                    continue
                seen.add(t)
                display_texts.append(dom)
-            if display_texts:
-
+            # if display_texts:
+            #     human_text_lines.append(f"\t- License Plates: {', '.join(display_texts)}")
        else:
-            human_text_lines.append(f"\t-
+            human_text_lines.append(f"\t- License Plates Detected: 0")

        human_text_lines.append("")
-        human_text_lines.append(f"TOTAL SINCE {start_timestamp}:")
-        human_text_lines.append(f"\t- Total Detected: {cumulative_total}")
+        # human_text_lines.append(f"TOTAL SINCE {start_timestamp}:")
+        # human_text_lines.append(f"\t- Total Detected: {cumulative_total}")

-        if self._unique_plate_texts:
-
-
-
+        # if self._unique_plate_texts:
+        #     human_text_lines.append("\t- Unique License Plates:")
+        #     for text in sorted(self._unique_plate_texts.values()):
+        #         human_text_lines.append(f"\t\t- {text}")

        current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items() if count > 0 or total_detections > 0]
        total_counts_list = [{"category": cat, "count": count} for cat, count in total_counts.items() if count > 0 or cumulative_total > 0]
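
Note: with this change the per-frame human-readable summary reports a single "License Plates Detected" count instead of per-category text, cumulative totals, and the unique-plate list (those lines are now commented out). An illustrative output for a frame with two plates (timestamp is a placeholder):

    CURRENT FRAME @ 2024-01-01 12:00:00:
        - License Plates Detected: 2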
@@ -1200,10 +1797,10 @@ class LicensePlateMonitorUseCase(BaseProcessor):
        for detection in counting_summary.get("detections", []):
            dom = detection.get("plate_text", "")
            if not dom:
-                dom = "
+                dom = ""
            bbox = detection.get("bounding_box", {})
-            category = detection.get("category", "
-
+            category = detection.get("category", "")
+            #egmentation = detection.get("masks", detection.get("segmentation", detection.get("mask", [])))
            detection_obj = self.create_detection_object(category, bbox, segmentation=None, plate_text=dom)
            detections.append(detection_obj)

@@ -1244,6 +1841,7 @@ class LicensePlateMonitorUseCase(BaseProcessor):
                start_time=high_precision_start_timestamp,
                reset_time=high_precision_reset_timestamp
            )
+            tracking_stat['target_categories'] = self.target_categories
            tracking_stats.append(tracking_stat)
        return tracking_stats
