matrice-analytics 0.1.89__py3-none-any.whl → 0.1.96__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/post_processing/__init__.py +8 -2
- matrice_analytics/post_processing/config.py +2 -0
- matrice_analytics/post_processing/core/config.py +40 -3
- matrice_analytics/post_processing/face_reg/face_recognition.py +146 -14
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +116 -4
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +19 -0
- matrice_analytics/post_processing/post_processor.py +4 -0
- matrice_analytics/post_processing/usecases/__init__.py +4 -1
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +5 -2
- matrice_analytics/post_processing/usecases/color_detection.py +1 -0
- matrice_analytics/post_processing/usecases/fire_detection.py +94 -14
- matrice_analytics/post_processing/usecases/footfall.py +750 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +91 -1
- matrice_analytics/post_processing/usecases/people_counting.py +55 -22
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1 -0
- matrice_analytics/post_processing/usecases/weapon_detection.py +2 -1
- matrice_analytics/post_processing/utils/alert_instance_utils.py +94 -26
- matrice_analytics/post_processing/utils/business_metrics_manager_utils.py +97 -4
- matrice_analytics/post_processing/utils/incident_manager_utils.py +103 -6
- {matrice_analytics-0.1.89.dist-info → matrice_analytics-0.1.96.dist-info}/METADATA +1 -1
- {matrice_analytics-0.1.89.dist-info → matrice_analytics-0.1.96.dist-info}/RECORD +24 -23
- {matrice_analytics-0.1.89.dist-info → matrice_analytics-0.1.96.dist-info}/WHEEL +0 -0
- {matrice_analytics-0.1.89.dist-info → matrice_analytics-0.1.96.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice_analytics-0.1.89.dist-info → matrice_analytics-0.1.96.dist-info}/top_level.txt +0 -0
matrice_analytics/post_processing/usecases/license_plate_monitoring.py:

@@ -610,6 +610,68 @@ class LicensePlateMonitorUseCase(BaseProcessor):
             #self.logger.error(f"Error fetching external IP: {e}", exc_info=True)
             return "localhost"
 
+    def _fetch_location_name(self, location_id: str, session: Optional[Session] = None) -> str:
+        """
+        Fetch location name from API using location_id.
+
+        Args:
+            location_id: The location ID to look up
+            session: Matrice session for API calls
+
+        Returns:
+            Location name string, or 'Entry Reception' as default if API fails
+        """
+        default_location = "Entry Reception"
+
+        if not location_id:
+            self.logger.debug(f"[LOCATION] No location_id provided, using default: '{default_location}'")
+            return default_location
+
+        # Check cache first
+        if not hasattr(self, '_location_name_cache'):
+            self._location_name_cache: Dict[str, str] = {}
+
+        if location_id in self._location_name_cache:
+            cached_name = self._location_name_cache[location_id]
+            self.logger.debug(f"[LOCATION] Using cached location name for '{location_id}': '{cached_name}'")
+            return cached_name
+
+        if not session:
+            self.logger.warning(f"[LOCATION] No session provided, using default: '{default_location}'")
+            return default_location
+
+        try:
+            endpoint = f"/v1/inference/get_location/{location_id}"
+            self.logger.info(f"[LOCATION] Fetching location name from API: {endpoint}")
+
+            response = session.rpc.get(endpoint)
+
+            if response and isinstance(response, dict):
+                success = response.get("success", False)
+                if success:
+                    data = response.get("data", {})
+                    location_name = data.get("locationName", default_location)
+                    self.logger.info(f"[LOCATION] ✓ Fetched location name: '{location_name}' for location_id: '{location_id}'")
+
+                    # Cache the result
+                    self._location_name_cache[location_id] = location_name
+                    return location_name
+                else:
+                    self.logger.warning(
+                        f"[LOCATION] API returned success=false for location_id '{location_id}': "
+                        f"{response.get('message', 'Unknown error')}"
+                    )
+            else:
+                self.logger.warning(f"[LOCATION] Invalid response format from API: {response}")
+
+        except Exception as e:
+            self.logger.error(f"[LOCATION] Error fetching location name for '{location_id}': {e}", exc_info=True)
+
+        # Use default on any failure
+        self.logger.info(f"[LOCATION] Using default location name: '{default_location}'")
+        self._location_name_cache[location_id] = default_location
+        return default_location
+
     def _initialize_alert_manager_once(self, config: LicensePlateMonitorConfig) -> None:
         """
         Initialize alert manager ONCE with Redis OR Kafka clients (Environment based).
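The new `_fetch_location_name` helper resolves a camera's location ID to a readable name via `/v1/inference/get_location/{location_id}`, caches the result on the instance, and falls back to "Entry Reception" whenever the ID, session, or API response is missing. A minimal standalone sketch of that lookup-with-cache-and-fallback pattern (the `fetch_location_name` function, `FakeSession` stub, and module-level `_cache` below are illustrative, not part of the package):

```python
from typing import Dict, Optional

DEFAULT_LOCATION = "Entry Reception"
_cache: Dict[str, str] = {}

class FakeSession:
    """Stand-in for the Matrice session; session.rpc.get(...) is assumed per the diff."""
    class rpc:
        @staticmethod
        def get(endpoint: str) -> dict:
            return {"success": True, "data": {"locationName": "Lobby East"}}

def fetch_location_name(location_id: str, session: Optional[FakeSession] = None) -> str:
    if not location_id or session is None:
        return DEFAULT_LOCATION
    if location_id in _cache:                      # repeat lookups skip the API entirely
        return _cache[location_id]
    try:
        response = session.rpc.get(f"/v1/inference/get_location/{location_id}")
        if isinstance(response, dict) and response.get("success"):
            name = response.get("data", {}).get("locationName", DEFAULT_LOCATION)
            _cache[location_id] = name
            return name
    except Exception:
        pass
    _cache[location_id] = DEFAULT_LOCATION         # failures are cached as the default, too
    return DEFAULT_LOCATION

print(fetch_location_name("loc-123", FakeSession()))  # "Lobby East" (API hit)
print(fetch_location_name("loc-123", FakeSession()))  # "Lobby East" (served from cache)
print(fetch_location_name("", FakeSession()))         # "Entry Reception" (fallback)
```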
@@ -839,13 +901,18 @@ class LicensePlateMonitorUseCase(BaseProcessor):
 
         # Create alert manager if client is available
         if redis_client or kafka_client:
+            # Get app_deployment_id from action_details for filtering alerts
+            app_deployment_id_for_alert = getattr(self, '_app_deployment_id', None)
+            self.logger.info(f"[ALERT] Using app_deployment_id for alert filtering: {app_deployment_id_for_alert}")
+
             self.alert_manager = ALERT_INSTANCE(
                 redis_client=redis_client,
                 kafka_client=kafka_client,
                 config_topic="alert_instant_config_request",
                 trigger_topic="alert_instant_triggered",
                 polling_interval=10, # Poll every 10 seconds
-                logger=self.logger
+                logger=self.logger,
+                app_deployment_id=app_deployment_id_for_alert
             )
             self.alert_manager.start()
             transport = "Redis" if redis_client else "Kafka"

@@ -918,6 +985,8 @@ class LicensePlateMonitorUseCase(BaseProcessor):
         app_deployment_id = ""
         application_id = ""
         camera_name = ""
+        frame_id = ""
+        location_name = ""
 
         if stream_info:
             self.logger.debug(f"[ALERT_DEBUG] stream_info keys: {list(stream_info.keys())}")
@@ -933,13 +1002,31 @@ class LicensePlateMonitorUseCase(BaseProcessor):
             app_deployment_id = stream_info.get("app_deployment_id", "")
             application_id = stream_info.get("application_id", stream_info.get("app_id", ""))
 
+            # Extract frame_id - it's at root level of stream_info
+            frame_id = stream_info.get("frame_id", "")
+
+            # Extract location_id and fetch location_name from API
+            location_id = ""
+            if "camera_info" in stream_info:
+                location_id = stream_info.get("camera_info", {}).get("location", "")
+
+            if location_id:
+                # Fetch location name from API
+                location_name = self._fetch_location_name(location_id, config.session)
+            else:
+                location_name = "Entry Reception"  # Default if no location_id
+
             self.logger.debug(f"[ALERT_DEBUG] Extracted metadata from stream_info:")
             self.logger.debug(f"[ALERT_DEBUG] - camera_id: '{camera_id}'")
             self.logger.debug(f"[ALERT_DEBUG] - camera_name: '{camera_name}'")
             self.logger.debug(f"[ALERT_DEBUG] - app_deployment_id: '{app_deployment_id}'")
             self.logger.debug(f"[ALERT_DEBUG] - application_id: '{application_id}'")
+            self.logger.debug(f"[ALERT_DEBUG] - frame_id: '{frame_id}'")
+            self.logger.debug(f"[ALERT_DEBUG] - location_id: '{location_id}'")
+            self.logger.debug(f"[ALERT_DEBUG] - location_name: '{location_name}'")
         else:
             self.logger.warning("[ALERT_DEBUG] stream_info is None")
+            location_name = "Entry Reception"  # Default
 
         # Process each detection with a valid plate_text
         sent_count = 0

@@ -1007,6 +1094,8 @@ class LicensePlateMonitorUseCase(BaseProcessor):
                 "frameUrl": "", # Will be filled by analytics publisher if needed
                 "coordinates": coordinates,
                 "cameraName": camera_name,
+                "locationName": location_name,
+                "frame_id": frame_id,
                 "vehicleType": detection.get('vehicle_type', ''),
                 "vehicleColor": detection.get('vehicle_color', ''),
                 "timestamp": datetime.now(timezone.utc).isoformat()
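With these changes each license-plate event published for alerting carries the resolved location name and the originating frame ID alongside the existing camera metadata. A representative payload under those assumptions (all field values below are illustrative only):

```python
detection_event = {
    "frameUrl": "",                      # filled by the analytics publisher if needed
    "coordinates": {"x": 120, "y": 245, "width": 88, "height": 40},
    "cameraName": "gate-cam-01",
    "locationName": "Entry Reception",   # from _fetch_location_name(), defaults when lookup fails
    "frame_id": "f-000123",              # taken from the root of stream_info
    "vehicleType": "sedan",
    "vehicleColor": "white",
    "timestamp": "2024-01-01T00:00:00+00:00",
}
```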
@@ -1752,6 +1841,7 @@ class LicensePlateMonitorUseCase(BaseProcessor):
                 start_time=high_precision_start_timestamp,
                 reset_time=high_precision_reset_timestamp
             )
+            tracking_stat['target_categories'] = self.target_categories
             tracking_stats.append(tracking_stat)
         return tracking_stats
 
matrice_analytics/post_processing/usecases/people_counting.py:

@@ -36,7 +36,7 @@ class PeopleCountingUseCase(BaseProcessor):
         self.category = "general"
         self.CASE_TYPE: Optional[str] = 'people_counting'
         self.CASE_VERSION: Optional[str] = '1.4'
-        self.target_categories = ['person', 'people','human','man','woman','male','female']
+        self.target_categories = ['person'] #['person', 'people','human','man','woman','male','female']
         self.smoothing_tracker = None
         self.tracker = None
         self._total_frame_counter = 0
@@ -50,6 +50,19 @@ class PeopleCountingUseCase(BaseProcessor):
         self.current_incident_end_timestamp: str = "N/A"
         self.start_timer = None
 
+    def _simple_tracker_update(self, detections: list) -> list:
+        """
+        ====== PERFORMANCE: Lightweight tracker alternative ======
+        Simple tracker using frame-local indexing.
+        Much faster than AdvancedTracker - O(n) complexity.
+        Does not persist track IDs across frames.
+        Enable via config.enable_simple_tracker = True
+        """
+        for i, det in enumerate(detections):
+            if det.get('track_id') is None:
+                det['track_id'] = f"simple_{self._total_frame_counter}_{i}"
+        return detections
+
     def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
                 stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
         processing_start = time.time()
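The lightweight tracker simply stamps any detection that lacks a `track_id` with a frame-local identifier, so IDs are unique within a frame but are not stable across frames. A small standalone sketch of the same idea (the `simple_tracker_update` function below is illustrative, not the packaged method):

```python
def simple_tracker_update(detections: list, frame_counter: int) -> list:
    # Assign a frame-local ID to any detection that does not already have one.
    for i, det in enumerate(detections):
        if det.get("track_id") is None:
            det["track_id"] = f"simple_{frame_counter}_{i}"
    return detections

frame_0 = simple_tracker_update([{"category": "person"}, {"category": "person"}], frame_counter=0)
frame_1 = simple_tracker_update([{"category": "person"}], frame_counter=1)
print([d["track_id"] for d in frame_0])  # ['simple_0_0', 'simple_0_1']
print([d["track_id"] for d in frame_1])  # ['simple_1_0'] - the same person gets a new ID next frame
```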
@@ -90,20 +103,29 @@ class PeopleCountingUseCase(BaseProcessor):
         # self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
         # processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # ====== TRACKER SELECTION (both disabled by default for max performance) ======
+        # Set config.enable_advanced_tracker=True or config.enable_simple_tracker=True to enable
+        if getattr(config, 'enable_advanced_tracker', False):
+            # Heavy O(n³) tracker - use only when tracking quality is critical
+            try:
+                from ..advanced_tracker import AdvancedTracker
+                from ..advanced_tracker.config import TrackerConfig
+                if self.tracker is None:
+                    tracker_config = TrackerConfig(
+                        track_high_thresh=0.4,
+                        track_low_thresh=0.05,
+                        new_track_thresh=0.3,
+                        match_thresh=0.8)
+                    self.tracker = AdvancedTracker(tracker_config)
+                    self.logger.info("Initialized AdvancedTracker for People Counting")
+                processed_data = self.tracker.update(processed_data)
+            except Exception as e:
+                self.logger.warning(f"AdvancedTracker failed: {e}")
+        elif getattr(config, 'enable_simple_tracker', False):
+            # Lightweight O(n) tracker - fast but no cross-frame persistence
+            processed_data = self._simple_tracker_update(processed_data)
+        # else: No tracking - maximum performance, just use raw detections
+        # ====== END TRACKER SELECTION ======
 
         self._update_tracking_state(processed_data)
         self._total_frame_counter += 1
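Tracker choice is now driven by two opt-in config flags, with no tracking at all as the default. A condensed sketch of that selection logic, assuming the config object accepts arbitrary attributes (the `SimpleNamespace` stands in for the real people-counting config class, which is not shown in this diff):

```python
from types import SimpleNamespace

# Stand-in for the use case's config object.
config = SimpleNamespace(enable_advanced_tracker=False, enable_simple_tracker=True)

# Mirrors the selection added in process():
if getattr(config, "enable_advanced_tracker", False):
    mode = "advanced"   # heavy tracker, persistent cross-frame IDs
elif getattr(config, "enable_simple_tracker", False):
    mode = "simple"     # frame-local IDs only
else:
    mode = "none"       # raw detections, maximum throughput
print(mode)             # simple
```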
@@ -362,6 +384,7 @@ class PeopleCountingUseCase(BaseProcessor):
                 start_time=high_precision_start_timestamp,
                 reset_time=high_precision_reset_timestamp
             )
+            tracking_stat['target_categories'] = self.target_categories
             tracking_stats.append(tracking_stat)
         return tracking_stats
 
@@ -414,14 +437,24 @@ class PeopleCountingUseCase(BaseProcessor):
 
         for det in detections:
             cat = det.get("category")
-
-            if cat not in self.target_categories
+            track_id = det.get("track_id")
+            if cat not in self.target_categories:
                 continue
-
-
-
-
-            self.
+
+            # ====== PERFORMANCE: Skip heavy track merging O(n*m) ======
+            # To enable track merging, uncomment below and comment the simple counting section
+            # bbox = det.get("bounding_box", det.get("bbox"))
+            # canonical_id = self._merge_or_register_track(track_id, bbox)
+            # det["track_id"] = canonical_id
+            # self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
+            # self._current_frame_track_ids[cat].add(canonical_id)
+            # ====== END HEAVY TRACK MERGING ======
+
+            # ====== SIMPLE COUNTING (default - no track merging overhead) ======
+            if track_id is not None:
+                self._per_category_total_track_ids.setdefault(cat, set()).add(track_id)
+                self._current_frame_track_ids[cat].add(track_id)
+            # ====== END SIMPLE COUNTING ======
 
     def get_total_counts(self):
         return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
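Counting now reduces to set insertion keyed by category: each `track_id` is added to a running per-category set and to the current frame's set, and totals are just the set sizes. A sketch of that bookkeeping with illustrative variable names and data:

```python
per_category_total = {}
current_frame = {"person": set()}
target_categories = ["person"]

for det in [
    {"category": "person", "track_id": 1},
    {"category": "person", "track_id": 2},
    {"category": "person", "track_id": 1},   # repeated ID is deduplicated by the set
    {"category": "car", "track_id": 9},      # not in target_categories, skipped
]:
    if det["category"] not in target_categories:
        continue
    tid = det["track_id"]
    if tid is not None:
        per_category_total.setdefault(det["category"], set()).add(tid)
        current_frame[det["category"]].add(tid)

print({cat: len(ids) for cat, ids in per_category_total.items()})  # {'person': 2}
```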
matrice_analytics/post_processing/usecases/vehicle_monitoring.py:

@@ -683,6 +683,7 @@ class VehicleMonitoringUseCase(BaseProcessor):
                 start_time=high_precision_start_timestamp,
                 reset_time=high_precision_reset_timestamp
             )
+            tracking_stat['target_categories'] = self.target_categories
             tracking_stats.append(tracking_stat)
         return tracking_stats
 
matrice_analytics/post_processing/usecases/weapon_detection.py:

@@ -317,7 +317,7 @@ class WeaponDetectionUseCase(BaseProcessor):
         total_counts = [{"category": cat, "count": count} for cat, count in total_counts_dict.items() if count > 0]
 
         # Build current_counts
-        current_counts = [{"category":
+        current_counts = [{"category": 'Weapon', "count": count} for cat, count in per_category_count.items() if count > 0]
 
         # Prepare detections
         detections = []

@@ -377,6 +377,7 @@ class WeaponDetectionUseCase(BaseProcessor):
                 start_time=high_precision_start_timestamp,
                 reset_time=high_precision_reset_timestamp
             )
+            tracking_stat['target_categories'] = ['Weapon']
             tracking_stats.append(tracking_stat)
         return tracking_stats
 
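Note that the rebuilt comprehension labels every entry 'Weapon' regardless of the underlying detection category (the `cat` loop variable is unused), so per-class counts are collapsed under one label while the totals keep their original categories. For example, with illustrative counts:

```python
per_category_count = {"pistol": 2, "knife": 1, "rifle": 0}
current_counts = [{"category": 'Weapon', "count": count}
                  for cat, count in per_category_count.items() if count > 0]
print(current_counts)  # [{'category': 'Weapon', 'count': 2}, {'category': 'Weapon', 'count': 1}]
```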
matrice_analytics/post_processing/utils/alert_instance_utils.py:

@@ -76,7 +76,8 @@ class ALERT_INSTANCE:
         config_topic: str = "alert_instant_config_request",
         trigger_topic: str = "alert_instant_triggered",
         polling_interval: int = 10,
-        logger: Optional[logging.Logger] = None
+        logger: Optional[logging.Logger] = None,
+        app_deployment_id: Optional[str] = None
     ):
         """
         Initialize ALERT_INSTANCE.

@@ -88,6 +89,7 @@ class ALERT_INSTANCE:
             trigger_topic: Topic/stream name for publishing triggers
             polling_interval: Seconds between config polling
             logger: Python logger instance
+            app_deployment_id: App deployment ID to filter incoming alerts (only process alerts matching this ID)
         """
         self.redis_client = redis_client
         self.kafka_client = kafka_client

@@ -95,6 +97,7 @@ class ALERT_INSTANCE:
         self.trigger_topic = trigger_topic
         self.polling_interval = polling_interval
         self.logger = logger or logging.getLogger(__name__)
+        self.app_deployment_id = app_deployment_id
 
         # In-memory alert storage: {instant_alert_id: AlertConfig}
         self._alerts: Dict[str, AlertConfig] = {}

@@ -114,7 +117,7 @@ class ALERT_INSTANCE:
         self.logger.info(
             f"Initialized ALERT_INSTANCE with config_topic={config_topic}, "
             f"trigger_topic={trigger_topic}, polling_interval={polling_interval}s, "
-            f"cooldown={self._cooldown_seconds}s"
+            f"cooldown={self._cooldown_seconds}s, app_deployment_id={app_deployment_id}"
         )
 
     def start(self):
@@ -394,6 +397,29 @@ class ALERT_INSTANCE:
             self.logger.warning("[ALERT_DEBUG] Skipping malformed config with 'raw' key only")
             return
 
+        # Log detection_service field (informational only, no filtering)
+        detection_service = config_data.get('detection_service', '')
+        self.logger.info(f"[ALERT_DEBUG] detection_service: '{detection_service}'")
+
+        # Filter by app_deployment_id - only process alerts that match our app_deployment_id
+        incoming_app_deployment_id = config_data.get('app_deployment_id', '')
+        if self.app_deployment_id:
+            if incoming_app_deployment_id != self.app_deployment_id:
+                self.logger.info(
+                    f"[ALERT_DEBUG] Skipping alert - app_deployment_id mismatch: "
+                    f"incoming='{incoming_app_deployment_id}', ours='{self.app_deployment_id}'"
+                )
+                return
+            else:
+                self.logger.info(
+                    f"[ALERT_DEBUG] ✓ app_deployment_id match: '{incoming_app_deployment_id}'"
+                )
+        else:
+            self.logger.warning(
+                f"[ALERT_DEBUG] No app_deployment_id filter set, processing all alerts. "
+                f"Incoming app_deployment_id: '{incoming_app_deployment_id}'"
+            )
+
         # Log individual fields before creating AlertConfig
         self.logger.debug(f"[ALERT_DEBUG] Extracted fields from config_data:")
         self.logger.debug(f"[ALERT_DEBUG] - instant_alert_id: '{config_data.get('instant_alert_id', 'MISSING')}'")
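The filter only drops incoming configs when the instance was constructed with an `app_deployment_id` and the incoming value differs; with no filter set, every config is processed (with a warning). A condensed sketch of that decision (the `should_process` helper is illustrative, not part of the package):

```python
def should_process(own_id, incoming_id) -> bool:
    # Mirrors the app_deployment_id filtering added to the config handler.
    if own_id:
        return incoming_id == own_id   # mismatch -> skip this alert config
    return True                        # no filter configured -> process everything

print(should_process("dep-1", "dep-1"))  # True
print(should_process("dep-1", "dep-2"))  # False
print(should_process(None, "dep-2"))     # True (the real code logs a warning here)
```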
@@ -706,7 +732,13 @@ class ALERT_INSTANCE:
         detection: Dict[str, Any],
         config: Dict[str, Any]
     ) -> bool:
-        """
+        """
+        Evaluate license plate detection against alert criteria.
+
+        Supports two alert conditions:
+        - "in_list" (BLACKLIST): Alert ONLY when detected plate IS in targetPlates list
+        - "not_in_list" (WHITELIST): Alert when detected plate is NOT in targetPlates list
+        """
         self.logger.debug(f"[ALERT_DEBUG] ========== EVALUATING LPR ALERT ==========")
         self.logger.debug(f"[ALERT_DEBUG] Alert ID: {alert.instant_alert_id}")
         self.logger.debug(f"[ALERT_DEBUG] Alert Name: {alert.alert_name}")

@@ -715,9 +747,12 @@ class ALERT_INSTANCE:
 
         target_plates = config.get("targetPlates", [])
         min_confidence = config.get("minConfidence", 0.0)
+        # Get alertCondition: "in_list" (blacklist) or "not_in_list" (whitelist)
+        alert_condition = config.get("alertCondition", "in_list")
 
         self.logger.debug(f"[ALERT_DEBUG] Target plates: {target_plates}")
         self.logger.debug(f"[ALERT_DEBUG] Min confidence: {min_confidence}")
+        self.logger.info(f"[ALERT_DEBUG] Alert condition: '{alert_condition}' (in_list=blacklist, not_in_list=whitelist)")
 
         plate_number = detection.get("plateNumber", "").upper().strip()
         confidence = detection.get("confidence", 0.0)
@@ -725,37 +760,68 @@ class ALERT_INSTANCE:
         self.logger.debug(f"[ALERT_DEBUG] Detected plate (normalized): '{plate_number}'")
         self.logger.debug(f"[ALERT_DEBUG] Detection confidence: {confidence}")
 
+        # Skip empty plate numbers
+        if not plate_number:
+            self.logger.debug(f"[ALERT_DEBUG] ✗ Empty plate number, skipping")
+            return False
+
         # Check if plate matches target list (case-insensitive)
-
-
-            for target in target_plates
-        )
+        normalized_targets = [str(t).upper().strip() for t in target_plates]
+        plate_in_list = plate_number in normalized_targets
 
-        self.logger.debug(f"[ALERT_DEBUG]
-
-        normalized_targets = [str(t).upper().strip() for t in target_plates]
-        self.logger.debug(f"[ALERT_DEBUG] Normalized target plates: {normalized_targets}")
-        self.logger.debug(f"[ALERT_DEBUG] Plate '{plate_number}' not in {normalized_targets}")
+        self.logger.debug(f"[ALERT_DEBUG] Normalized target plates: {normalized_targets}")
+        self.logger.debug(f"[ALERT_DEBUG] Plate '{plate_number}' in list: {plate_in_list}")
 
-        # Check confidence threshold
-        min_confidence=0.05
+        # Check confidence threshold (minimum 0.05)
+        min_confidence = max(0.05, min_confidence)
         confidence_match = confidence >= min_confidence
 
         self.logger.debug(f"[ALERT_DEBUG] Confidence match result: {confidence_match} ({confidence} >= {min_confidence})")
 
-        if
-
-
-
-
-
+        # Determine if alert should trigger based on alertCondition
+        should_trigger = False
+
+        if alert_condition == "in_list":
+            # BLACKLIST: Alert only when plate IS in the target list
+            if plate_in_list and confidence_match:
+                should_trigger = True
+                self.logger.info(
+                    f"[ALERT_DEBUG] ✓ LPR BLACKLIST ALERT TRIGGERED: {alert.alert_name} - "
+                    f"Plate: {plate_number} IS in blacklist, Confidence: {confidence:.2f}"
+                )
+            else:
+                self.logger.debug(
+                    f"[ALERT_DEBUG] ✗ LPR blacklist alert NOT triggered: {alert.alert_name} - "
+                    f"Plate '{plate_number}' in_list={plate_in_list}, confidence_match={confidence_match}"
+                )
+
+        elif alert_condition == "not_in_list":
+            # WHITELIST: Alert when plate is NOT in the target list
+            if not plate_in_list and confidence_match:
+                should_trigger = True
+                self.logger.info(
+                    f"[ALERT_DEBUG] ✓ LPR WHITELIST ALERT TRIGGERED: {alert.alert_name} - "
+                    f"Plate: {plate_number} is NOT in whitelist, Confidence: {confidence:.2f}"
+                )
+            else:
+                self.logger.debug(
+                    f"[ALERT_DEBUG] ✗ LPR whitelist alert NOT triggered: {alert.alert_name} - "
+                    f"Plate '{plate_number}' in_list={plate_in_list} (whitelisted), confidence_match={confidence_match}"
+                )
+
         else:
-
-
-            f"
+            # Unknown condition, default to blacklist behavior for backward compatibility
+            self.logger.warning(
+                f"[ALERT_DEBUG] Unknown alertCondition '{alert_condition}', defaulting to 'in_list' (blacklist) behavior"
             )
+            if plate_in_list and confidence_match:
+                should_trigger = True
+                self.logger.info(
+                    f"[ALERT_DEBUG] ✓ LPR ALERT TRIGGERED (default): {alert.alert_name} - "
+                    f"Plate: {plate_number}, Confidence: {confidence:.2f}"
+                )
 
-        return
+        return should_trigger
 
     def _evaluate_count_alert(
         self,
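The rewritten evaluator normalizes both the detected plate and the target list, enforces a floor of 0.05 on the configured confidence threshold, and then branches on `alertCondition`, with unknown conditions falling back to blacklist behavior. A condensed, standalone version of the same decision, without the logging (the `evaluate_lpr_alert` function is illustrative only):

```python
def evaluate_lpr_alert(detection: dict, config: dict) -> bool:
    plate = detection.get("plateNumber", "").upper().strip()
    confidence = detection.get("confidence", 0.0)
    if not plate:
        return False

    targets = [str(t).upper().strip() for t in config.get("targetPlates", [])]
    in_list = plate in targets
    confident = confidence >= max(0.05, config.get("minConfidence", 0.0))
    condition = config.get("alertCondition", "in_list")

    if condition == "not_in_list":           # whitelist: alert on plates not in the list
        return (not in_list) and confident
    return in_list and confident             # "in_list" and unknown conditions: blacklist behavior

cfg = {"targetPlates": ["ka01ab1234"], "minConfidence": 0.3, "alertCondition": "in_list"}
print(evaluate_lpr_alert({"plateNumber": "KA01AB1234", "confidence": 0.9}, cfg))  # True (blacklist hit)
cfg["alertCondition"] = "not_in_list"
print(evaluate_lpr_alert({"plateNumber": "KA01AB1234", "confidence": 0.9}, cfg))  # False (whitelisted)
```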
@@ -868,10 +934,11 @@ class ALERT_INSTANCE:
             "detectionType": detection_type_raw,
             "confidence": detection.get("confidence", 0.0),
             "coordinates": detection.get("coordinates", {}),
-            "cameraName": detection.get("cameraName", "")
+            "cameraName": detection.get("cameraName", ""),
+            "locationName": detection.get("locationName", "")
         }
 
-        # Add type-specific
+        # Add type-specific fields
         if detection_type_raw == "license_plate":
             context_data.update({
                 "plateNumber": detection.get("plateNumber", ""),

@@ -900,6 +967,7 @@ class ALERT_INSTANCE:
         trigger_message = {
             "instant_alert_id": alert.instant_alert_id,
             "camera_id": alert.camera_id,
+            "frame_id": detection.get("frame_id", ""),
             "triggered_at": datetime.now(timezone.utc).isoformat(),
             "context_data": context_data
         }
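Putting it together, a triggered alert now ships the frame ID at the top level of the trigger message and the location name inside `context_data`. A representative message shape under those assumptions (field values are illustrative only):

```python
trigger_message = {
    "instant_alert_id": "alert-42",
    "camera_id": "cam-7",
    "frame_id": "f-000123",                 # newly propagated from the detection
    "triggered_at": "2024-01-01T00:00:00+00:00",
    "context_data": {
        "detectionType": "license_plate",
        "confidence": 0.91,
        "coordinates": {},
        "cameraName": "gate-cam-01",
        "locationName": "Entry Reception",  # newly added field
        "plateNumber": "KA01AB1234",
    },
}
```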