matrice_analytics-0.1.60-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/__init__.py +28 -0
- matrice_analytics/boundary_drawing_internal/README.md +305 -0
- matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
- matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
- matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
- matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
- matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
- matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
- matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
- matrice_analytics/post_processing/README.md +455 -0
- matrice_analytics/post_processing/__init__.py +732 -0
- matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
- matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
- matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
- matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
- matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
- matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
- matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
- matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
- matrice_analytics/post_processing/config.py +146 -0
- matrice_analytics/post_processing/core/__init__.py +63 -0
- matrice_analytics/post_processing/core/base.py +704 -0
- matrice_analytics/post_processing/core/config.py +3291 -0
- matrice_analytics/post_processing/core/config_utils.py +925 -0
- matrice_analytics/post_processing/face_reg/__init__.py +43 -0
- matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
- matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
- matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
- matrice_analytics/post_processing/ocr/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
- matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
- matrice_analytics/post_processing/post_processor.py +1175 -0
- matrice_analytics/post_processing/test_cases/__init__.py +1 -0
- matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
- matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
- matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
- matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
- matrice_analytics/post_processing/test_cases/test_config.py +852 -0
- matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
- matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
- matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
- matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
- matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
- matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
- matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
- matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
- matrice_analytics/post_processing/usecases/__init__.py +267 -0
- matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
- matrice_analytics/post_processing/usecases/age_detection.py +842 -0
- matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
- matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
- matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
- matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
- matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
- matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
- matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
- matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
- matrice_analytics/post_processing/usecases/car_service.py +1601 -0
- matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
- matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
- matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
- matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
- matrice_analytics/post_processing/usecases/color/clip.py +660 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
- matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
- matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
- matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
- matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
- matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
- matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
- matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
- matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
- matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
- matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
- matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
- matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
- matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
- matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
- matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
- matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
- matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
- matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
- matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
- matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
- matrice_analytics/post_processing/usecases/leaf.py +821 -0
- matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
- matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
- matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
- matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
- matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
- matrice_analytics/post_processing/usecases/parking.py +787 -0
- matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
- matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
- matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
- matrice_analytics/post_processing/usecases/people_counting.py +706 -0
- matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
- matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
- matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
- matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
- matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
- matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
- matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
- matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
- matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
- matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
- matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
- matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
- matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
- matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
- matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
- matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
- matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
- matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
- matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
- matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
- matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
- matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
- matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
- matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
- matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
- matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
- matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
- matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
- matrice_analytics/post_processing/utils/__init__.py +150 -0
- matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
- matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
- matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
- matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
- matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
- matrice_analytics/post_processing/utils/color_utils.py +592 -0
- matrice_analytics/post_processing/utils/counting_utils.py +182 -0
- matrice_analytics/post_processing/utils/filter_utils.py +261 -0
- matrice_analytics/post_processing/utils/format_utils.py +293 -0
- matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
- matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
- matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
- matrice_analytics/py.typed +0 -0
- matrice_analytics-0.1.60.dist-info/METADATA +481 -0
- matrice_analytics-0.1.60.dist-info/RECORD +196 -0
- matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
- matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
- matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/usecases/anti_spoofing_detection.py
@@ -0,0 +1,656 @@
+from typing import Any, Dict, List, Optional
+from dataclasses import asdict
+import time
+from datetime import datetime, timezone
+
+from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
+from ..utils import (
+    filter_by_confidence,
+    filter_by_categories,
+    apply_category_mapping,
+    count_objects_by_category,
+    count_objects_in_zones,
+    calculate_counting_summary,
+    match_results_structure,
+    bbox_smoothing,
+    BBoxSmoothingConfig,
+    BBoxSmoothingTracker
+)
+from dataclasses import dataclass, field
+from ..core.config import BaseConfig, AlertConfig, ZoneConfig
+
+
+@dataclass
+class AntiSpoofingDetectionConfig(BaseConfig):
+    """Configuration for anti-spoofing detection use case."""
+    # Smoothing configuration
+    enable_smoothing: bool = True
+    smoothing_algorithm: str = "observability"  # "window" or "observability"
+    smoothing_window_size: int = 20
+    smoothing_cooldown_frames: int = 5
+    smoothing_confidence_range_factor: float = 0.5
+
+    # Confidence thresholds
+    confidence_threshold: float = 0.6
+
+    usecase_categories: List[str] = field(
+        default_factory=lambda: [
+            'Zac Efron', 'Natalie Portman', 'Courtney Cox', 'Henry Cavill', 'Lisa Kudrow',
+            'Alia Bhatt', 'Roger Federer', 'Charlize Theron', 'Anushka Sharma', 'Billie Eilish',
+            'Vijay Deverakonda', 'Camila Cabello', 'Ellen Degeneres', 'Priyanka Chopra', 'Tom Cruise',
+            'Margot Robbie', 'Claire Holt', 'Hugh Jackman', 'Jessica Alba', 'Elizabeth Olsen',
+            'Akshay Kumar', 'Amitabh Bachchan', 'Virat Kohli', 'Andy Samberg', 'Brad Pitt',
+            'Dwayne Johnson', 'Alexandra Daddario', 'Hrithik Roshan', 'Robert Downey Jr'
+        ]
+    )
+
+    target_categories: List[str] = field(
+        default_factory=lambda: [
+            'Zac Efron', 'Natalie Portman', 'Courtney Cox', 'Henry Cavill', 'Lisa Kudrow',
+            'Alia Bhatt', 'Roger Federer', 'Charlize Theron', 'Anushka Sharma', 'Billie Eilish',
+            'Vijay Deverakonda', 'Camila Cabello', 'Ellen Degeneres', 'Priyanka Chopra', 'Tom Cruise',
+            'Margot Robbie', 'Claire Holt', 'Hugh Jackman', 'Jessica Alba', 'Elizabeth Olsen',
+            'Akshay Kumar', 'Amitabh Bachchan', 'Virat Kohli', 'Andy Samberg', 'Brad Pitt',
+            'Dwayne Johnson', 'Alexandra Daddario', 'Hrithik Roshan', 'Robert Downey Jr'
+        ]
+    )
+
+    alert_config: Optional[AlertConfig] = None
+
+    index_to_category: Optional[Dict[int, str]] = field(
+        default_factory=lambda: {
+            i: cat for i, cat in enumerate([
+                'Zac Efron', 'Natalie Portman', 'Courtney Cox', 'Henry Cavill', 'Lisa Kudrow',
+                'Alia Bhatt', 'Roger Federer', 'Charlize Theron', 'Anushka Sharma', 'Billie Eilish',
+                'Vijay Deverakonda', 'Camila Cabello', 'Ellen Degeneres', 'Priyanka Chopra', 'Tom Cruise',
+                'Margot Robbie', 'Claire Holt', 'Hugh Jackman', 'Jessica Alba', 'Elizabeth Olsen',
+                'Akshay Kumar', 'Amitabh Bachchan', 'Virat Kohli', 'Andy Samberg', 'Brad Pitt',
+                'Dwayne Johnson', 'Alexandra Daddario', 'Hrithik Roshan', 'Robert Downey Jr'
+            ])
+        }
+    )
+
+
+class AntiSpoofingDetectionUseCase(BaseProcessor):
+    def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
+        """
+        Get detailed information about track IDs (per frame).
+        """
+        frame_track_ids = set()
+        for det in detections:
+            tid = det.get('track_id')
+            if tid is not None:
+                frame_track_ids.add(tid)
+        total_track_ids = set()
+        for s in getattr(self, '_per_category_total_track_ids', {}).values():
+            total_track_ids.update(s)
+        return {
+            "total_count": len(total_track_ids),
+            "current_frame_count": len(frame_track_ids),
+            "total_unique_track_ids": len(total_track_ids),
+            "current_frame_track_ids": list(frame_track_ids),
+            "last_update_time": time.time(),
+            "total_frames_processed": getattr(self, '_total_frame_counter', 0)
+        }
+
+    def _update_tracking_state(self, detections: list):
+        """
+        Track unique categories track_ids per category for total count after tracking.
+        """
+        if not hasattr(self, "_per_category_total_track_ids"):
+            self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
+        self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
+
+        for det in detections:
+            cat = det.get("category")
+            raw_track_id = det.get("track_id")
+            if cat not in self.target_categories or raw_track_id is None:
+                continue
+            bbox = det.get("bounding_box", det.get("bbox"))
+            canonical_id = self._merge_or_register_track(raw_track_id, bbox)
+            det["track_id"] = canonical_id
+            self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
+            self._current_frame_track_ids[cat].add(canonical_id)
+
+    def get_total_counts(self):
+        """
+        Return total unique track_id count for each category.
+        """
+        return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
+
+    def _format_timestamp_for_video(self, timestamp: float) -> str:
+        """Format timestamp for video chunks (HH:MM:SS.ms format)."""
+        hours = int(timestamp // 3600)
+        minutes = int((timestamp % 3600) // 60)
+        seconds = timestamp % 60
+        return f"{hours:02d}:{minutes:02d}:{seconds:06.2f}"
+
+    def _format_timestamp_for_stream(self, timestamp: float) -> str:
+        """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
+        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
+        return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]]) -> str:
+        """Get formatted current timestamp based on stream type."""
+        if not stream_info:
+            return "00:00:00.00"
+        if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
+            stream_time_str = stream_info.get("video_timestamp", "")
+            return stream_time_str[:8]
+        else:
+            stream_time_str = stream_info.get("stream_time", "")
+            if stream_time_str:
+                try:
+                    timestamp_str = stream_time_str.replace(" UTC", "")
+                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
+                    return self._format_timestamp_for_stream(timestamp)
+                except:
+                    return self._format_timestamp_for_stream(time.time())
+            else:
+                return self._format_timestamp_for_stream(time.time())
+
+    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]]) -> str:
+        """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
+        if not stream_info:
+            return "00:00:00"
+        is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
+        if is_video_chunk or stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
+            return "00:00:00"
+        else:
+            if self._tracking_start_time is None:
+                stream_time_str = stream_info.get("stream_time", "")
+                if stream_time_str:
+                    try:
+                        timestamp_str = stream_time_str.replace(" UTC", "")
+                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                    except:
+                        self._tracking_start_time = time.time()
+                else:
+                    self._tracking_start_time = time.time()
+            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+            dt = dt.replace(minute=0, second=0, microsecond=0)
+            return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+    def __init__(self):
+        super().__init__("anti_spoofing_detection")
+        self.category = "security"
+        self.target_categories = [
+            'Zac Efron', 'Natalie Portman', 'Courtney Cox', 'Henry Cavill', 'Lisa Kudrow',
+            'Alia Bhatt', 'Roger Federer', 'Charlize Theron', 'Anushka Sharma', 'Billie Eilish',
+            'Vijay Deverakonda', 'Camila Cabello', 'Ellen Degeneres', 'Priyanka Chopra', 'Tom Cruise',
+            'Margot Robbie', 'Claire Holt', 'Hugh Jackman', 'Jessica Alba', 'Elizabeth Olsen',
+            'Akshay Kumar', 'Amitabh Bachchan', 'Virat Kohli', 'Andy Samberg', 'Brad Pitt',
+            'Dwayne Johnson', 'Alexandra Daddario', 'Hrithik Roshan', 'Robert Downey Jr'
+        ]
+        self.smoothing_tracker = None
+        self.tracker = None
+        self._total_frame_counter = 0
+        self._global_frame_offset = 0
+        self._tracking_start_time = None
+        self._track_aliases: Dict[Any, Any] = {}
+        self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
+        self._track_merge_iou_threshold: float = 0.05
+        self._track_merge_time_window: float = 7.0
+
+    def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
+                stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
+        start_time = time.time()
+        if not isinstance(config, AntiSpoofingDetectionConfig):
+            return self.create_error_result("Invalid config type", usecase=self.name, category=self.category, context=context)
+        if context is None:
+            context = ProcessingContext()
+        input_format = match_results_structure(data)
+        context.input_format = input_format
+        context.confidence_threshold = config.confidence_threshold
+
+        if config.confidence_threshold is not None:
+            processed_data = filter_by_confidence(data, config.confidence_threshold)
+            self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
+        else:
+            processed_data = data
+            self.logger.debug("Did not apply confidence filtering with threshold since nothing was provided")
+
+        if config.index_to_category:
+            processed_data = apply_category_mapping(processed_data, config.index_to_category)
+            self.logger.debug("Applied category mapping")
+
+        if config.target_categories:
+            processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
+            self.logger.debug("Applied category filtering")
+
+        if config.enable_smoothing:
+            if self.smoothing_tracker is None:
+                smoothing_config = BBoxSmoothingConfig(
+                    smoothing_algorithm=config.smoothing_algorithm,
+                    window_size=config.smoothing_window_size,
+                    cooldown_frames=config.smoothing_cooldown_frames,
+                    confidence_threshold=config.confidence_threshold,
+                    confidence_range_factor=config.smoothing_confidence_range_factor,
+                    enable_smoothing=True
+                )
+                self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
+            processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
+
+        try:
+            from ..advanced_tracker import AdvancedTracker
+            from ..advanced_tracker.config import TrackerConfig
+            if self.tracker is None:
+                tracker_config = TrackerConfig()
+                self.tracker = AdvancedTracker(tracker_config)
+                self.logger.info("Initialized AdvancedTracker for Anti-Spoofing Detection")
+            processed_data = self.tracker.update(processed_data)
+        except Exception as e:
+            self.logger.warning(f"AdvancedTracker failed: {e}")
+
+        self._update_tracking_state(processed_data)
+        self._total_frame_counter += 1
+
+        frame_number = None
+        if stream_info:
+            input_settings = stream_info.get("input_settings", {})
+            start_frame = input_settings.get("start_frame")
+            end_frame = input_settings.get("end_frame")
+            if start_frame is not None and end_frame is not None and start_frame == end_frame:
+                frame_number = start_frame
+
+        general_counting_summary = calculate_counting_summary(data)
+        counting_summary = self._count_categories(processed_data, config)
+        total_counts = self.get_total_counts()
+        counting_summary['total_counts'] = total_counts
+        insights = self._generate_insights(counting_summary, config)
+        alerts = self._check_alerts(counting_summary, config)
+        predictions = self._extract_predictions(processed_data)
+        summary = self._generate_summary(counting_summary, alerts)
+
+        events_list = self._generate_events(counting_summary, alerts, config, frame_number, stream_info)
+        tracking_stats_list = self._generate_tracking_stats(counting_summary, insights, summary, config, frame_number, stream_info)
+
+        events = events_list[0] if events_list else {}
+        tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
+        context.mark_completed()
+
+        result = self.create_result(
+            data={
+                "counting_summary": counting_summary,
+                "general_counting_summary": general_counting_summary,
+                "alerts": alerts,
+                "total_detections": counting_summary.get("total_count", 0),
+                "events": events,
+                "tracking_stats": tracking_stats,
+            },
+            usecase=self.name,
+            category=self.category,
+            context=context
+        )
+        result.summary = summary
+        result.insights = insights
+        result.predictions = predictions
+        return result
+
+    def _generate_events(self, counting_summary: Dict, alerts: List, config: AntiSpoofingDetectionConfig,
+                         frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+        """Generate structured events for the output format with frame-based keys."""
+        frame_key = str(frame_number) if frame_number is not None else "current_frame"
+        events = [{frame_key: []}]
+        frame_events = events[0][frame_key]
+        total_detections = counting_summary.get("total_count", 0)
+
+        if total_detections > 0:
+            level = "info"
+            intensity = 5.0
+            if config.alert_config and config.alert_config.count_thresholds:
+                threshold = config.alert_config.count_thresholds.get("all", 15)
+                intensity = min(10.0, (total_detections / threshold) * 10)
+                if intensity >= 7:
+                    level = "critical"
+                elif intensity >= 5:
+                    level = "warning"
+                else:
+                    level = "info"
+            else:
+                if total_detections > 25:
+                    level = "critical"
+                    intensity = 9.0
+                elif total_detections > 15:
+                    level = "warning"
+                    intensity = 7.0
+                else:
+                    level = "info"
+                    intensity = min(10.0, total_detections / 3.0)
+
+            human_text_lines = ["EVENTS DETECTED:"]
+            human_text_lines.append(f" - {total_detections} identities detected [INFO]")
+            human_text = "\n".join(human_text_lines)
+
+            event = {
+                "type": "anti_spoofing_detection",
+                "stream_time": datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S UTC"),
+                "level": level,
+                "intensity": round(intensity, 1),
+                "config": {
+                    "min_value": 0,
+                    "max_value": 10,
+                    "level_settings": {"info": 2, "warning": 5, "critical": 7}
+                },
+                "application_name": "Anti-Spoofing Detection System",
+                "application_version": "1.2",
+                "location_info": None,
+                "human_text": human_text
+            }
+            frame_events.append(event)
+
+        for alert in alerts:
+            total_detections = counting_summary.get("total_count", 0)
+            intensity_message = "ALERT: Low identity detection in the scene"
+            if config.alert_config and config.alert_config.count_thresholds:
+                threshold = config.alert_config.count_thresholds.get("all", 15)
+                percentage = (total_detections / threshold) * 100 if threshold > 0 else 0
+                if percentage < 20:
+                    intensity_message = "ALERT: Low identity detection in the scene"
+                elif percentage <= 50:
+                    intensity_message = "ALERT: Moderate identity detection in the scene"
+                elif percentage <= 70:
+                    intensity_message = "ALERT: High identity detection in the scene"
+                else:
+                    intensity_message = "ALERT: Severe identity detection in the scene"
+            else:
+                if total_detections > 15:
+                    intensity_message = "ALERT: High identity detection in the scene"
+                elif total_detections == 1:
+                    intensity_message = "ALERT: Low identity detection in the scene"
+                else:
+                    intensity_message = "ALERT: Moderate identity detection in the scene"
+
+            alert_event = {
+                "type": alert.get("type", "identity_alert"),
+                "stream_time": datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S UTC"),
+                "level": alert.get("severity", "warning"),
+                "intensity": 8.0,
+                "config": {
+                    "min_value": 0,
+                    "max_value": 10,
+                    "level_settings": {"info": 2, "warning": 5, "critical": 7}
+                },
+                "application_name": "Identity Alert System",
+                "application_version": "1.2",
+                "location_info": alert.get("zone"),
+                "human_text": f"{datetime.now(timezone.utc).strftime('%Y-%m-%d-%H:%M:%S UTC')} : {intensity_message}"
+            }
+            frame_events.append(alert_event)
+
+        return events
+
+    def _generate_tracking_stats(self, counting_summary: Dict, insights: List[str], summary: str,
+                                 config: AntiSpoofingDetectionConfig, frame_number: Optional[int] = None,
+                                 stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+        """Generate structured tracking stats for the output format with frame-based keys."""
+        frame_key = str(frame_number) if frame_number is not None else "current_frame"
+        tracking_stats = [{frame_key: []}]
+        frame_tracking_stats = tracking_stats[0][frame_key]
+
+        total_detections = counting_summary.get("total_count", 0)
+        total_counts = counting_summary.get("total_counts", {})
+        cumulative_total = sum(total_counts.values()) if total_counts else 0
+        per_category_count = counting_summary.get("per_category_count", {})
+
+        track_ids_info = self._get_track_ids_info(counting_summary.get("detections", []))
+
+        current_timestamp = self._get_current_timestamp_str(stream_info)
+        start_timestamp = self._get_start_timestamp_str(stream_info)
+
+        human_text_lines = []
+        human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
+        if total_detections > 0:
+            category_counts = [f"{count} {cat}" for cat, count in per_category_count.items()]
+            if len(category_counts) == 1:
+                detection_text = category_counts[0] + " detected"
+            elif len(category_counts) == 2:
+                detection_text = f"{category_counts[0]} and {category_counts[1]} detected"
+            else:
+                detection_text = f"{', '.join(category_counts[:-1])}, and {category_counts[-1]} detected"
+            human_text_lines.append(f"\t- {detection_text}")
+        else:
+            human_text_lines.append(f"\t- No detections")
+
+        human_text_lines.append("")
+        human_text_lines.append(f"TOTAL SINCE {start_timestamp}:")
+        human_text_lines.append(f"\t- Total Detection: {cumulative_total}")
+        if total_counts:
+            for cat, count in total_counts.items():
+                if count > 0:
+                    human_text_lines.append(f"\t- {cat} detected")
+
+        human_text = "\n".join(human_text_lines)
+
+        tracking_stat = {
+            "type": "anti_spoofing_detection",
+            "category": "security",
+            "count": total_detections,
+            "insights": insights,
+            "summary": summary,
+            "timestamp": datetime.now(timezone.utc).strftime('%Y-%m-%d-%H:%M:%S UTC'),
+            "human_text": human_text,
+            "track_ids_info": track_ids_info,
+            "global_frame_offset": getattr(self, '_global_frame_offset', 0),
+            "local_frame_id": frame_key,
+            "detections": counting_summary.get("detections", [])
+        }
+
+        frame_tracking_stats.append(tracking_stat)
+        return tracking_stats
+
+    def _count_categories(self, detections: list, config: AntiSpoofingDetectionConfig) -> dict:
+        """
+        Count the number of detections per category and return a summary dict.
+        """
+        counts = {}
+        for det in detections:
+            cat = det.get('category', 'unknown')
+            counts[cat] = counts.get(cat, 0) + 1
+        return {
+            "total_count": sum(counts.values()),
+            "per_category_count": counts,
+            "detections": [
+                {
+                    "bounding_box": det.get("bounding_box"),
+                    "category": det.get("category"),
+                    "confidence": det.get("confidence"),
+                    "track_id": det.get("track_id"),
+                    "frame_id": det.get("frame_id")
+                }
+                for det in detections
+            ]
+        }
+
+    CATEGORY_DISPLAY = {
+        cat: cat.replace(' ', '_').lower() for cat in [
+            'Zac Efron', 'Natalie Portman', 'Courtney Cox', 'Henry Cavill', 'Lisa Kudrow',
+            'Alia Bhatt', 'Roger Federer', 'Charlize Theron', 'Anushka Sharma', 'Billie Eilish',
+            'Vijay Deverakonda', 'Camila Cabello', 'Ellen Degeneres', 'Priyanka Chopra', 'Tom Cruise',
+            'Margot Robbie', 'Claire Holt', 'Hugh Jackman', 'Jessica Alba', 'Elizabeth Olsen',
+            'Akshay Kumar', 'Amitabh Bachchan', 'Virat Kohli', 'Andy Samberg', 'Brad Pitt',
+            'Dwayne Johnson', 'Alexandra Daddario', 'Hrithik Roshan', 'Robert Downey Jr'
+        ]
+    }
+
+    def _generate_insights(self, summary: dict, config: AntiSpoofingDetectionConfig) -> List[str]:
+        """Generate human-readable insights for each category."""
+
+        insights = []
+        per_cat = summary.get("per_category_count", {})
+        total_detections = summary.get("total_count", 0)
+
+        if total_detections == 0:
+            insights.append("No identities detected in the scene")
+            return insights
+        insights.append(f"EVENT: Detected {total_detections} identities in the scene")
+        intensity_threshold = None
+        if config.alert_config and config.alert_config.count_thresholds and "all" in config.alert_config.count_thresholds:
+            intensity_threshold = config.alert_config.count_thresholds["all"]
+        if intensity_threshold is not None:
+            percentage = (total_detections / intensity_threshold) * 100
+            if percentage < 20:
+                insights.append(f"INTENSITY: Low identity detection in the scene ({percentage:.1f}% of capacity)")
+            elif percentage <= 50:
+                insights.append(f"INTENSITY: Moderate identity detection in the scene ({percentage:.1f}% of capacity)")
+            elif percentage <= 70:
+                insights.append(f"INTENSITY: High identity detection in the scene ({percentage:.1f}% of capacity)")
+            else:
+                insights.append(f"INTENSITY: Severe identity detection in the scene ({percentage:.1f}% of capacity)")
+        for cat, count in per_cat.items():
+            display = self.CATEGORY_DISPLAY.get(cat, cat)
+            insights.append(f"{display}:{count}")
+        return insights
+
+    def _check_alerts(self, summary: dict, config: AntiSpoofingDetectionConfig) -> List[Dict]:
+        """
+        Check if any alert thresholds are exceeded and return alert dicts.
+        """
+        alerts = []
+        if not config.alert_config:
+            return alerts
+        total = summary.get("total_count", 0)
+        if config.alert_config.count_thresholds:
+            for category, threshold in config.alert_config.count_thresholds.items():
+                if category == "all" and total >= threshold:
+                    alerts.append({
+                        "type": "count_threshold",
+                        "severity": "warning",
+                        "message": f"Total identity count ({total}) exceeds threshold ({threshold})",
+                        "category": category,
+                        "current_count": total,
+                        "threshold": threshold
+                    })
+                elif category in summary.get("per_category_count", {}):
+                    count = summary.get("per_category_count", {})[category]
+                    if count >= threshold:
+                        alerts.append({
+                            "type": "count_threshold",
+                            "severity": "warning",
+                            "message": f"{category} count ({count}) exceeds threshold ({threshold})",
+                            "category": category,
+                            "current_count": count,
+                            "threshold": threshold
+                        })
+        return alerts
+
+    def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
+        """
+        Extract prediction details for output (category, confidence, bounding box).
+        """
+        return [
+            {
+                "category": det.get("category", "unknown"),
+                "confidence": det.get("confidence", 0.0),
+                "bounding_box": det.get("bounding_box", {})
+            }
+            for det in detections
+        ]
+
+    def _generate_summary(self, summary: dict, alerts: List) -> str:
+        """
+        Generate a human_text string for the result, including per-category insights.
+        """
+        total = summary.get("total_count", 0)
+        per_cat = summary.get("per_category_count", {})
+        cumulative = summary.get("total_counts", {})
+        cumulative_total = sum(cumulative.values()) if cumulative else 0
+        lines = []
+        if total > 0:
+            lines.append(f"{total} identity detections")
+            if per_cat:
+                lines.append("detections:")
+                for cat, count in per_cat.items():
+                    lines.append(f"\t{cat}:{count}")
+        else:
+            lines.append("No identity detections")
+        lines.append(f"Total identity detections: {cumulative_total}")
+        if alerts:
+            lines.append(f"{len(alerts)} alert(s)")
+        return "\n".join(lines)
+
+    def _compute_iou(self, box1: Any, box2: Any) -> float:
+        """Compute IoU between two bounding boxes which may be dicts or lists."""
+        def _bbox_to_list(bbox):
+            if bbox is None:
+                return []
+            if isinstance(bbox, list):
+                return bbox[:4] if len(bbox) >= 4 else []
+            if isinstance(bbox, dict):
+                if "xmin" in bbox:
+                    return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
+                if "x1" in bbox:
+                    return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
+                values = [v for v in bbox.values() if isinstance(v, (int, float))]
+                return values[:4] if len(values) >= 4 else []
+            return []
+
+        l1 = _bbox_to_list(box1)
+        l2 = _bbox_to_list(box2)
+        if len(l1) < 4 or len(l2) < 4:
+            return 0.0
+        x1_min, y1_min, x1_max, y1_max = l1
+        x2_min, y2_min, x2_max, y2_max = l2
+        x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
+        y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
+        x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
+        y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
+        inter_x_min = max(x1_min, x2_min)
+        inter_y_min = max(y1_min, y2_min)
+        inter_x_max = min(x1_max, x2_max)
+        inter_y_max = min(y1_max, y2_max)
+        inter_w = max(0.0, inter_x_max - inter_x_min)
+        inter_h = max(0.0, inter_y_max - inter_y_min)
+        inter_area = inter_w * inter_h
+        area1 = (x1_max - x1_min) * (y1_max - y1_min)
+        area2 = (x2_max - x2_min) * (y2_max - y2_min)
+        union_area = area1 + area2 - inter_area
+        return (inter_area / union_area) if union_area > 0 else 0.0
+
+    def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
+        """Return a stable canonical ID for a raw tracker ID."""
+        if raw_id is None or bbox is None:
+            return raw_id
+        now = time.time()
+        if raw_id in self._track_aliases:
+            canonical_id = self._track_aliases[raw_id]
+            track_info = self._canonical_tracks.get(canonical_id)
+            if track_info is not None:
+                track_info["last_bbox"] = bbox
+                track_info["last_update"] = now
+                track_info["raw_ids"].add(raw_id)
+            return canonical_id
+        for canonical_id, info in self._canonical_tracks.items():
+            if now - info["last_update"] > self._track_merge_time_window:
+                continue
+            iou = self._compute_iou(bbox, info["last_bbox"])
+            if iou >= self._track_merge_iou_threshold:
+                self._track_aliases[raw_id] = canonical_id
+                info["last_bbox"] = bbox
+                info["last_update"] = now
+                info["raw_ids"].add(raw_id)
+                return canonical_id
+        canonical_id = raw_id
+        self._track_aliases[raw_id] = canonical_id
+        self._canonical_tracks[canonical_id] = {
+            "last_bbox": bbox,
+            "last_update": now,
+            "raw_ids": {raw_id},
+        }
+        return canonical_id
+
+    def _format_timestamp(self, timestamp: float) -> str:
+        """Format a timestamp for human-readable output."""
+        return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
+
+    def _get_tracking_start_time(self) -> str:
+        """Get the tracking start time, formatted as a string."""
+        if self._tracking_start_time is None:
+            return "N/A"
+        return self._format_timestamp(self._tracking_start_time)
+
+    def _set_tracking_start_time(self) -> None:
+        """Set the tracking start time to the current time."""
+        self._tracking_start_time = time.time()