matrice-analytics 0.1.60__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/__init__.py +28 -0
- matrice_analytics/boundary_drawing_internal/README.md +305 -0
- matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
- matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
- matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
- matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
- matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
- matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
- matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
- matrice_analytics/post_processing/README.md +455 -0
- matrice_analytics/post_processing/__init__.py +732 -0
- matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
- matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
- matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
- matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
- matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
- matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
- matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
- matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
- matrice_analytics/post_processing/config.py +146 -0
- matrice_analytics/post_processing/core/__init__.py +63 -0
- matrice_analytics/post_processing/core/base.py +704 -0
- matrice_analytics/post_processing/core/config.py +3291 -0
- matrice_analytics/post_processing/core/config_utils.py +925 -0
- matrice_analytics/post_processing/face_reg/__init__.py +43 -0
- matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
- matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
- matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
- matrice_analytics/post_processing/ocr/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
- matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
- matrice_analytics/post_processing/post_processor.py +1175 -0
- matrice_analytics/post_processing/test_cases/__init__.py +1 -0
- matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
- matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
- matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
- matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
- matrice_analytics/post_processing/test_cases/test_config.py +852 -0
- matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
- matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
- matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
- matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
- matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
- matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
- matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
- matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
- matrice_analytics/post_processing/usecases/__init__.py +267 -0
- matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
- matrice_analytics/post_processing/usecases/age_detection.py +842 -0
- matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
- matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
- matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
- matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
- matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
- matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
- matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
- matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
- matrice_analytics/post_processing/usecases/car_service.py +1601 -0
- matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
- matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
- matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
- matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
- matrice_analytics/post_processing/usecases/color/clip.py +660 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
- matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
- matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
- matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
- matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
- matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
- matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
- matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
- matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
- matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
- matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
- matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
- matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
- matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
- matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
- matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
- matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
- matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
- matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
- matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
- matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
- matrice_analytics/post_processing/usecases/leaf.py +821 -0
- matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
- matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
- matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
- matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
- matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
- matrice_analytics/post_processing/usecases/parking.py +787 -0
- matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
- matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
- matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
- matrice_analytics/post_processing/usecases/people_counting.py +706 -0
- matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
- matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
- matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
- matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
- matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
- matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
- matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
- matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
- matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
- matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
- matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
- matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
- matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
- matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
- matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
- matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
- matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
- matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
- matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
- matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
- matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
- matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
- matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
- matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
- matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
- matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
- matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
- matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
- matrice_analytics/post_processing/utils/__init__.py +150 -0
- matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
- matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
- matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
- matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
- matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
- matrice_analytics/post_processing/utils/color_utils.py +592 -0
- matrice_analytics/post_processing/utils/counting_utils.py +182 -0
- matrice_analytics/post_processing/utils/filter_utils.py +261 -0
- matrice_analytics/post_processing/utils/format_utils.py +293 -0
- matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
- matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
- matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
- matrice_analytics/py.typed +0 -0
- matrice_analytics-0.1.60.dist-info/METADATA +481 -0
- matrice_analytics-0.1.60.dist-info/RECORD +196 -0
- matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
- matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
- matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/usecases/weapon_detection.py

@@ -0,0 +1,771 @@
"""
Weapon Detection Use Case for Post-Processing

This module provides weapon detection functionality with tracking, counting, and alert generation.
"""

from typing import Any, Dict, List, Optional
from dataclasses import dataclass, field
import time
from datetime import datetime, timezone

from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol
from ..utils import (
    filter_by_confidence,
    filter_by_categories,
    apply_category_mapping,
    count_objects_by_category,
    calculate_counting_summary,
    match_results_structure,
    bbox_smoothing,
    BBoxSmoothingConfig,
    BBoxSmoothingTracker
)
from ..core.config import BaseConfig, AlertConfig


@dataclass
class WeaponDetectionConfig(BaseConfig):
    """Configuration for weapon detection use case."""
    # Smoothing configuration
    enable_smoothing: bool = True
    smoothing_algorithm: str = "observability"
    smoothing_window_size: int = 20
    smoothing_cooldown_frames: int = 5
    smoothing_confidence_range_factor: float = 0.5

    # Confidence threshold
    confidence_threshold: float = 0.45

    # Categories to detect and track
    usecase_categories: List[str] = field(
        default_factory=lambda: ['billete', 'bluntweapon', 'glass', 'gun', 'knife', 'monedero', 'pistol', 'smartphone', 'tarjeta']
    )
    target_categories: List[str] = field(
        default_factory=lambda: ['bluntweapon', 'glass', 'gun', 'knife', 'monedero', 'pistol', 'tarjeta']
    )

    alert_config: Optional[AlertConfig] = None

    index_to_category: Optional[Dict[int, str]] = field(
        default_factory=lambda: {
            0: "billete",
            1: "bluntweapon",
            2: "glass",
            3: "gun",
            4: "knife",
            5: "monedero",
            6: "pistol",
            7: "smartphone",
            8: "tarjeta"
        }
    )
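A minimal configuration sketch, assuming the package is installed and importable under the path shown in the file list above; the field names come from the dataclass itself, while the values are illustrative overrides rather than recommended settings.

```python
# Illustrative only: field names from WeaponDetectionConfig above, values chosen for the example.
from matrice_analytics.post_processing.usecases.weapon_detection import WeaponDetectionConfig

config = WeaponDetectionConfig(
    confidence_threshold=0.5,                      # raise the 0.45 default
    target_categories=["gun", "knife", "pistol"],  # track only a subset of usecase_categories
    enable_smoothing=True,
)
print(config.index_to_category[3])  # -> "gun"
```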

class WeaponDetectionUseCase(BaseProcessor):
    def __init__(self):
        super().__init__("weapon_detection")
        self.category = "security"
        self.CASE_TYPE: Optional[str] = 'weapon_detection'
        self.CASE_VERSION: Optional[str] = '1.0'

        # List of categories to track
        self.target_categories = ['bluntweapon', 'glass', 'gun', 'knife', 'monedero', 'pistol', 'tarjeta']

        # Initialize smoothing tracker
        self.smoothing_tracker = None

        # Initialize advanced tracker (will be created on first use)
        self.tracker = None

        # Initialize tracking state variables
        self._total_frame_counter = 0
        self._global_frame_offset = 0
        self.start_timer = None

        # Track start time for "TOTAL SINCE" calculation
        self._tracking_start_time = None

        # Track-merge thresholds read by _merge_or_register_track(); the released file
        # never sets them, which raises AttributeError once a second track ID appears.
        # The values below are assumed editor defaults, not taken from the package.
        self._track_merge_iou_threshold = 0.5
        self._track_merge_time_window = 5.0

        self._ascending_alert_list: List[int] = []
        self.current_incident_end_timestamp: str = "N/A"

    def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
                stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
        """
        Main entry point for weapon detection post-processing.
        Applies category mapping, smoothing, tracking, counting, alerting, and summary generation.
        """
        start_time = time.time()
        if not isinstance(config, WeaponDetectionConfig):
            return self.create_error_result("Invalid config type", usecase=self.name, category=self.category, context=context)
        if context is None:
            context = ProcessingContext()

        # Detect input format and store in context
        input_format = match_results_structure(data)
        context.input_format = input_format

        if isinstance(config.confidence_threshold, str):
            config.confidence_threshold = float(config.confidence_threshold)

        context.confidence_threshold = config.confidence_threshold
        self.logger.info(f"Processing weapon detection with format: {input_format.value}")

        # Step 1: Apply confidence filtering
        if config.confidence_threshold is not None:
            processed_data = filter_by_confidence(data, config.confidence_threshold)
            self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
        else:
            processed_data = data
            self.logger.debug("No confidence filtering applied")

        # Step 2: Apply category mapping
        if config.index_to_category:
            processed_data = apply_category_mapping(processed_data, config.index_to_category)
            self.logger.debug("Applied category mapping")

        # Step 3: Filter by target categories
        if config.target_categories:
            processed_data = filter_by_categories(processed_data, config.target_categories)
            self.logger.debug(f"Filtered by target categories: {config.target_categories}")

        # Step 4: Log alerts for detected weapons
        for detection in processed_data:
            if detection.get('category') in config.target_categories:
                self.logger.warning(f"ALERT: {detection.get('category')} detected at {self._get_current_timestamp_str(stream_info)}")

        # Step 5: Apply bbox smoothing if enabled
        if config.enable_smoothing:
            if self.smoothing_tracker is None:
                smoothing_config = BBoxSmoothingConfig(
                    smoothing_algorithm=config.smoothing_algorithm,
                    window_size=config.smoothing_window_size,
                    cooldown_frames=config.smoothing_cooldown_frames,
                    confidence_threshold=config.confidence_threshold,
                    confidence_range_factor=config.smoothing_confidence_range_factor,
                    enable_smoothing=True
                )
                self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
            processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
            self.logger.debug(f"After bbox smoothing: {processed_data}")

        # Step 6: Apply advanced tracking
        try:
            from ..advanced_tracker import AdvancedTracker
            from ..advanced_tracker.config import TrackerConfig
            if self.tracker is None:
                tracker_config = TrackerConfig()
                self.tracker = AdvancedTracker(tracker_config)
                self.logger.info("Initialized AdvancedTracker for Weapon Detection")
            processed_data = self.tracker.update(processed_data)
        except Exception as e:
            self.logger.warning(f"AdvancedTracker failed: {e}")

        # Step 7: Update tracking state
        self._update_tracking_state(processed_data)
        self._total_frame_counter += 1

        # Step 8: Extract frame information
        frame_number = None
        if stream_info:
            input_settings = stream_info.get("input_settings", {})
            start_frame = input_settings.get("start_frame")
            end_frame = input_settings.get("end_frame")
            if start_frame is not None and end_frame is not None and start_frame == end_frame:
                frame_number = start_frame

        # Step 9: Compute summaries and alerts
        counting_summary = self._count_categories(processed_data, config)
        counting_summary['total_counts'] = self.get_total_counts()
        alerts = self._check_alerts(counting_summary, frame_number, config)
        predictions = self._extract_predictions(processed_data)

        # Step 10: Generate structured outputs
        incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
        tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
        business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, stream_info, is_empty=True)
        summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)

        # Step 11: Build result
        incidents = incidents_list[0] if incidents_list else {}
        tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
        business_analytics = business_analytics_list[0] if business_analytics_list else {}
        summary = summary_list[0] if summary_list else {}
        agg_summary = {str(frame_number): {
            "incidents": incidents,
            "tracking_stats": tracking_stats,
            "business_analytics": business_analytics,
            "alerts": alerts,
            "human_text": summary
        }}

        context.mark_completed()
        result = self.create_result(
            data={"agg_summary": agg_summary},
            usecase=self.name,
            category=self.category,
            context=context
        )
        self.logger.debug(f"Final result: {result}")
        return result
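A rough end-to-end sketch of the process() entry point above. The input shapes (integer class index under "category", plus "confidence" and "bounding_box", and the stream_info layout) are inferred from how process() reads them, and the result.data access assumes ProcessingResult exposes the payload passed to create_result(); neither is documented in this diff.

```python
# Illustrative only: input shapes are inferred from the code above, not from package docs.
from matrice_analytics.post_processing.usecases.weapon_detection import (
    WeaponDetectionConfig,
    WeaponDetectionUseCase,
)

usecase = WeaponDetectionUseCase()
config = WeaponDetectionConfig()

# Raw model output: "category" is the class index that index_to_category maps to a name.
detections = [
    {"category": 3, "confidence": 0.91,
     "bounding_box": {"xmin": 120, "ymin": 80, "xmax": 220, "ymax": 260}},
]

# Minimal stream_info: start_frame == end_frame lets process() derive frame_number.
stream_info = {
    "input_settings": {
        "start_frame": 42, "end_frame": 42, "original_fps": 30,
        "stream_time": "2024-01-01-10:00:00.000000 UTC",
    }
}

result = usecase.process(detections, config, stream_info=stream_info)
# Assuming ProcessingResult exposes the payload as .data (its class is not shown here).
print(result.data["agg_summary"]["42"]["incidents"])
```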

    # def _count_categories(self, detections: List, config: WeaponDetectionConfig) -> Dict[str, Any]:
    #     """
    #     Count the number of detections per category and return a summary dict.
    #     """
    #     counts = {}
    #     for det in detections:
    #         cat = det.get('category', 'unknown')
    #         counts[cat] = counts.get(cat, 0) + 1
    #     # Each detection dict will now include 'track_id' (and possibly 'frame_id')
    #     return {
    #         "total_count": sum(counts.values()),
    #         "per_category_count": counts,
    #         "detections": [
    #             {
    #                 "bounding_box": det.get("bounding_box"),
    #                 "category": "weapon",
    #                 "confidence": det.get("confidence"),
    #                 "track_id": det.get("track_id"),
    #                 "frame_id": det.get("frame_id")
    #             }
    #             for det in detections
    #         ]
    #     }

    def _count_categories(self, detections: List[Dict], config: WeaponDetectionConfig) -> Dict[str, Any]:
        """Count detections per category and attach one representative detection per track."""
        # unique_texts: set = set()
        valid_detections: List[Dict[str, Any]] = []

        # Group detections by track_id for per-track dominance
        tracks: Dict[Any, List[Dict[str, Any]]] = {}
        for det in detections:
            if not all(k in det for k in ['category', 'confidence', 'bounding_box']):
                continue
            tid = det.get('track_id')
            if tid is None:
                # If no track id, treat as its own pseudo-track keyed by bbox
                # (stringified so a dict/list bbox can serve as a dict key)
                tid = str(det.get("bounding_box") or det.get("bbox"))
            tracks.setdefault(tid, []).append(det)

        for tid, dets in tracks.items():
            # Pick a representative bbox (first occurrence)
            rep = dets[0]
            cat = "weapon"
            bbox = rep.get('bounding_box')
            conf = rep.get('confidence')
            frame_id = rep.get('frame_id')

            # # Compute dominant text for this track from last 50% of history
            # dominant_text = None
            # history = self.helper.get(tid, [])
            # if history:
            #     half = max(1, len(history) // 2)
            #     window = history[-half:]
            #     from collections import Counter as _Ctr
            #     dominant_text, _ = _Ctr(window).most_common(1)[0]
            # elif rep.get('plate_text'):
            #     candidate = self._clean_text(rep.get('plate_text', ''))
            #     if self._min_plate_len <= len(candidate) <= 6:
            #         dominant_text = candidate

            # # Fallback to already computed per-track mapping
            # if not dominant_text:
            #     dominant_text = self.unique_plate_track.get(tid)

            # # Enforce length 5–6 and uniqueness per frame
            # if dominant_text and self._min_plate_len <= len(dominant_text) <= 6:
            #     unique_texts.add(dominant_text)
            valid_detections.append({
                "bounding_box": bbox,
                "category": cat,
                "confidence": conf,
                "track_id": rep.get('track_id'),
                "frame_id": frame_id,
                "masks": rep.get("masks", []),
                # "plate_text": dominant_text
            })
        counts = {}
        for det in detections:
            cat = det.get('category', 'unknown')
            counts[cat] = counts.get(cat, 0) + 1

        return {
            "total_count": sum(counts.values()),
            "per_category_count": counts,
            "detections": valid_detections
        }

    def _generate_tracking_stats(self, counting_summary: Dict, alerts: List, config: WeaponDetectionConfig,
                                 frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
        """Generate structured tracking stats."""
        tracking_stats = []
        total_detections = counting_summary.get("total_count", 0)
        total_counts_dict = counting_summary.get("total_counts", {})
        per_category_count = counting_summary.get("per_category_count", {})
        current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
        start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
        high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
        high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
        camera_info = self.get_camera_info_from_stream(stream_info)

        # Build total_counts
        total_counts = [{"category": cat, "count": count} for cat, count in total_counts_dict.items() if count > 0]

        # Build current_counts
        current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items() if count > 0]

        # Prepare detections
        detections = []
        for detection in counting_summary.get("detections", []):
            bbox = detection.get("bounding_box", {})
            category = "weapon"  # detection.get("category", "weapon")
            segmentation = detection.get("masks", detection.get("segmentation", detection.get("mask", [])))
            detection_obj = self.create_detection_object(category, bbox, segmentation=None)
            detections.append(detection_obj)

        # Build alert_settings
        alert_settings = []
        if config.alert_config:
            alert_settings.append({
                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                "incident_category": self.CASE_TYPE,
                "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                "ascending": True,
                "settings": {t: v for t, v in zip(
                    getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                    getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON']
                )}
            })

        # Generate human text
        human_text_lines = [f"Tracking Statistics:"]
        human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")
        if total_detections > 0:
            for cat, count in per_category_count.items():
                if cat in self.target_categories and count > 0:
                    human_text_lines.append(f"\t{count} Weapon[s] detected")
        else:
            human_text_lines.append(f"\tNo Weapon[s] detected")
        human_text_lines.append("")
        human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
        for cat, count in total_counts_dict.items():
            if cat in self.target_categories and count > 0:
                human_text_lines.append(f"\t{count} Weapon[s] detected")
        if alerts:
            for alert in alerts:
                human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
        else:
            human_text_lines.append("Alerts: None")
        human_text = "\n".join(human_text_lines)

        reset_settings = [{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]

        tracking_stat = self.create_tracking_stats(
            total_counts=total_counts,
            current_counts=current_counts,
            detections=detections,
            human_text=human_text,
            camera_info=camera_info,
            alerts=alerts,
            alert_settings=alert_settings,
            reset_settings=reset_settings,
            start_time=high_precision_start_timestamp,
            reset_time=high_precision_reset_timestamp
        )
        tracking_stats.append(tracking_stat)
        return tracking_stats

    def _check_alerts(self, summary: Dict, frame_number: Any, config: WeaponDetectionConfig) -> List[Dict]:
        """
        Check if any alert thresholds are exceeded and return alert dicts.
        """
        def get_trend(data, lookback=900, threshold=0.6):
            window = data[-lookback:] if len(data) >= lookback else data
            if len(window) < 2:
                return True
            increasing = sum(1 for i in range(1, len(window)) if window[i] >= window[i - 1])
            return increasing / len(window) >= threshold

        frame_key = str(frame_number) if frame_number is not None else "current_frame"
        alerts = []
        total_detections = summary.get("total_count", 0)
        per_category_count = summary.get("per_category_count", {})

        if not config.alert_config:
            return alerts

        if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
            for category, threshold in config.alert_config.count_thresholds.items():
                if isinstance(threshold, str):
                    threshold = int(threshold)
                if category == "all" and total_detections > threshold:
                    alerts.append({
                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                        "alert_id": f"alert_{category}_{frame_key}",
                        "incident_category": self.CASE_TYPE,
                        "threshold_level": threshold,
                        "ascending": get_trend(self._ascending_alert_list),
                        "settings": {t: v for t, v in zip(
                            getattr(config.alert_config, 'alert_type', ['Default']),
                            getattr(config.alert_config, 'alert_value', ['JSON'])
                        )}
                    })
                elif category in per_category_count and per_category_count[category] > threshold:
                    alerts.append({
                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                        "alert_id": f"alert_{category}_{frame_key}",
                        "incident_category": self.CASE_TYPE,
                        "threshold_level": threshold,
                        "ascending": get_trend(self._ascending_alert_list),
                        "settings": {t: v for t, v in zip(
                            getattr(config.alert_config, 'alert_type', ['Default']),
                            getattr(config.alert_config, 'alert_value', ['JSON'])
                        )}
                    })
        return alerts
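The alert logic only reads count_thresholds, alert_type, and alert_value off config.alert_config, so a plain namespace object is enough to exercise it in isolation; the stand-in below is not the real AlertConfig API from ..core.config, which this diff does not show.

```python
# Stand-in alert config: only the attributes _check_alerts() actually reads.
from types import SimpleNamespace
from matrice_analytics.post_processing.usecases.weapon_detection import (
    WeaponDetectionConfig,
    WeaponDetectionUseCase,
)

alert_config = SimpleNamespace(
    count_thresholds={"all": 0, "knife": 1},  # "all" compares against total_count,
                                              # other keys against per_category_count
    alert_type=["Default"],
    alert_value=["JSON"],
)
config = WeaponDetectionConfig(alert_config=alert_config)

summary = {"total_count": 2, "per_category_count": {"gun": 2}}
alerts = WeaponDetectionUseCase()._check_alerts(summary, frame_number=42, config=config)
# -> one alert: total_count (2) exceeds the "all" threshold; there are no "knife"
#    entries in per_category_count, so that threshold never fires.
```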

    def _generate_incidents(self, counting_summary: Dict, alerts: List, config: WeaponDetectionConfig,
                            frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
        """Generate structured incidents for the output format."""
        incidents = []
        total_detections = counting_summary.get("total_count", 0)
        current_timestamp = self._get_current_timestamp_str(stream_info)
        camera_info = self.get_camera_info_from_stream(stream_info)

        self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list

        if total_detections > 0:
            level = "low"
            intensity = 5.0
            start_timestamp = self._get_start_timestamp_str(stream_info)
            if start_timestamp and self.current_incident_end_timestamp == 'N/A':
                self.current_incident_end_timestamp = 'Incident still active'
            elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
                    self.current_incident_end_timestamp = current_timestamp
            elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
                self.current_incident_end_timestamp = 'N/A'

            if config.alert_config and config.alert_config.count_thresholds:
                threshold = config.alert_config.count_thresholds.get("all", 15)
                if isinstance(threshold, str):
                    threshold = int(threshold)
                intensity = min(10.0, (total_detections / threshold) * 10)
                if intensity >= 9:
                    level = "critical"
                    self._ascending_alert_list.append(3)
                elif intensity >= 7:
                    level = "significant"
                    self._ascending_alert_list.append(2)
                elif intensity >= 5:
                    level = "medium"
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    self._ascending_alert_list.append(0)
            else:
                if total_detections > 30:
                    level = "critical"
                    intensity = 10.0
                    self._ascending_alert_list.append(3)
                elif total_detections > 25:
                    level = "significant"
                    intensity = 9.0
                    self._ascending_alert_list.append(2)
                elif total_detections > 15:
                    level = "medium"
                    intensity = 7.0
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    intensity = min(10.0, total_detections / 3.0)
                    self._ascending_alert_list.append(0)

            human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
            human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
            human_text = "\n".join(human_text_lines)

            alert_settings = []
            if config.alert_config:
                alert_settings.append({
                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                    "incident_category": self.CASE_TYPE,
                    "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                    "ascending": True,
                    "settings": {t: v for t, v in zip(
                        getattr(config.alert_config, 'alert_type', ['Default']),
                        getattr(config.alert_config, 'alert_value', ['JSON'])
                    )}
                })

            event = self.create_incident(
                incident_id=f"{self.CASE_TYPE}_{frame_number}",
                incident_type=self.CASE_TYPE,
                severity_level=level,
                human_text=human_text,
                camera_info=camera_info,
                alerts=alerts,
                alert_settings=alert_settings,
                start_time=start_timestamp,
                end_time=self.current_incident_end_timestamp,
                level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7}
            )
            incidents.append(event)
        else:
            self._ascending_alert_list.append(0)
            incidents.append({})

        return incidents

    def _generate_business_analytics(self, counting_summary: Dict, alerts: Any, config: WeaponDetectionConfig,
                                     stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
        """Generate standardized business analytics for the agg_summary structure."""
        if is_empty:
            return []
        # Add business analytics logic here if needed
        return []

    def _generate_summary(self, summary: Dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
        """
        Generate a human_text string for the tracking_stat, incident, business analytics and alerts.
        """
        lines = []
        lines.append("Application Name: " + self.CASE_TYPE)
        lines.append("Application Version: " + self.CASE_VERSION)
        if len(incidents) > 0:
            lines.append("Incidents: " + f"\n\t{incidents[0].get('human_text', 'No incidents detected')}")
        if len(tracking_stats) > 0:
            lines.append("Tracking Statistics: " + f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}")
        if len(business_analytics) > 0:
            lines.append("Business Analytics: " + f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}")

        if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
            lines.append("Summary: " + "No Summary Data")

        return ["\n".join(lines)]

    def _get_track_ids_info(self, detections: List[Dict]) -> Dict[str, Any]:
        """Get detailed information about track IDs."""
        frame_track_ids = {det.get('track_id') for det in detections if det.get('track_id') is not None}
        total_track_ids = set()
        for s in getattr(self, '_per_category_total_track_ids', {}).values():
            total_track_ids.update(s)
        return {
            "total_count": len(total_track_ids),
            "current_frame_count": len(frame_track_ids),
            "total_unique_track_ids": len(total_track_ids),
            "current_frame_track_ids": list(frame_track_ids),
            "last_update_time": time.time(),
            "total_frames_processed": getattr(self, '_total_frame_counter', 0)
        }

    def _update_tracking_state(self, detections: List[Dict]):
        """Track unique track_ids per category for total count after tracking."""
        if not hasattr(self, "_per_category_total_track_ids"):
            self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
            self._current_frame_track_ids = {cat: set() for cat in self.target_categories}

        for det in detections:
            cat = det.get("category")
            raw_track_id = det.get("track_id")
            if cat not in self.target_categories or raw_track_id is None:
                continue
            bbox = det.get("bounding_box", det.get("bbox"))
            canonical_id = self._merge_or_register_track(raw_track_id, bbox)
            det["track_id"] = canonical_id
            self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
            self._current_frame_track_ids[cat].add(canonical_id)

    def get_total_counts(self):
        """Return total unique track_id count for each category."""
        return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}

    def _format_timestamp(self, timestamp: Any) -> str:
        """Format a timestamp so that exactly two digits follow the decimal point (milliseconds)."""
        if isinstance(timestamp, (int, float)):
            timestamp = datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d-%H:%M:%S.%f UTC')
        if not isinstance(timestamp, str):
            return str(timestamp)
        if '.' not in timestamp:
            return timestamp
        main_part, fractional_and_suffix = timestamp.split('.', 1)
        if ' ' in fractional_and_suffix:
            fractional_part, suffix = fractional_and_suffix.split(' ', 1)
            suffix = ' ' + suffix
        else:
            fractional_part, suffix = fractional_and_suffix, ''
        fractional_part = (fractional_part + '00')[:2]
        return f"{main_part}.{fractional_part}{suffix}"

    def _format_timestamp_for_stream(self, timestamp: float) -> str:
        """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
        return dt.strftime('%Y:%m:%d %H:%M:%S')

    def _format_timestamp_for_video(self, timestamp: float) -> str:
        """Format timestamp for video chunks (HH:MM:SS.ms format)."""
        hours = int(timestamp // 3600)
        minutes = int((timestamp % 3600) // 60)
        seconds = round(float(timestamp % 60), 2)
        return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"

    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str] = None) -> str:
        """Get formatted current timestamp based on stream type."""
        if not stream_info:
            return "00:00:00.00"
        if precision:
            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
                if frame_id:
                    start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
                else:
                    start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
                stream_time_str = self._format_timestamp_for_video(start_time)
                return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
            else:
                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
            if frame_id:
                start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
            else:
                start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
            stream_time_str = self._format_timestamp_for_video(start_time)
            return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
        else:
            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
            if stream_time_str:
                try:
                    timestamp_str = stream_time_str.replace(" UTC", "")
                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
                    return self._format_timestamp_for_stream(timestamp)
                except:
                    return self._format_timestamp_for_stream(time.time())
            else:
                return self._format_timestamp_for_stream(time.time())

    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
        """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
        if not stream_info:
            return "00:00:00"
        if precision:
            if self.start_timer is None:
                self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
                return self._format_timestamp(self.start_timer)
            elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
                self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
                return self._format_timestamp(self.start_timer)
            else:
                return self._format_timestamp(self.start_timer)
        if self.start_timer is None:
            self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
            return self._format_timestamp(self.start_timer)
        elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
            self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
            return self._format_timestamp(self.start_timer)
        else:
            if self.start_timer is not None:
                return self._format_timestamp(self.start_timer)
            if self._tracking_start_time is None:
                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
                if stream_time_str:
                    try:
                        timestamp_str = stream_time_str.replace(" UTC", "")
                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
                    except:
                        self._tracking_start_time = time.time()
                else:
                    self._tracking_start_time = time.time()
            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
            dt = dt.replace(minute=0, second=0, microsecond=0)
            return dt.strftime('%Y:%m:%d %H:%M:%S')

    def _compute_iou(self, box1: Any, box2: Any) -> float:
        """Compute IoU between two bounding boxes."""
        def _bbox_to_list(bbox):
            if bbox is None:
                return []
            if isinstance(bbox, list):
                return bbox[:4] if len(bbox) >= 4 else []
            if isinstance(bbox, dict):
                if "xmin" in bbox:
                    return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
                if "x1" in bbox:
                    return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
                values = [v for v in bbox.values() if isinstance(v, (int, float))]
                return values[:4] if len(values) >= 4 else []
            return []

        l1 = _bbox_to_list(box1)
        l2 = _bbox_to_list(box2)
        if len(l1) < 4 or len(l2) < 4:
            return 0.0
        x1_min, y1_min, x1_max, y1_max = l1
        x2_min, y2_min, x2_max, y2_max = l2
        x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
        y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
        x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
        y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
        inter_x_min = max(x1_min, x2_min)
        inter_y_min = max(y1_min, y2_min)
        inter_x_max = min(x1_max, x2_max)
        inter_y_max = min(y1_max, y2_max)
        inter_w = max(0.0, inter_x_max - inter_x_min)
        inter_h = max(0.0, inter_y_max - inter_y_min)
        inter_area = inter_w * inter_h
        area1 = (x1_max - x1_min) * (y1_max - y1_min)
        area2 = (x2_max - x2_min) * (y2_max - y2_min)
        union_area = area1 + area2 - inter_area
        return (inter_area / union_area) if union_area > 0 else 0.0
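A quick hand-worked check of the IoU helper above (the numbers are the editor's, not taken from the package's tests):

```python
# Two 10x10 boxes offset by 5 px: intersection 5*5 = 25, union 100 + 100 - 25 = 175.
from matrice_analytics.post_processing.usecases.weapon_detection import WeaponDetectionUseCase

uc = WeaponDetectionUseCase()
iou = uc._compute_iou(
    {"xmin": 0, "ymin": 0, "xmax": 10, "ymax": 10},  # dict form
    [5, 5, 15, 15],                                   # list form is accepted too
)
print(round(iou, 3))  # 25 / 175 ≈ 0.143
```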

    def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
        """Return a stable canonical ID for a raw tracker ID."""
        if raw_id is None or bbox is None:
            return raw_id
        now = time.time()
        if not hasattr(self, '_track_aliases'):
            self._track_aliases = {}
        if not hasattr(self, '_canonical_tracks'):
            self._canonical_tracks = {}
        if raw_id in self._track_aliases:
            canonical_id = self._track_aliases[raw_id]
            track_info = self._canonical_tracks.get(canonical_id)
            if track_info is not None:
                track_info["last_bbox"] = bbox
                track_info["last_update"] = now
                track_info["raw_ids"].add(raw_id)
            return canonical_id
        for canonical_id, info in self._canonical_tracks.items():
            if now - info["last_update"] > self._track_merge_time_window:
                continue
            iou = self._compute_iou(bbox, info["last_bbox"])
            if iou >= self._track_merge_iou_threshold:
                self._track_aliases[raw_id] = canonical_id
                info["last_bbox"] = bbox
                info["last_update"] = now
                info["raw_ids"].add(raw_id)
                return canonical_id
        canonical_id = raw_id
        self._track_aliases[raw_id] = canonical_id
        self._canonical_tracks[canonical_id] = {
            "last_bbox": bbox,
            "last_update": now,
            "raw_ids": {raw_id},
        }
        return canonical_id

    def _extract_predictions(self, detections: List[Dict]) -> List[Dict[str, Any]]:
        """Extract prediction details for output."""
        return [
            {
                "category": det.get("category", "weapon"),
                "confidence": det.get("confidence", 0.0),
                "bounding_box": det.get("bounding_box", {})
            }
            for det in detections
        ]