matrice-analytics 0.1.60__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/__init__.py +28 -0
- matrice_analytics/boundary_drawing_internal/README.md +305 -0
- matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
- matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
- matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
- matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
- matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
- matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
- matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
- matrice_analytics/post_processing/README.md +455 -0
- matrice_analytics/post_processing/__init__.py +732 -0
- matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
- matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
- matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
- matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
- matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
- matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
- matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
- matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
- matrice_analytics/post_processing/config.py +146 -0
- matrice_analytics/post_processing/core/__init__.py +63 -0
- matrice_analytics/post_processing/core/base.py +704 -0
- matrice_analytics/post_processing/core/config.py +3291 -0
- matrice_analytics/post_processing/core/config_utils.py +925 -0
- matrice_analytics/post_processing/face_reg/__init__.py +43 -0
- matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
- matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
- matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
- matrice_analytics/post_processing/ocr/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
- matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
- matrice_analytics/post_processing/post_processor.py +1175 -0
- matrice_analytics/post_processing/test_cases/__init__.py +1 -0
- matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
- matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
- matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
- matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
- matrice_analytics/post_processing/test_cases/test_config.py +852 -0
- matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
- matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
- matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
- matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
- matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
- matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
- matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
- matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
- matrice_analytics/post_processing/usecases/__init__.py +267 -0
- matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
- matrice_analytics/post_processing/usecases/age_detection.py +842 -0
- matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
- matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
- matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
- matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
- matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
- matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
- matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
- matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
- matrice_analytics/post_processing/usecases/car_service.py +1601 -0
- matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
- matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
- matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
- matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
- matrice_analytics/post_processing/usecases/color/clip.py +660 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
- matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
- matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
- matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
- matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
- matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
- matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
- matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
- matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
- matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
- matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
- matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
- matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
- matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
- matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
- matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
- matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
- matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
- matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
- matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
- matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
- matrice_analytics/post_processing/usecases/leaf.py +821 -0
- matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
- matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
- matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
- matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
- matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
- matrice_analytics/post_processing/usecases/parking.py +787 -0
- matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
- matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
- matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
- matrice_analytics/post_processing/usecases/people_counting.py +706 -0
- matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
- matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
- matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
- matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
- matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
- matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
- matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
- matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
- matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
- matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
- matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
- matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
- matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
- matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
- matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
- matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
- matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
- matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
- matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
- matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
- matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
- matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
- matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
- matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
- matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
- matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
- matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
- matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
- matrice_analytics/post_processing/utils/__init__.py +150 -0
- matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
- matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
- matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
- matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
- matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
- matrice_analytics/post_processing/utils/color_utils.py +592 -0
- matrice_analytics/post_processing/utils/counting_utils.py +182 -0
- matrice_analytics/post_processing/utils/filter_utils.py +261 -0
- matrice_analytics/post_processing/utils/format_utils.py +293 -0
- matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
- matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
- matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
- matrice_analytics/py.typed +0 -0
- matrice_analytics-0.1.60.dist-info/METADATA +481 -0
- matrice_analytics-0.1.60.dist-info/RECORD +196 -0
- matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
- matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
- matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
@@ -0,0 +1,775 @@
```python
from typing import Any, Dict, List, Optional
from dataclasses import asdict
import time
from datetime import datetime, timezone

from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
from ..utils import (
    filter_by_confidence,
    filter_by_categories,
    apply_category_mapping,
    count_objects_by_category,
    count_objects_in_zones,
    calculate_counting_summary,
    match_results_structure,
    bbox_smoothing,
    BBoxSmoothingConfig,
    BBoxSmoothingTracker
)
from dataclasses import dataclass, field
from ..core.config import BaseConfig, AlertConfig, ZoneConfig


@dataclass
class UndergroundPipelineDefectConfig(BaseConfig):
    """Configuration for Underground Pipeline Defect Detection use case."""
    # Smoothing configuration
    enable_smoothing: bool = True
    smoothing_algorithm: str = "observability"  # "window" or "observability"
    smoothing_window_size: int = 20
    smoothing_cooldown_frames: int = 5
    smoothing_confidence_range_factor: float = 0.5
    usecase: str = "underground_pipeline_defect"

    # Confidence threshold
    confidence_threshold: float = 0.5

    usecase_categories: List[str] = field(
        default_factory=lambda: ['hole', 'crack', 'obstacle']
    )

    target_categories: List[str] = field(
        default_factory=lambda: ['hole', 'crack', 'obstacle']
    )

    alert_config: Optional[AlertConfig] = None

    index_to_category: Optional[Dict[int, str]] = field(
        default_factory=lambda: {
            0: 'hole',
            1: 'crack',
            2: 'obstacle'
        }
    )
```
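The config above is plain data. A minimal setup sketch follows; the import path mirrors the file location in this wheel, and anything beyond the dataclass fields shown above (for example the `AlertConfig` constructor) is an assumption, not a documented signature:

```python
# Minimal wiring sketch; assumes the module is importable at its wheel path.
from matrice_analytics.post_processing.usecases.underground_pipeline_defect_detection import (
    UndergroundPipelineDefectConfig,
    UndergroundPipelineDefectUseCase,
)

config = UndergroundPipelineDefectConfig(
    confidence_threshold=0.6,   # drop detections scored below 0.6
    enable_smoothing=True,      # stabilize boxes across frames
)
processor = UndergroundPipelineDefectUseCase()
```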
```python
class UndergroundPipelineDefectUseCase(BaseProcessor):
    def __init__(self):
        super().__init__("underground_pipeline_defect")
        self.category = "general"

        self.CASE_TYPE: Optional[str] = 'underground_pipeline_defect'
        self.CASE_VERSION: Optional[str] = '1.0'
        # List of categories to track
        self.target_categories = ['hole', 'crack', 'obstacle']

        # Initialize smoothing tracker
        self.smoothing_tracker = None

        # Initialize advanced tracker (will be created on first use)
        self.tracker = None
        # Initialize tracking state variables
        self._total_frame_counter = 0
        self._global_frame_offset = 0

        # Track start time for "TOTAL SINCE" calculation
        self._tracking_start_time = None

        self._track_aliases: Dict[Any, Any] = {}
        self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
        # Tunable parameters – adjust if necessary for specific scenarios
        self._track_merge_iou_threshold: float = 0.05  # IoU ≥ 0.05
        self._track_merge_time_window: float = 7.0  # seconds within which to merge

        self._ascending_alert_list: List[int] = []
        self.current_incident_end_timestamp: str = "N/A"
        self.start_timer = None

    def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
                stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
        """
        Main entry point for post-processing.
        Applies category mapping, smoothing, counting, alerting, and summary generation.
        Returns a ProcessingResult with all relevant outputs.
        """
        start_time = time.time()
        # Ensure config is correct type
        if not isinstance(config, UndergroundPipelineDefectConfig):
            return self.create_error_result("Invalid config type", usecase=self.name, category=self.category,
                                            context=context)
        if context is None:
            context = ProcessingContext()

        # Detect input format and store in context
        input_format = match_results_structure(data)
        context.input_format = input_format
        context.confidence_threshold = config.confidence_threshold
        print(f"config.confidence_threshold: {config.confidence_threshold}")
        print("Raw data: ", data)

        if config.confidence_threshold is not None:
            processed_data = filter_by_confidence(data, config.confidence_threshold)
            self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
        else:
            processed_data = data
            self.logger.debug("Did not apply confidence filtering since no threshold was provided")

        # Step 2: Apply category mapping if provided
        if config.index_to_category:
            processed_data = apply_category_mapping(processed_data, config.index_to_category)
            self.logger.debug("Applied category mapping")

        print(f"processed_data: {processed_data}")

        if config.target_categories:
            processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
            self.logger.debug("Applied category filtering")

        # Apply bbox smoothing if enabled
        if config.enable_smoothing:
            if self.smoothing_tracker is None:
                smoothing_config = BBoxSmoothingConfig(
                    smoothing_algorithm=config.smoothing_algorithm,
                    window_size=config.smoothing_window_size,
                    cooldown_frames=config.smoothing_cooldown_frames,
                    confidence_threshold=config.confidence_threshold,
                    confidence_range_factor=config.smoothing_confidence_range_factor,
                    enable_smoothing=True
                )
                self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
            processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)

        # Advanced tracking (BYTETracker-like)
        try:
            from ..advanced_tracker import AdvancedTracker
            from ..advanced_tracker.config import TrackerConfig

            if self.tracker is None:
                if config.confidence_threshold is not None:
                    tracker_config = TrackerConfig(
                        track_high_thresh=float(config.confidence_threshold),
                        track_low_thresh=max(0.05, float(config.confidence_threshold) / 2),
                        new_track_thresh=float(config.confidence_threshold)
                    )
                else:
                    tracker_config = TrackerConfig()
                self.tracker = AdvancedTracker(tracker_config)
                self.logger.info(
                    "Initialized AdvancedTracker for tracking with thresholds: "
                    f"high={tracker_config.track_high_thresh}, "
                    f"low={tracker_config.track_low_thresh}, "
                    f"new={tracker_config.new_track_thresh}"
                )

            processed_data = self.tracker.update(processed_data)

        except Exception as e:
            self.logger.warning(f"AdvancedTracker failed: {e}")

        # Update tracking state for total count per label
        self._update_tracking_state(processed_data)

        # Update frame counter
        self._total_frame_counter += 1
        print(f"processed_data after tracking: {processed_data}")

        # Extract frame information from stream_info
        frame_number = None
        if stream_info:
            input_settings = stream_info.get("input_settings", {})
            start_frame = input_settings.get("start_frame")
            end_frame = input_settings.get("end_frame")
            if start_frame is not None and end_frame is not None and start_frame == end_frame:
                frame_number = start_frame

        # Compute summaries and alerts
        general_counting_summary = calculate_counting_summary(data)
        counting_summary = self._count_categories(processed_data, config)
        total_counts = self.get_total_counts()
        counting_summary['total_counts'] = total_counts

        alerts = self._check_alerts(counting_summary, frame_number, config)
        predictions = self._extract_predictions(processed_data)

        # Generate structured incidents, tracking stats, and business analytics
        incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
        tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
        business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, stream_info, is_empty=True)
        summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)

        # Extract frame-based dictionaries
        incidents = incidents_list[0] if incidents_list else {}
        tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
        business_analytics = business_analytics_list[0] if business_analytics_list else {}
        summary = summary_list[0] if summary_list else {}
        agg_summary = {str(frame_number): {
            "incidents": incidents,
            "tracking_stats": tracking_stats,
            "business_analytics": business_analytics,
            "alerts": alerts,
            "human_text": summary}
        }

        context.mark_completed()

        result = self.create_result(
            data={"agg_summary": agg_summary},
            usecase=self.name,
            category=self.category,
            context=context
        )

        return result
```
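`process()` runs a fixed pipeline (confidence filter, index-to-category mapping, category filter, optional bbox smoothing, tracking, then summary generation) and keys all outputs in `agg_summary` by frame number. Note the category filter checks `config.target_categories` but filters against `self.target_categories`; the two happen to hold the same defaults. A driver sketch, assuming list-of-dict detections as the raw input format and that `ProcessingResult` exposes its payload as `.data`:

```python
# Hypothetical single-frame call, continuing the setup sketch above.
detections = [
    {"category": 1, "confidence": 0.82,   # index 1 -> 'crack' via index_to_category
     "bounding_box": {"xmin": 120, "ymin": 40, "xmax": 210, "ymax": 95}},
]
stream_info = {"input_settings": {"start_frame": 7, "end_frame": 7, "original_fps": 30}}

result = processor.process(detections, config, stream_info=stream_info)
frame_out = result.data["agg_summary"]["7"]   # keyed by str(frame_number)
print(frame_out["incidents"], frame_out["alerts"])
```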
```python
    def _check_alerts(self, summary: dict, frame_number: Any, config: UndergroundPipelineDefectConfig) -> List[Dict]:
        """
        Check if any alert thresholds are exceeded and return alert dicts.
        """
        def get_trend(data, lookback=900, threshold=0.6):
            window = data[-lookback:] if len(data) >= lookback else data
            if len(window) < 2:
                return True
            increasing = 0
            total = 0
            for i in range(1, len(window)):
                if window[i] >= window[i - 1]:
                    increasing += 1
                total += 1
            ratio = increasing / total
            return ratio >= threshold

        frame_key = str(frame_number) if frame_number is not None else "current_frame"
        alerts = []
        total_detections = summary.get("total_count", 0)
        total_counts_dict = summary.get("total_counts", {})
        cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0
        per_category_count = summary.get("per_category_count", {})

        if not config.alert_config:
            return alerts

        total = summary.get("total_count", 0)
        if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
            for category, threshold in config.alert_config.count_thresholds.items():
                if category == "all" and total > threshold:
                    alerts.append({
                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                        "alert_id": "alert_" + category + '_' + frame_key,
                        "incident_category": self.CASE_TYPE,
                        "threshold_level": threshold,
                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                                                          getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
                                     }
                    })
                elif category in summary.get("per_category_count", {}):
                    count = summary.get("per_category_count", {})[category]
                    if count > threshold:
                        alerts.append({
                            "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                            "alert_id": "alert_" + category + '_' + frame_key,
                            "incident_category": self.CASE_TYPE,
                            "threshold_level": threshold,
                            "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                            "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                                                              getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
                                         }
                        })
        return alerts
```
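The inner `get_trend` helper calls the severity history "ascending" when at least `threshold` of the consecutive steps are non-decreasing, and defaults to True with fewer than two samples. A condensed, behavior-equivalent standalone copy for illustration:

```python
def get_trend(data, lookback=900, threshold=0.6):
    # Condensed copy of the helper defined inside _check_alerts.
    window = data[-lookback:] if len(data) >= lookback else data
    if len(window) < 2:
        return True  # too little history: default to "ascending"
    increasing = sum(1 for i in range(1, len(window)) if window[i] >= window[i - 1])
    return increasing / (len(window) - 1) >= threshold

print(get_trend([0, 1, 1, 2, 3]))   # True: 4/4 steps are non-decreasing
print(get_trend([3, 2, 1, 1, 0]))   # False: only 1/4 steps non-decreasing
```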
```python
    def _generate_incidents(self, counting_summary: Dict, alerts: List, config: UndergroundPipelineDefectConfig,
                            frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
        """Generate structured incidents for the output format with frame-based keys."""
        incidents = []
        total_detections = counting_summary.get("total_count", 0)
        current_timestamp = self._get_current_timestamp_str(stream_info)
        camera_info = self.get_camera_info_from_stream(stream_info)

        self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list

        if total_detections > 0:
            level = "low"
            intensity = 5.0
            start_timestamp = self._get_start_timestamp_str(stream_info)
            if start_timestamp and self.current_incident_end_timestamp == 'N/A':
                self.current_incident_end_timestamp = 'Incident still active'
            elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
                    self.current_incident_end_timestamp = current_timestamp
            elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
                self.current_incident_end_timestamp = 'N/A'

            if config.alert_config and config.alert_config.count_thresholds:
                threshold = config.alert_config.count_thresholds.get("all", 15)
                intensity = min(10.0, (total_detections / threshold) * 10)

                if intensity >= 9:
                    level = "critical"
                    self._ascending_alert_list.append(3)
                elif intensity >= 7:
                    level = "significant"
                    self._ascending_alert_list.append(2)
                elif intensity >= 5:
                    level = "medium"
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    self._ascending_alert_list.append(0)
            else:
                if total_detections > 30:
                    level = "critical"
                    intensity = 10.0
                    self._ascending_alert_list.append(3)
                elif total_detections > 25:
                    level = "significant"
                    intensity = 9.0
                    self._ascending_alert_list.append(2)
                elif total_detections > 15:
                    level = "medium"
                    intensity = 7.0
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    intensity = min(10.0, total_detections / 3.0)
                    self._ascending_alert_list.append(0)

            human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
            human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
            human_text = "\n".join(human_text_lines)

            alert_settings = []
            if config.alert_config and hasattr(config.alert_config, 'alert_type'):
                alert_settings.append({
                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                    "incident_category": self.CASE_TYPE,
                    "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                    "ascending": True,
                    "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                                                      getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
                                 }
                })

            event = self.create_incident(incident_id=self.CASE_TYPE + '_' + str(frame_number), incident_type=self.CASE_TYPE,
                                         severity_level=level, human_text=human_text, camera_info=camera_info, alerts=alerts,
                                         alert_settings=alert_settings, start_time=start_timestamp,
                                         end_time=self.current_incident_end_timestamp,
                                         level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7})
            incidents.append(event)
        else:
            self._ascending_alert_list.append(0)
            incidents.append({})

        return incidents
```
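When an `"all"` count threshold is configured, severity is a linear ramp, intensity = min(10, detections / threshold x 10), bucketed at 9 / 7 / 5. A standalone mirror of that one branch (the fallback branch without a configured threshold uses the fixed 30 / 25 / 15 counts instead):

```python
def severity_from_count(total_detections: int, threshold: int = 15):
    # Mirrors only the thresholded branch of _generate_incidents.
    intensity = min(10.0, (total_detections / threshold) * 10)
    if intensity >= 9:
        return "critical", intensity
    elif intensity >= 7:
        return "significant", intensity
    elif intensity >= 5:
        return "medium", intensity
    return "low", intensity

print(severity_from_count(12))   # ('significant', 8.0)
print(severity_from_count(20))   # ('critical', 10.0), capped at 10
```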
```python
    def _generate_tracking_stats(
            self,
            counting_summary: Dict,
            alerts: List,
            config: UndergroundPipelineDefectConfig,
            frame_number: Optional[int] = None,
            stream_info: Optional[Dict[str, Any]] = None
    ) -> List[Dict]:
        """Generate structured tracking stats matching the expected format."""
        camera_info = self.get_camera_info_from_stream(stream_info)
        tracking_stats = []

        total_detections = counting_summary.get("total_count", 0)
        total_counts_dict = counting_summary.get("total_counts", {})
        cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0
        per_category_count = counting_summary.get("per_category_count", {})

        current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
        start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
        high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
        high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)

        total_counts = []
        for cat, count in total_counts_dict.items():
            if count > 0:
                total_counts.append({
                    "category": cat,
                    "count": count
                })

        current_counts = []
        for cat, count in per_category_count.items():
            if count > 0 or total_detections > 0:
                current_counts.append({
                    "category": cat,
                    "count": count
                })

        detections = []
        for detection in counting_summary.get("detections", []):
            bbox = detection.get("bounding_box", {})
            category = detection.get("category", "unknown")
            if detection.get("masks"):
                segmentation = detection.get("masks", [])
                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
            elif detection.get("segmentation"):
                segmentation = detection.get("segmentation")
                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
            elif detection.get("mask"):
                segmentation = detection.get("mask")
                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
            else:
                detection_obj = self.create_detection_object(category, bbox)
            detections.append(detection_obj)

        alert_settings = []
        if config.alert_config and hasattr(config.alert_config, 'alert_type'):
            alert_settings.append({
                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                "incident_category": self.CASE_TYPE,
                "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                "ascending": True,
                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                                                  getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
                             }
            })

        human_text_lines = [f"Tracking Statistics:"]
        human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
        if total_detections > 0:
            category_counts = [f"{count} {cat}" for cat, count in per_category_count.items()]
            if len(category_counts) == 1:
                detection_text = category_counts[0] + " detected"
            elif len(category_counts) == 2:
                detection_text = f"{category_counts[0]} and {category_counts[1]} detected"
            else:
                detection_text = f"{', '.join(category_counts[:-1])}, and {category_counts[-1]} detected"
            human_text_lines.append(f"\t- {detection_text}")
        else:
            human_text_lines.append(f"\t- No detections")

        human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
        human_text_lines.append(f"Total Defects Detected: {cumulative_total}")
        for cat, count in total_counts_dict.items():
            if count > 0:
                human_text_lines.append(f"\t{cat}: {count}")

        if alerts:
            for alert in alerts:
                human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
        else:
            human_text_lines.append("Alerts: None")

        human_text = "\n".join(human_text_lines)
        reset_settings = [
            {
                "interval_type": "daily",
                "reset_time": {
                    "value": 9,
                    "time_unit": "hour"
                }
            }
        ]

        tracking_stat = self.create_tracking_stats(total_counts=total_counts, current_counts=current_counts,
                                                   detections=detections, human_text=human_text, camera_info=camera_info,
                                                   alerts=alerts, alert_settings=alert_settings,
                                                   reset_settings=reset_settings, start_time=high_precision_start_timestamp,
                                                   reset_time=high_precision_reset_timestamp)

        tracking_stats.append(tracking_stat)
        return tracking_stats
```
```python
    def _generate_business_analytics(self, counting_summary: Dict, alerts: Any, config: UndergroundPipelineDefectConfig,
                                     stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
        """Generate standardized business analytics for the agg_summary structure."""
        if is_empty:
            return []
        # No analytics are produced for this use case yet; return an empty list
        # rather than falling through and implicitly returning None, so callers
        # can safely take len() of the result.
        return []
```
```python
    def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[Dict]:
        """
        Generate a human_text dictionary for tracking stats, incidents, business analytics, and alerts.
        """
        lines = {}
        lines["Application Name"] = self.CASE_TYPE
        lines["Application Version"] = self.CASE_VERSION
        if len(incidents) > 0:
            lines["Incidents"] = f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
        if len(tracking_stats) > 0:
            lines["Tracking Statistics"] = f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
        if len(business_analytics) > 0:
            lines["Business Analytics"] = f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"

        if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
            lines["Summary"] = "No Summary Data"

        return [lines]

    def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
        """
        Get detailed information about track IDs (per frame).
        """
        frame_track_ids = set()
        for det in detections:
            tid = det.get('track_id')
            if tid is not None:
                frame_track_ids.add(tid)
        total_track_ids = set()
        for s in getattr(self, '_per_category_total_track_ids', {}).values():
            total_track_ids.update(s)
        return {
            "total_count": len(total_track_ids),
            "current_frame_count": len(frame_track_ids),
            "total_unique_track_ids": len(total_track_ids),
            "current_frame_track_ids": list(frame_track_ids),
            "last_update_time": time.time(),
            "total_frames_processed": getattr(self, '_total_frame_counter', 0)
        }
```
```python
    def _update_tracking_state(self, detections: list):
        """
        Track unique categories track_ids per category for total count after tracking.
        """
        if not hasattr(self, "_per_category_total_track_ids"):
            self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
        self._current_frame_track_ids = {cat: set() for cat in self.target_categories}

        for det in detections:
            cat = det.get("category")
            raw_track_id = det.get("track_id")
            if cat not in self.target_categories or raw_track_id is None:
                continue
            bbox = det.get("bounding_box", det.get("bbox"))
            canonical_id = self._merge_or_register_track(raw_track_id, bbox)
            det["track_id"] = canonical_id
            self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
            self._current_frame_track_ids[cat].add(canonical_id)

    def get_total_counts(self):
        """
        Return total unique track_id count for each category.
        """
        return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
```
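Totals here are "unique canonical track IDs seen since start", not per-frame sums, so a defect that stays in view across many frames is counted once. A stripped-down illustration of the set-based accumulation (the IoU-based merging is omitted):

```python
# Minimal sketch of the "total since start" counting.
per_category_total = {"crack": set(), "hole": set()}

frames = [
    [{"category": "crack", "track_id": 4}],
    [{"category": "crack", "track_id": 4},   # same track: not recounted
     {"category": "hole", "track_id": 9}],
]
for frame in frames:
    for det in frame:
        per_category_total[det["category"]].add(det["track_id"])

print({cat: len(ids) for cat, ids in per_category_total.items()})
# {'crack': 1, 'hole': 1}
```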
```python
    def _format_timestamp(self, timestamp: Any) -> str:
        """Format a timestamp to have exactly two digits after the decimal point (milliseconds)."""
        if isinstance(timestamp, (int, float)):
            timestamp = datetime.fromtimestamp(timestamp, timezone.utc).strftime(
                '%Y-%m-%d-%H:%M:%S.%f UTC'
            )
        if not isinstance(timestamp, str):
            return str(timestamp)
        if '.' not in timestamp:
            return timestamp
        main_part, fractional_and_suffix = timestamp.split('.', 1)
        if ' ' in fractional_and_suffix:
            fractional_part, suffix = fractional_and_suffix.split(' ', 1)
            suffix = ' ' + suffix
        else:
            fractional_part, suffix = fractional_and_suffix, ''
        fractional_part = (fractional_part + '00')[:2]
        return f"{main_part}.{fractional_part}{suffix}"
```
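Note that `_format_timestamp` truncates the fractional seconds to two digits rather than rounding. The string manipulation in isolation:

```python
# Truncation behavior of _format_timestamp, shown standalone.
ts = "2025-01-01-12:00:00.987654 UTC"
main, rest = ts.split('.', 1)       # '2025-01-01-12:00:00', '987654 UTC'
frac, suffix = rest.split(' ', 1)   # '987654', 'UTC'
print(f"{main}.{(frac + '00')[:2]} {suffix}")
# 2025-01-01-12:00:00.98 UTC  (truncated, not rounded)
```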
```python
    def _format_timestamp_for_stream(self, timestamp: float) -> str:
        """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
        return dt.strftime('%Y:%m:%d %H:%M:%S')

    def _format_timestamp_for_video(self, timestamp: float) -> str:
        """Format timestamp for video chunks (HH:MM:SS.ms format)."""
        hours = int(timestamp // 3600)
        minutes = int((timestamp % 3600) // 60)
        seconds = round(float(timestamp % 60), 2)
        return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"

    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str] = None) -> str:
        """Get formatted current timestamp based on stream type."""
        if not stream_info:
            return "00:00:00.00"
        if precision:
            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
                if frame_id:
                    start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
                else:
                    start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
                stream_time_str = self._format_timestamp_for_video(start_time)
                return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
            else:
                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")

        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
            if frame_id:
                start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
            else:
                start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
            stream_time_str = self._format_timestamp_for_video(start_time)
            return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
        else:
            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
            if stream_time_str:
                try:
                    timestamp_str = stream_time_str.replace(" UTC", "")
                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
                    return self._format_timestamp_for_stream(timestamp)
                except:
                    return self._format_timestamp_for_stream(time.time())
            else:
                return self._format_timestamp_for_stream(time.time())

    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
        """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
        if not stream_info:
            return "00:00:00"
        if precision:
            if self.start_timer is None:
                self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
                return self._format_timestamp(self.start_timer)
            elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
                self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
                return self._format_timestamp(self.start_timer)
            else:
                return self._format_timestamp(self.start_timer)

        if self.start_timer is None:
            self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
            return self._format_timestamp(self.start_timer)
        elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
            self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
            return self._format_timestamp(self.start_timer)
        else:
            if self.start_timer is not None:
                return self._format_timestamp(self.start_timer)
            if self._tracking_start_time is None:
                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
                if stream_time_str:
                    try:
                        timestamp_str = stream_time_str.replace(" UTC", "")
                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
                    except:
                        self._tracking_start_time = time.time()
                else:
                    self._tracking_start_time = time.time()
            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
            dt = dt.replace(minute=0, second=0, microsecond=0)
            return dt.strftime('%Y:%m:%d %H:%M:%S')

    def _get_tracking_start_time(self) -> str:
        """Get the tracking start time, formatted as a string."""
        if self._tracking_start_time is None:
            return "N/A"
        return self._format_timestamp(self._tracking_start_time)

    def _set_tracking_start_time(self) -> None:
        """Set the tracking start time to the current time."""
        self._tracking_start_time = time.time()
```
```python
    def _count_categories(self, detections: list, config: UndergroundPipelineDefectConfig) -> dict:
        """
        Count the number of detections per category and return a summary dict.
        """
        counts = {}
        for det in detections:
            cat = det.get('category', 'unknown')
            counts[cat] = counts.get(cat, 0) + 1
        return {
            "total_count": sum(counts.values()),
            "per_category_count": counts,
            "detections": [
                {
                    "bounding_box": det.get("bounding_box"),
                    "category": det.get("category"),
                    "confidence": det.get("confidence"),
                    "track_id": det.get("track_id"),
                    "frame_id": det.get("frame_id")
                }
                for det in detections
            ]
        }
```
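`_count_categories` ignores its `config` argument and is a pure function of the detection list. Continuing the sketch above, for example:

```python
dets = [
    {"category": "crack", "confidence": 0.91},
    {"category": "crack", "confidence": 0.72},
    {"category": "hole",  "confidence": 0.80},
]
summary = processor._count_categories(dets, config)
print(summary["total_count"])          # 3
print(summary["per_category_count"])   # {'crack': 2, 'hole': 1}
```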
```python
    def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
        """
        Extract prediction details for output (category, confidence, bounding box).
        """
        return [
            {
                "category": det.get("category", "unknown"),
                "confidence": det.get("confidence", 0.0),
                "bounding_box": det.get("bounding_box", {})
            }
            for det in detections
        ]

    def _compute_iou(self, box1: Any, box2: Any) -> float:
        """Compute IoU between two bounding boxes which may be dicts or lists."""
        def _bbox_to_list(bbox):
            if bbox is None:
                return []
            if isinstance(bbox, list):
                return bbox[:4] if len(bbox) >= 4 else []
            if isinstance(bbox, dict):
                if "xmin" in bbox:
                    return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
                if "x1" in bbox:
                    return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
                values = [v for v in bbox.values() if isinstance(v, (int, float))]
                return values[:4] if len(values) >= 4 else []
            return []

        l1 = _bbox_to_list(box1)
        l2 = _bbox_to_list(box2)
        if len(l1) < 4 or len(l2) < 4:
            return 0.0
        x1_min, y1_min, x1_max, y1_max = l1
        x2_min, y2_min, x2_max, y2_max = l2

        x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
        y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
        x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
        y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)

        inter_x_min = max(x1_min, x2_min)
        inter_y_min = max(y1_min, y2_min)
        inter_x_max = min(x1_max, x2_max)
        inter_y_max = min(y1_max, y2_max)

        inter_w = max(0.0, inter_x_max - inter_x_min)
        inter_h = max(0.0, inter_y_max - inter_y_min)
        inter_area = inter_w * inter_h

        area1 = (x1_max - x1_min) * (y1_max - y1_min)
        area2 = (x2_max - x2_min) * (y2_max - y2_min)
        union_area = area1 + area2 - inter_area

        return (inter_area / union_area) if union_area > 0 else 0.0
```
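A quick arithmetic check of `_compute_iou`, exercising both dict formats it accepts (continuing the sketch above):

```python
a = {"xmin": 0, "ymin": 0, "xmax": 10, "ymax": 10}   # area 100
b = {"x1": 5, "y1": 5, "x2": 15, "y2": 15}           # area 100
# intersection: 5 x 5 = 25; union: 100 + 100 - 25 = 175
print(processor._compute_iou(a, b))  # 0.142857..., well above the 0.05 merge threshold
```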
```python
    def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
        """Return a stable canonical ID for a raw tracker ID, merging fragmented tracks."""
        if raw_id is None or bbox is None:
            return raw_id

        now = time.time()

        if raw_id in self._track_aliases:
            canonical_id = self._track_aliases[raw_id]
            track_info = self._canonical_tracks.get(canonical_id)
            if track_info is not None:
                track_info["last_bbox"] = bbox
                track_info["last_update"] = now
                track_info["raw_ids"].add(raw_id)
            return canonical_id

        for canonical_id, info in self._canonical_tracks.items():
            if now - info["last_update"] > self._track_merge_time_window:
                continue
            iou = self._compute_iou(bbox, info["last_bbox"])
            if iou >= self._track_merge_iou_threshold:
                self._track_aliases[raw_id] = canonical_id
                info["last_bbox"] = bbox
                info["last_update"] = now
                info["raw_ids"].add(raw_id)
                return canonical_id

        canonical_id = raw_id
        self._track_aliases[raw_id] = canonical_id
        self._canonical_tracks[canonical_id] = {
            "last_bbox": bbox,
            "last_update": now,
            "raw_ids": {raw_id},
        }
        return canonical_id
```
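`_merge_or_register_track` is what heals tracker fragmentation: a re-detection under a new raw ID is aliased to an existing canonical ID when its box overlaps a track updated within the 7-second window at IoU >= 0.05. Continuing the sketch:

```python
# A defect re-detected under a new raw ID still maps to one canonical ID.
box = {"xmin": 100, "ymin": 50, "xmax": 160, "ymax": 110}
cid_a = processor._merge_or_register_track(17, box)   # new track: canonical ID 17
cid_b = processor._merge_or_register_track(42, box)   # same spot: merged into 17
print(cid_a, cid_b)                                   # 17 17
```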