matrice_analytics-0.1.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of matrice-analytics might be problematic.
- matrice_analytics/__init__.py +28 -0
- matrice_analytics/boundary_drawing_internal/README.md +305 -0
- matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
- matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
- matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
- matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
- matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
- matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
- matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
- matrice_analytics/post_processing/README.md +455 -0
- matrice_analytics/post_processing/__init__.py +732 -0
- matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
- matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
- matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
- matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
- matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
- matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
- matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
- matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
- matrice_analytics/post_processing/config.py +142 -0
- matrice_analytics/post_processing/core/__init__.py +63 -0
- matrice_analytics/post_processing/core/base.py +704 -0
- matrice_analytics/post_processing/core/config.py +3188 -0
- matrice_analytics/post_processing/core/config_utils.py +925 -0
- matrice_analytics/post_processing/face_reg/__init__.py +43 -0
- matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
- matrice_analytics/post_processing/face_reg/embedding_manager.py +681 -0
- matrice_analytics/post_processing/face_reg/face_recognition.py +1870 -0
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +339 -0
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +283 -0
- matrice_analytics/post_processing/ocr/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/easyocr_extractor.py +248 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +271 -0
- matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
- matrice_analytics/post_processing/post_processor.py +1153 -0
- matrice_analytics/post_processing/test_cases/__init__.py +1 -0
- matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
- matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
- matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
- matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
- matrice_analytics/post_processing/test_cases/test_config.py +852 -0
- matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
- matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
- matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
- matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
- matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
- matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
- matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
- matrice_analytics/post_processing/usecases/__init__.py +267 -0
- matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
- matrice_analytics/post_processing/usecases/age_detection.py +842 -0
- matrice_analytics/post_processing/usecases/age_gender_detection.py +1043 -0
- matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
- matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
- matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
- matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
- matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
- matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
- matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
- matrice_analytics/post_processing/usecases/car_service.py +1601 -0
- matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
- matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
- matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
- matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
- matrice_analytics/post_processing/usecases/color/clip.py +232 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
- matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
- matrice_analytics/post_processing/usecases/color_detection.py +1835 -0
- matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
- matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
- matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
- matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
- matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +930 -0
- matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
- matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
- matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
- matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
- matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
- matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
- matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
- matrice_analytics/post_processing/usecases/fire_detection.py +1112 -0
- matrice_analytics/post_processing/usecases/flare_analysis.py +891 -0
- matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
- matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
- matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
- matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
- matrice_analytics/post_processing/usecases/leaf.py +821 -0
- matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
- matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/license_plate_detection.py +914 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1194 -0
- matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
- matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
- matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
- matrice_analytics/post_processing/usecases/parking.py +787 -0
- matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
- matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
- matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
- matrice_analytics/post_processing/usecases/people_counting.py +1728 -0
- matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
- matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
- matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
- matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
- matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
- matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
- matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
- matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
- matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
- matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
- matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
- matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
- matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
- matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
- matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
- matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
- matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
- matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
- matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
- matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +950 -0
- matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
- matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
- matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
- matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
- matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
- matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
- matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
- matrice_analytics/post_processing/utils/__init__.py +150 -0
- matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
- matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
- matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
- matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
- matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
- matrice_analytics/post_processing/utils/color_utils.py +592 -0
- matrice_analytics/post_processing/utils/counting_utils.py +182 -0
- matrice_analytics/post_processing/utils/filter_utils.py +261 -0
- matrice_analytics/post_processing/utils/format_utils.py +293 -0
- matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
- matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
- matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
- matrice_analytics/py.typed +0 -0
- matrice_analytics-0.1.2.dist-info/METADATA +481 -0
- matrice_analytics-0.1.2.dist-info/RECORD +160 -0
- matrice_analytics-0.1.2.dist-info/WHEEL +5 -0
- matrice_analytics-0.1.2.dist-info/licenses/LICENSE.txt +21 -0
- matrice_analytics-0.1.2.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py
@@ -0,0 +1,604 @@
+from typing import Any, Dict, List, Optional
+from dataclasses import asdict, dataclass, field
+import time
+from datetime import datetime, timezone
+
+from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol
+from ..utils import (
+    filter_by_confidence,
+    apply_category_mapping,
+    calculate_counting_summary,
+    match_results_structure,
+    bbox_smoothing,
+    BBoxSmoothingConfig,
+    BBoxSmoothingTracker
+)
+from ..core.config import BaseConfig, AlertConfig
+
+@dataclass
+class HistopathologicalCancerDetectionConfig(BaseConfig):
+    """Configuration for Histopathological Cancer Detection."""
+    enable_smoothing: bool = True
+    smoothing_algorithm: str = "observability"
+    smoothing_window_size: int = 20
+    smoothing_cooldown_frames: int = 5
+    smoothing_confidence_range_factor: float = 0.5
+    confidence_threshold: float = 0.6
+    usecase_categories: List[str] = field(
+        default_factory=lambda: ['Benign Cell', 'Cancer Cell', 'Correct Lumen', 'Incorrect Lumen']
+    )
+    target_categories: List[str] = field(
+        default_factory=lambda: ['Benign Cell', 'Cancer Cell', 'Correct Lumen', 'Incorrect Lumen']
+    )
+    alert_config: Optional[AlertConfig] = None
+    index_to_category: Optional[Dict[int, str]] = field(
+        default_factory=lambda: {
+            0: "Benign Cell",
+            1: "Cancer Cell",
+            2: "Correct Lumen",
+            3: "Incorrect Lumen"
+        }
+    )
+
+class HistopathologicalCancerDetectionUseCase(BaseProcessor):
+    CATEGORY_DISPLAY = {
+        "Benign Cell": "Benign Cell",
+        "Cancer Cell": "Cancer Cell",
+        "Correct Lumen": "Correct Lumen",
+        "Incorrect Lumen": "Incorrect Lumen"
+    }
+
+    def __init__(self):
+        super().__init__("histopathological_cancer_detection")
+        self.category = "healthcare"
+        self.CASE_TYPE = "histopathological_cancer_detection"
+        self.CASE_VERSION = "1.0"
+        self.target_categories = ['Benign Cell', 'Cancer Cell', 'Correct Lumen', 'Incorrect Lumen']
+        self.smoothing_tracker = None
+        self.tracker = None
+        self._total_frame_counter = 0
+        self._global_frame_offset = 0
+        self._tracking_start_time = None
+        self._track_aliases: Dict[Any, Any] = {}
+        self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
+        self._track_merge_iou_threshold: float = 0.05
+        self._track_merge_time_window: float = 7.0
+        self._ascending_alert_list: List[int] = []
+        self.current_incident_end_timestamp: str = "N/A"
+
+    def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
+                stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
+        start_time = time.time()
+        if not isinstance(config, HistopathologicalCancerDetectionConfig):
+            return self.create_error_result("Invalid config type", usecase=self.name, category=self.category,
+                                            context=context)
+        if context is None:
+            context = ProcessingContext()
+
+        input_format = match_results_structure(data)
+        context.input_format = input_format
+        context.confidence_threshold = config.confidence_threshold
+
+        if config.confidence_threshold is not None:
+            processed_data = filter_by_confidence(data, config.confidence_threshold)
+            self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
+        else:
+            processed_data = data
+            self.logger.debug("No confidence filtering applied")
+
+        if config.index_to_category:
+            processed_data = apply_category_mapping(processed_data, config.index_to_category)
+            self.logger.debug("Applied category mapping")
+
+        if config.target_categories:
+            processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
+            self.logger.debug("Applied category filtering")
+
+        if config.enable_smoothing:
+            if self.smoothing_tracker is None:
+                smoothing_config = BBoxSmoothingConfig(
+                    smoothing_algorithm=config.smoothing_algorithm,
+                    window_size=config.smoothing_window_size,
+                    cooldown_frames=config.smoothing_cooldown_frames,
+                    confidence_threshold=config.confidence_threshold,
+                    confidence_range_factor=config.smoothing_confidence_range_factor,
+                    enable_smoothing=True
+                )
+                self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
+            processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
+
+        try:
+            from ..advanced_tracker import AdvancedTracker
+            from ..advanced_tracker.config import TrackerConfig
+            if self.tracker is None:
+                tracker_config = TrackerConfig()
+                self.tracker = AdvancedTracker(tracker_config)
+                self.logger.info("Initialized AdvancedTracker for Histopathological Cancer Detection")
+            processed_data = self.tracker.update(processed_data)
+        except Exception as e:
+            self.logger.warning(f"AdvancedTracker failed: {e}")
+
+        self._update_tracking_state(processed_data)
+        self._total_frame_counter += 1
+
+        frame_number = None
+        if stream_info:
+            input_settings = stream_info.get("input_settings", {})
+            start_frame = input_settings.get("start_frame")
+            end_frame = input_settings.get("end_frame")
+            if start_frame is not None and end_frame is not None and start_frame == end_frame:
+                frame_number = start_frame
+
+        general_counting_summary = calculate_counting_summary(data)
+        counting_summary = self._count_categories(processed_data, config)
+        total_counts = self.get_total_counts()
+        counting_summary['total_counts'] = total_counts
+
+        alerts = self._check_alerts(counting_summary, frame_number, config)
+        incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
+        tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
+        business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, frame_number, stream_info, is_empty=True)
+        summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)
+
+        incidents = incidents_list[0] if incidents_list else {}
+        tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
+        business_analytics = business_analytics_list[0] if business_analytics_list else {}
+        summary = summary_list[0] if summary_list else {}
+        agg_summary = {str(frame_number): {
+            "incidents": incidents,
+            "tracking_stats": tracking_stats,
+            "business_analytics": business_analytics,
+            "alerts": alerts,
+            "human_text": summary}
+        }
+        context.mark_completed()
+
+        result = self.create_result(
+            data={"agg_summary": agg_summary},
+            usecase=self.name,
+            category=self.category,
+            context=context
+        )
+        return result
+
+    def _check_alerts(self, summary: dict, frame_number: Any, config: HistopathologicalCancerDetectionConfig) -> List[Dict]:
+        def get_trend(data, lookback=900, threshold=0.6):
+            window = data[-lookback:] if len(data) >= lookback else data
+            if len(window) < 2:
+                return True
+            increasing = 0
+            total = 0
+            for i in range(1, len(window)):
+                if window[i] >= window[i - 1]:
+                    increasing += 1
+                total += 1
+            return increasing / total >= threshold
+
+        frame_key = str(frame_number) if frame_number is not None else "current_frame"
+        alerts = []
+        total_detections = summary.get("total_count", 0)
+        total_counts_dict = summary.get("total_counts", {})
+        per_category_count = summary.get("per_category_count", {})
+
+        if not config.alert_config:
+            return alerts
+
+        if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
+            for category, threshold in config.alert_config.count_thresholds.items():
+                if category == "all" and total_detections > threshold:
+                    alerts.append({
+                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                        "alert_id": f"alert_{category}_{frame_key}",
+                        "incident_category": self.CASE_TYPE,
+                        "threshold_level": threshold,
+                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                          getattr(config.alert_config, 'alert_value', ['JSON']))}
+                    })
+                elif category in per_category_count and per_category_count[category] > threshold:
+                    alerts.append({
+                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                        "alert_id": f"alert_{category}_{frame_key}",
+                        "incident_category": self.CASE_TYPE,
+                        "threshold_level": threshold,
+                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                          getattr(config.alert_config, 'alert_value', ['JSON']))}
+                    })
+        return alerts
+
+    def _generate_incidents(self, counting_summary: Dict, alerts: List, config: HistopathologicalCancerDetectionConfig,
+                            frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+        incidents = []
+        total_detections = counting_summary.get("total_count", 0)
+        current_timestamp = self._get_current_timestamp_str(stream_info)
+        camera_info = self.get_camera_info_from_stream(stream_info)
+
+        self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list
+
+        if total_detections > 0:
+            level = "low"
+            intensity = 5.0
+            start_timestamp = self._get_start_timestamp_str(stream_info)
+            if start_timestamp and self.current_incident_end_timestamp == 'N/A':
+                self.current_incident_end_timestamp = 'Incident still active'
+            elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
+                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
+                    self.current_incident_end_timestamp = current_timestamp
+            elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
+                self.current_incident_end_timestamp = 'N/A'
+
+            if config.alert_config and config.alert_config.count_thresholds:
+                threshold = config.alert_config.count_thresholds.get("all", 15)
+                intensity = min(10.0, (total_detections / threshold) * 10)
+                if intensity >= 9:
+                    level = "critical"
+                    self._ascending_alert_list.append(3)
+                elif intensity >= 7:
+                    level = "significant"
+                    self._ascending_alert_list.append(2)
+                elif intensity >= 5:
+                    level = "medium"
+                    self._ascending_alert_list.append(1)
+                else:
+                    level = "low"
+                    self._ascending_alert_list.append(0)
+            else:
+                if total_detections > 30:
+                    level = "critical"
+                    intensity = 10.0
+                    self._ascending_alert_list.append(3)
+                elif total_detections > 25:
+                    level = "significant"
+                    intensity = 9.0
+                    self._ascending_alert_list.append(2)
+                elif total_detections > 15:
+                    level = "medium"
+                    intensity = 7.0
+                    self._ascending_alert_list.append(1)
+                else:
+                    level = "low"
+                    intensity = min(10.0, total_detections / 3.0)
+                    self._ascending_alert_list.append(0)
+
+            human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
+            human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
+            human_text = "\n".join(human_text_lines)
+
+            alert_settings = []
+            if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+                alert_settings.append({
+                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                    "incident_category": self.CASE_TYPE,
+                    "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                    "ascending": True,
+                    "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                      getattr(config.alert_config, 'alert_value', ['JSON']))}
+                })
+
+            event = self.create_incident(
+                incident_id=f"{self.CASE_TYPE}_{str(frame_number)}",
+                incident_type=self.CASE_TYPE,
+                severity_level=level,
+                human_text=human_text,
+                camera_info=camera_info,
+                alerts=alerts,
+                alert_settings=alert_settings,
+                start_time=start_timestamp,
+                end_time=self.current_incident_end_timestamp,
+                level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7}
+            )
+            incidents.append(event)
+        else:
+            self._ascending_alert_list.append(0)
+            incidents.append({})
+        return incidents
+
+    def _generate_tracking_stats(self, counting_summary: Dict, alerts: List, config: HistopathologicalCancerDetectionConfig,
+                                 frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+        camera_info = self.get_camera_info_from_stream(stream_info)
+        tracking_stats = []
+        total_detections = counting_summary.get("total_count", 0)
+        total_counts_dict = counting_summary.get("total_counts", {})
+        per_category_count = counting_summary.get("per_category_count", {})
+        current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
+        start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
+        high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
+        high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
+
+        total_counts = [{"category": cat, "count": count} for cat, count in total_counts_dict.items() if count > 0]
+        current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items() if count > 0 or total_detections > 0]
+
+        detections = []
+        for detection in counting_summary.get("detections", []):
+            bbox = detection.get("bounding_box", {})
+            category = detection.get("category", "unknown")
+            detection_obj = self.create_detection_object(category, bbox, segmentation=detection.get("masks") or detection.get("segmentation") or detection.get("mask") or [])
+            detections.append(detection_obj)
+
+        alert_settings = []
+        if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+            alert_settings.append({
+                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                "incident_category": self.CASE_TYPE,
+                "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                "ascending": True,
+                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                  getattr(config.alert_config, 'alert_value', ['JSON']))}
+            })
+
+        human_text_lines = [f"Tracking Statistics:", f"CURRENT FRAME @ {current_timestamp}"]
+        for cat, count in per_category_count.items():
+            human_text_lines.append(f"\t{cat}: {count}")
+        human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
+        for cat, count in total_counts_dict.items():
+            if count > 0:
+                human_text_lines.append(f"\t{cat}: {count}")
+        human_text_lines.append(f"Alerts: {alerts[0].get('settings', {})} sent @ {current_timestamp}" if alerts else "Alerts: None")
+        human_text = "\n".join(human_text_lines)
+
+        reset_settings = [{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]
+        tracking_stat = self.create_tracking_stats(
+            total_counts=total_counts,
+            current_counts=current_counts,
+            detections=detections,
+            human_text=human_text,
+            camera_info=camera_info,
+            alerts=alerts,
+            alert_settings=alert_settings,
+            reset_settings=reset_settings,
+            start_time=high_precision_start_timestamp,
+            reset_time=high_precision_reset_timestamp
+        )
+        tracking_stats.append(tracking_stat)
+        return tracking_stats
+
+    def _generate_business_analytics(self, counting_summary: Dict, alerts: List, config: HistopathologicalCancerDetectionConfig,
+                                     frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
+        if is_empty:
+            return []
+
+    def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
+        lines = {}
+        lines["Application Name"] = self.CASE_TYPE
+        lines["Application Version"] = self.CASE_VERSION
+        if len(incidents) > 0:
+            lines["Incidents:"] = f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
+        if len(tracking_stats) > 0:
+            lines["Tracking Statistics:"] = f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
+        if len(business_analytics) > 0:
+            lines["Business Analytics:"] = f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
+        if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
+            lines["Summary"] = "No Summary Data"
+        return [lines]
+
+    def _count_categories(self, detections: list, config: HistopathologicalCancerDetectionConfig) -> dict:
+        counts = {}
+        for det in detections:
+            cat = det.get('category', 'unknown')
+            counts[cat] = counts.get(cat, 0) + 1
+        return {
+            "total_count": sum(counts.values()),
+            "per_category_count": counts,
+            "detections": [
+                {
+                    "bounding_box": det.get("bounding_box"),
+                    "category": det.get("category"),
+                    "confidence": det.get("confidence"),
+                    "track_id": det.get("track_id"),
+                    "frame_id": det.get("frame_id")
+                }
+                for det in detections
+            ]
+        }
+
+    def _generate_insights(self, summary: dict, config: HistopathologicalCancerDetectionConfig) -> List[str]:
+        insights = []
+        per_cat = summary.get("per_category_count", {})
+        total_detections = summary.get("total_count", 0)
+
+        if total_detections == 0:
+            insights.append("No detections in the scene")
+            return insights
+        insights.append(f"EVENT: Detected {total_detections} in the scene")
+        intensity_threshold = config.alert_config.count_thresholds.get("all") if config.alert_config and config.alert_config.count_thresholds else None
+        if intensity_threshold:
+            percentage = (total_detections / intensity_threshold) * 100
+            if percentage < 20:
+                insights.append(f"INTENSITY: Low detection rate ({percentage:.1f}% of capacity)")
+            elif percentage <= 50:
+                insights.append(f"INTENSITY: Moderate detection rate ({percentage:.1f}% of capacity)")
+            elif percentage <= 70:
+                insights.append(f"INTENSITY: High detection rate ({percentage:.1f}% of capacity)")
+            else:
+                insights.append(f"INTENSITY: Severe detection rate ({percentage:.1f}% of capacity)")
+        for cat, count in per_cat.items():
+            display = self.CATEGORY_DISPLAY.get(cat, cat)
+            insights.append(f"{display}: {count}")
+        return insights
+
+    def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
+        return [
+            {
+                "category": det.get("category", "unknown"),
+                "confidence": det.get("confidence", 0.0),
+                "bounding_box": det.get("bounding_box", {})
+            }
+            for det in detections
+        ]
+
+    def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
+        frame_track_ids = set()
+        for det in detections:
+            tid = det.get('track_id')
+            if tid is not None:
+                frame_track_ids.add(tid)
+        total_track_ids = set()
+        for s in getattr(self, '_per_category_total_track_ids', {}).values():
+            total_track_ids.update(s)
+        return {
+            "total_count": len(total_track_ids),
+            "current_frame_count": len(frame_track_ids),
+            "total_unique_track_ids": len(total_track_ids),
+            "current_frame_track_ids": list(frame_track_ids),
+            "last_update_time": time.time(),
+            "total_frames_processed": getattr(self, '_total_frame_counter', 0)
+        }
+
+    def _update_tracking_state(self, detections: list):
+        if not hasattr(self, "_per_category_total_track_ids"):
+            self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
+            self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
+
+        for det in detections:
+            cat = det.get("category")
+            raw_track_id = det.get("track_id")
+            if cat not in self.target_categories or raw_track_id is None:
+                continue
+            bbox = det.get("bounding_box", det.get("bbox"))
+            canonical_id = self._merge_or_register_track(raw_track_id, bbox)
+            det["track_id"] = canonical_id
+            self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
+            self._current_frame_track_ids[cat].add(canonical_id)
+
+    def get_total_counts(self):
+        return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
+
+    def _format_timestamp_for_video(self, timestamp: float) -> str:
+        hours = int(timestamp // 3600)
+        minutes = int((timestamp % 3600) // 60)
+        seconds = timestamp % 60
+        return f"{hours:02d}:{minutes:02d}:{seconds:06.2f}"
+
+    def _format_timestamp_for_stream(self, timestamp: float) -> str:
+        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
+        return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
+        if not stream_info:
+            return "00:00:00.00"
+        if precision:
+            if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
+                stream_time_str = stream_info.get("video_timestamp", "")
+                return stream_time_str[:8]
+            else:
+                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+        if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
+            stream_time_str = stream_info.get("video_timestamp", "")
+            return stream_time_str[:8]
+        else:
+            stream_time_str = stream_info.get("stream_time", "")
+            if stream_time_str:
+                try:
+                    timestamp_str = stream_time_str.replace(" UTC", "")
+                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
+                    return self._format_timestamp_for_stream(timestamp)
+                except:
+                    return self._format_timestamp_for_stream(time.time())
+            return self._format_timestamp_for_stream(time.time())
+
+    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
+        if not stream_info:
+            return "00:00:00"
+        if precision:
+            if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
+                return "00:00:00"
+            else:
+                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+        if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
+            return "00:00:00"
+        else:
+            if self._tracking_start_time is None:
+                stream_time_str = stream_info.get("stream_time", "")
+                if stream_time_str:
+                    try:
+                        timestamp_str = stream_time_str.replace(" UTC", "")
+                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                    except:
+                        self._tracking_start_time = time.time()
+                else:
+                    self._tracking_start_time = time.time()
+            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+            dt = dt.replace(minute=0, second=0, microsecond=0)
+            return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+    def _compute_iou(self, box1: Any, box2: Any) -> float:
+        def _bbox_to_list(bbox):
+            if bbox is None:
+                return []
+            if isinstance(bbox, list):
+                return bbox[:4] if len(bbox) >= 4 else []
+            if isinstance(bbox, dict):
+                if "xmin" in bbox:
+                    return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
+                if "x1" in bbox:
+                    return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
+                values = [v for v in bbox.values() if isinstance(v, (int, float))]
+                return values[:4] if len(values) >= 4 else []
+            return []
+
+        l1 = _bbox_to_list(box1)
+        l2 = _bbox_to_list(box2)
+        if len(l1) < 4 or len(l2) < 4:
+            return 0.0
+        x1_min, y1_min, x1_max, y1_max = l1
+        x2_min, y2_min, x2_max, y2_max = l2
+        x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
+        y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
+        x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
+        y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
+        inter_x_min = max(x1_min, x2_min)
+        inter_y_min = max(y1_min, y2_min)
+        inter_x_max = min(x1_max, x2_max)
+        inter_y_max = min(y1_max, y2_max)
+        inter_w = max(0.0, inter_x_max - inter_x_min)
+        inter_h = max(0.0, inter_y_max - inter_y_min)
+        inter_area = inter_w * inter_h
+        area1 = (x1_max - x1_min) * (y1_max - y1_min)
+        area2 = (x2_max - x2_min) * (y2_max - y2_min)
+        union_area = area1 + area2 - inter_area
+        return (inter_area / union_area) if union_area > 0 else 0.0
+
+    def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
+        if raw_id is None or bbox is None:
+            return raw_id
+        now = time.time()
+        if raw_id in self._track_aliases:
+            canonical_id = self._track_aliases[raw_id]
+            track_info = self._canonical_tracks.get(canonical_id)
+            if track_info is not None:
+                track_info["last_bbox"] = bbox
+                track_info["last_update"] = now
+                track_info["raw_ids"].add(raw_id)
+            return canonical_id
+        for canonical_id, info in self._canonical_tracks.items():
+            if now - info["last_update"] > self._track_merge_time_window:
+                continue
+            iou = self._compute_iou(bbox, info["last_bbox"])
+            if iou >= self._track_merge_iou_threshold:
+                self._track_aliases[raw_id] = canonical_id
+                info["last_bbox"] = bbox
+                info["last_update"] = now
+                info["raw_ids"].add(raw_id)
+                return canonical_id
+        canonical_id = raw_id
+        self._track_aliases[raw_id] = canonical_id
+        self._canonical_tracks[canonical_id] = {
+            "last_bbox": bbox,
+            "last_update": now,
+            "raw_ids": {raw_id},
+        }
+        return canonical_id
+
+    def _format_timestamp(self, timestamp: float) -> str:
+        return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
+
+    def _get_tracking_start_time(self) -> str:
+        if self._tracking_start_time is None:
+            return "N/A"
+        return self._format_timestamp(self._tracking_start_time)
+
+    def _set_tracking_start_time(self) -> None:
+        self._tracking_start_time = time.time()