matrice-analytics 0.1.60 (matrice_analytics-0.1.60-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/__init__.py +28 -0
- matrice_analytics/boundary_drawing_internal/README.md +305 -0
- matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
- matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
- matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
- matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
- matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
- matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
- matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
- matrice_analytics/post_processing/README.md +455 -0
- matrice_analytics/post_processing/__init__.py +732 -0
- matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
- matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
- matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
- matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
- matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
- matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
- matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
- matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
- matrice_analytics/post_processing/config.py +146 -0
- matrice_analytics/post_processing/core/__init__.py +63 -0
- matrice_analytics/post_processing/core/base.py +704 -0
- matrice_analytics/post_processing/core/config.py +3291 -0
- matrice_analytics/post_processing/core/config_utils.py +925 -0
- matrice_analytics/post_processing/face_reg/__init__.py +43 -0
- matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
- matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
- matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
- matrice_analytics/post_processing/ocr/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
- matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
- matrice_analytics/post_processing/post_processor.py +1175 -0
- matrice_analytics/post_processing/test_cases/__init__.py +1 -0
- matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
- matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
- matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
- matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
- matrice_analytics/post_processing/test_cases/test_config.py +852 -0
- matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
- matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
- matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
- matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
- matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
- matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
- matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
- matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
- matrice_analytics/post_processing/usecases/__init__.py +267 -0
- matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
- matrice_analytics/post_processing/usecases/age_detection.py +842 -0
- matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
- matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
- matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
- matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
- matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
- matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
- matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
- matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
- matrice_analytics/post_processing/usecases/car_service.py +1601 -0
- matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
- matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
- matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
- matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
- matrice_analytics/post_processing/usecases/color/clip.py +660 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
- matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
- matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
- matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
- matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
- matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
- matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
- matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
- matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
- matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
- matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
- matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
- matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
- matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
- matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
- matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
- matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
- matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
- matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
- matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
- matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
- matrice_analytics/post_processing/usecases/leaf.py +821 -0
- matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
- matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
- matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
- matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
- matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
- matrice_analytics/post_processing/usecases/parking.py +787 -0
- matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
- matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
- matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
- matrice_analytics/post_processing/usecases/people_counting.py +706 -0
- matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
- matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
- matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
- matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
- matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
- matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
- matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
- matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
- matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
- matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
- matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
- matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
- matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
- matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
- matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
- matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
- matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
- matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
- matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
- matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
- matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
- matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
- matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
- matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
- matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
- matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
- matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
- matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
- matrice_analytics/post_processing/utils/__init__.py +150 -0
- matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
- matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
- matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
- matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
- matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
- matrice_analytics/post_processing/utils/color_utils.py +592 -0
- matrice_analytics/post_processing/utils/counting_utils.py +182 -0
- matrice_analytics/post_processing/utils/filter_utils.py +261 -0
- matrice_analytics/post_processing/utils/format_utils.py +293 -0
- matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
- matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
- matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
- matrice_analytics/py.typed +0 -0
- matrice_analytics-0.1.60.dist-info/METADATA +481 -0
- matrice_analytics-0.1.60.dist-info/RECORD +196 -0
- matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
- matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
- matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/usecases/ppe_compliance.py
@@ -0,0 +1,645 @@
from typing import Any, Dict, List, Optional
from dataclasses import dataclass, field
import time
from collections import deque
from datetime import datetime, timezone

from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol
from ..core.config import BaseConfig, AlertConfig
from ..utils import (
    filter_by_confidence,
    calculate_counting_summary,
    match_results_structure,
    apply_category_mapping,
    bbox_smoothing,
    BBoxSmoothingConfig,
    BBoxSmoothingTracker
)

@dataclass
class PPEComplianceConfig(BaseConfig):
    # Smoothing configuration
    enable_smoothing: bool = True
    smoothing_algorithm: str = "observability" # "window" or "observability"
    smoothing_window_size: int = 20
    smoothing_cooldown_frames: int = 5
    smoothing_confidence_range_factor: float = 0.5

    # Violation thresholds
    no_hardhat_threshold: float = 0.91
    no_mask_threshold: float = 0.4
    no_safety_vest_threshold: float = 0.4

    violation_categories: List[str] = field(default_factory=lambda: [
        "NO-Hardhat", "NO-Mask", "NO-Safety Vest"
    ])
    alert_config: Optional[AlertConfig] = None
    index_to_category: Optional[Dict[int, str]] = field(default_factory=lambda: {
        0: 'Hardhat', 1: 'Mask', 2: 'NO-Hardhat', 3: 'NO-Mask', 4: 'NO-Safety Vest',
        5: 'Person', 6: 'Safety Cone', 7: 'Safety Vest', 8: 'machinery', 9: 'vehicle'
    })

class PPEComplianceUseCase(BaseProcessor):
    def get_camera_info_from_stream(self, stream_info: Optional[Dict[str, Any]]) -> Dict[str, Any]:
        """
        Extract camera information from stream_info dict, matching mask_detection's approach.
        """
        if not stream_info:
            return {"camera_name": None, "camera_group": None, "location": None}
        input_settings = stream_info.get("input_settings", {})
        camera_name = input_settings.get("camera_name")
        camera_group = input_settings.get("camera_group")
        location = input_settings.get("location")
        return {
            "camera_name": camera_name,
            "camera_group": camera_group,
            "location": location
        }
    def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
        """Return a stable canonical ID for a raw tracker ID, merging fragmented tracks when IoU and temporal constraints indicate they represent the same physical object."""
        if not hasattr(self, '_track_aliases'):
            self._track_aliases = {}
            self._canonical_tracks = {}
            self._canonical_id_counter = 0
        if raw_id is None or bbox is None:
            return raw_id
        now = time.time()
        # Fast path – raw_id already mapped
        if raw_id in self._track_aliases:
            canonical_id = self._track_aliases[raw_id]
            track_info = self._canonical_tracks.get(canonical_id)
            if track_info is not None:
                track_info["last_bbox"] = bbox
                track_info["last_update"] = now
                track_info["raw_ids"].add(raw_id)
            return canonical_id
        # Attempt to merge with an existing canonical track (IoU + time window)
        best_iou = 0.0
        best_canonical = None
        for cid, info in self._canonical_tracks.items():
            last_bbox = info.get("last_bbox")
            last_update = info.get("last_update", 0)
            if last_bbox is not None and now - last_update < 2.0:
                iou = self._iou(bbox, last_bbox)
                if iou > 0.7 and iou > best_iou:
                    best_iou = iou
                    best_canonical = cid
        if best_canonical is not None:
            self._track_aliases[raw_id] = best_canonical
            info = self._canonical_tracks[best_canonical]
            info["last_bbox"] = bbox
            info["last_update"] = now
            info["raw_ids"].add(raw_id)
            return best_canonical
        # New canonical track
        canonical_id = f"ppe_{self._canonical_id_counter}"
        self._canonical_id_counter += 1
        self._track_aliases[raw_id] = canonical_id
        self._canonical_tracks[canonical_id] = {
            "last_bbox": bbox,
            "last_update": now,
            "raw_ids": {raw_id},
        }
        return canonical_id
    def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
        """
        Get detailed information about track IDs for PPE violations (per frame).
        """
        # Collect all track_ids in this frame
        frame_track_ids = set()
        for det in detections:
            tid = det.get('track_id')
            if tid is not None:
                frame_track_ids.add(tid)
        # Use persistent total set for unique counting
        total_track_ids = set()
        for s in getattr(self, '_violation_total_track_ids', {}).values():
            total_track_ids.update(s)
        return {
            "total_count": len(total_track_ids),
            "current_frame_count": len(frame_track_ids),
            "total_unique_track_ids": len(total_track_ids),
            "current_frame_track_ids": list(frame_track_ids),
            "last_update_time": time.time(),
            "total_frames_processed": getattr(self, '_total_frame_counter', 0)
        }

    @staticmethod
    def _iou(bbox1, bbox2):
        """Compute IoU between two bboxes (dicts with xmin/ymin/xmax/ymax)."""
        x1 = max(bbox1["xmin"], bbox2["xmin"])
        y1 = max(bbox1["ymin"], bbox2["ymin"])
        x2 = min(bbox1["xmax"], bbox2["xmax"])
        y2 = min(bbox1["ymax"], bbox2["ymax"])
        inter_w = max(0, x2 - x1)
        inter_h = max(0, y2 - y1)
        inter_area = inter_w * inter_h
        area1 = (bbox1["xmax"] - bbox1["xmin"]) * (bbox1["ymax"] - bbox1["ymin"])
        area2 = (bbox2["xmax"] - bbox2["xmin"]) * (bbox2["ymax"] - bbox2["ymin"])
        union = area1 + area2 - inter_area
        if union == 0:
            return 0.0
        return inter_area / union

    @staticmethod
    def _deduplicate_violations(detections, iou_thresh=0.7):
        """Suppress duplicate/overlapping violations with same label and high IoU."""
        filtered = []
        used = [False] * len(detections)
        for i, det in enumerate(detections):
            if used[i]:
                continue
            group = [i]
            for j in range(i+1, len(detections)):
                if used[j]:
                    continue
                if det.get("category") == detections[j].get("category"):
                    bbox1 = det.get("bounding_box")
                    bbox2 = detections[j].get("bounding_box")
                    if bbox1 and bbox2:
                        iou = PPEComplianceUseCase._iou(bbox1, bbox2)
                        if iou > iou_thresh:
                            used[j] = True
                            group.append(j)
            # Keep the highest confidence detection in the group
            best_idx = max(group, key=lambda idx: detections[idx].get("confidence", 0))
            filtered.append(detections[best_idx])
            used[best_idx] = True
        return filtered

    def _update_violation_tracking_state(self, detections: list):
        """
        Track unique violation track_ids per category for total count after tracking.
        Uses canonical ID merging to avoid duplicate counting when the tracker loses and reassigns IDs.
        """
        if not hasattr(self, '_violation_total_track_ids'):
            self._violation_total_track_ids = {cat: set() for cat in self.violation_categories}
        self._violation_current_frame_track_ids = {cat: set() for cat in self.violation_categories}
        for det in detections:
            cat = det.get('category')
            raw_track_id = det.get('track_id')
            if cat not in self.violation_categories or raw_track_id is None:
                continue
            bbox = det.get("bounding_box", det.get("bbox"))
            canonical_id = self._merge_or_register_track(raw_track_id, bbox)
            det["track_id"] = canonical_id # propagate canonical ID
            self._violation_total_track_ids.setdefault(cat, set()).add(canonical_id)
            self._violation_current_frame_track_ids[cat].add(canonical_id)

    def get_total_violation_counts(self):
        """
        Return total unique track_id count for each violation category.
        """
        return {cat: len(ids) for cat, ids in getattr(self, '_violation_total_track_ids', {}).items()}
    """PPE compliance detection use case with violation smoothing and alerting."""

    def __init__(self):
        super().__init__("ppe_compliance_detection")
        self.category = "ppe"
        # List of violation categories to track
        self.violation_categories = ["NO-Hardhat", "NO-Mask", "NO-Safety Vest"]
        # Initialize smoothing tracker
        self.smoothing_tracker = None
        # Initialize advanced tracker (will be created on first use)
        self.tracker = None
        # Initialize tracking state variables
        self._total_frame_counter = 0
        self._global_frame_offset = 0
        # Set of current frame track_ids (updated per frame)
        self._current_frame_track_ids = set()
        # Track start time for "TOTAL SINCE" calculation
        self._tracking_start_time = None
    def _format_timestamp_for_video(self, timestamp: float) -> str:
        """Format timestamp for video chunks (HH:MM:SS.s format)."""
        hours = int(timestamp // 3600)
        minutes = int((timestamp % 3600) // 60)
        seconds = round(float(timestamp % 60), 1)
        return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"

    def _format_timestamp_for_stream(self, timestamp: float) -> str:
        """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
        return dt.strftime('%Y:%m:%d %H:%M:%S')

    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
        """Get formatted current timestamp based on stream type."""
        if not stream_info:
            return "00:00:00.00"
        # If precision is requested, use frame-based time for video files
        if precision:
            if stream_info.get("feed_type", "live") == "disk":
                start_time = stream_info.get("start_frame", 30)/stream_info.get("original_fps", 30)
                if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
                    if frame_id:
                        start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
                    else:
                        start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
                stream_time_str = self._format_timestamp_for_video(start_time)
                return stream_time_str
            else:
                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")

        if stream_info.get("feed_type", "live") == "disk":
            start_time = stream_info.get("start_frame", 30)/stream_info.get("original_fps", 30)
            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
                if frame_id:
                    start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
                else:
                    start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
            stream_time_str = self._format_timestamp_for_video(start_time)
            return stream_time_str
        else:
            # For streams, use stream_time from stream_info
            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
            if stream_time_str:
                try:
                    timestamp_str = stream_time_str.replace(" UTC", "")
                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
                    return self._format_timestamp_for_stream(timestamp)
                except:
                    return self._format_timestamp_for_stream(time.time())
            else:
                return self._format_timestamp_for_stream(time.time())

    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
        """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
        if not stream_info:
            return "00:00:00"
        if precision:
            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
                return "00:00:00"
            else:
                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")

        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
            return "00:00:00"
        else:
            if self._tracking_start_time is None:
                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
                if stream_time_str:
                    try:
                        timestamp_str = stream_time_str.replace(" UTC", "")
                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
                    except:
                        self._tracking_start_time = time.time()
                else:
                    self._tracking_start_time = time.time()
            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
            dt = dt.replace(minute=0, second=0, microsecond=0)
            return dt.strftime('%Y:%m:%d %H:%M:%S')

    def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None, stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
        """
        Main entry point for PPE compliance detection post-processing.
        Applies category mapping, violation smoothing, counting, alerting, and summary generation.
        Returns a ProcessingResult with all relevant outputs in the new agg_summary format
        """
        start_time = time.time()
        if not isinstance(config, PPEComplianceConfig):
            return self.create_error_result("Invalid config type", usecase=self.name, category=self.category, context=context)
        if context is None:
            context = ProcessingContext()

        input_format = match_results_structure(data)
        context.input_format = input_format
        context.no_hardhat_threshold = config.no_hardhat_threshold

        # Map detection indices to category names robustly (PPE only)
        processed_data = self._robust_apply_category_mapping(data, config.index_to_category)
        # Only keep violation categories (remove 'Person', etc.)
        processed_data = [d for d in processed_data if d.get('category') in self.violation_categories]

        # Apply bbox smoothing if enabled
        if config.enable_smoothing:
            if self.smoothing_tracker is None:
                smoothing_config = BBoxSmoothingConfig(
                    smoothing_algorithm=config.smoothing_algorithm,
                    window_size=config.smoothing_window_size,
                    cooldown_frames=config.smoothing_cooldown_frames,
                    confidence_threshold=config.no_mask_threshold, # Use mask threshold as default
                    confidence_range_factor=config.smoothing_confidence_range_factor,
                    enable_smoothing=True
                )
                self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
            processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)

        # Advanced tracking (BYTETracker-like)
        try:
            from ..advanced_tracker import AdvancedTracker
            from ..advanced_tracker.config import TrackerConfig
            if self.tracker is None:
                tracker_config = TrackerConfig()
                self.tracker = AdvancedTracker(tracker_config)
                self.logger.info("Initialized AdvancedTracker for PPE compliance tracking")
            processed_data = self.tracker.update(processed_data)
        except Exception as e:
            self.logger.warning(f"AdvancedTracker failed: {e}")

        self._update_violation_tracking_state(processed_data)
        self._total_frame_counter += 1

        # Frame number logic (not chunkwise, just per call)
        frame_number = self._total_frame_counter

        # Compute summaries and alerts
        general_counting_summary = calculate_counting_summary(data)
        counting_summary = self._count_categories(processed_data, config)
        total_violation_counts = self.get_total_violation_counts()
        counting_summary['total_violation_counts'] = total_violation_counts
        insights = self._generate_insights(counting_summary, config)
        alerts = self._check_alerts(counting_summary, config)
        predictions = self._extract_predictions(processed_data)
        summary = self._generate_summary(counting_summary, alerts)

        # Generate new-format output (agg_summary)
        incidents = self._generate_events(counting_summary, alerts, config, frame_number, stream_info)
        tracking_stats = self._generate_tracking_stats(counting_summary, insights, summary, config, frame_number, stream_info)
        business_analytics = {}
        app_name = "ppe_compliance"
        app_version = "1.2"
        agg_human_text = {
            "Application Name": app_name,
            "Application Version": app_version,
            "Incidents:": incidents.get("human_text", ""),
            "Tracking Statistics:": tracking_stats.get("human_text", "")
        }
        agg_summary = {str(frame_number): {
            "incidents": incidents,
            "tracking_stats": tracking_stats,
            "business_analytics": business_analytics,
            "alerts": alerts,
            "human_text": agg_human_text
        }}

        context.mark_completed()
        result = self.create_result(
            data={"agg_summary": agg_summary},
            usecase=self.name,
            category=self.category,
            context=context
        )
        result.summary = summary
        result.insights = insights
        result.predictions = predictions
        return result

    def reset_tracker(self) -> None:
        """
        Reset the advanced tracker instance.

        This should be called when:
        - Starting a completely new tracking session
        - Switching to a different video/stream
        - Manual reset requested by user
        """
        if self.tracker is not None:
            self.tracker.reset()
            self.logger.info("AdvancedTracker reset for new tracking session")

    def reset_violation_tracking(self) -> None:
        """
        Reset violation tracking state (total counts, track IDs, etc.).

        This should be called when:
        - Starting a completely new tracking session
        - Switching to a different video/stream
        - Manual reset requested by user
        """
        self._violation_total_track_ids = {cat: set() for cat in self.violation_categories}
        self._total_frame_counter = 0
        self._global_frame_offset = 0
        self._tracking_start_time = None
        # Also reset canonical track merging state
        self._track_aliases = {}
        self._canonical_tracks = {}
        self._canonical_id_counter = 0
        self.logger.info("PPE violation tracking state reset")

    def reset_all_tracking(self) -> None:
        """
        Reset both advanced tracker and violation tracking state.
        """
        self.reset_tracker()
        self.reset_violation_tracking()
        self.logger.info("All PPE tracking state reset")

    def _generate_events(self, counting_summary: Dict, alerts: List, config: PPEComplianceConfig, frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> Dict:
        total_violations = counting_summary.get("total_count", 0)
        severity = "info" if total_violations > 0 else "none"
        human_text = f"INCIDENTS DETECTED @ :\n\tSeverity Level: ('ppe_compliance', '{severity}')"
        incident = {
            "incident_id": f"ppe_compliance_{frame_number}",
            "incident_type": "ppe_compliance",
            "severity_level": severity,
            "human_text": human_text,
            "start_time": "00:00:00",
            "end_time": "00:00:00",
            "camera_info": self.get_camera_info_from_stream(stream_info),
            "level_settings": {
                "low": 1,
                "medium": 3,
                "significant": 4,
                "critical": 7
            },
            "alerts": alerts,
            "alert_settings": [],
        }
        return incident

    def _generate_tracking_stats(
        self,
        counting_summary: Dict,
        insights: List[str],
        summary: str,
        config: PPEComplianceConfig,
        frame_number: Optional[int] = None,
        stream_info: Optional[Dict[str, Any]] = None
    ) -> Dict:
        total_violations = counting_summary.get("total_count", 0)
        per_cat = counting_summary.get("per_category_count", {})
        cumulative = counting_summary.get("total_violation_counts", {})
        cumulative_total = sum(cumulative.values()) if cumulative else 0
        track_ids_info = self._get_track_ids_info(counting_summary.get("detections", []))
        current_timestamp = self._get_current_timestamp_str(stream_info)
        start_timestamp = self._get_start_timestamp_str(stream_info)
        human_text_lines = []
        human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
        if total_violations > 0:
            human_text_lines.append(f"\t- PPE Violations Detected: {total_violations}")
            for cat in ["NO-Hardhat", "NO-Mask", "NO-Safety Vest"]:
                count = per_cat.get(cat, 0)
                if count > 0:
                    label = self.CATEGORY_DISPLAY.get(cat, cat).replace(" Violations", "")
                    human_text_lines.append(f"\t\t- {label}: {count}")
        else:
            human_text_lines.append("\t- No PPE violations detected")
        human_text_lines.append("")
        human_text_lines.append(f"TOTAL SINCE {start_timestamp}:")
        human_text_lines.append(f"\t- Total PPE Violations Detected: {cumulative_total}")
        for cat in ["NO-Hardhat", "NO-Mask", "NO-Safety Vest"]:
            count = cumulative.get(cat, 0)
            if count > 0:
                label = self.CATEGORY_DISPLAY.get(cat, cat).replace(" Violations", "")
                human_text_lines.append(f"\t\t- {label}: {count}")
        human_text = "\n".join(human_text_lines)
        tracking_stat = {
            "input_timestamp": datetime.now(timezone.utc).strftime('%Y-%m-%d-%H:%M:%S.%f UTC'),
            "reset_timestamp": "00:00:00",
            "camera_info": self.get_camera_info_from_stream(stream_info),
            "total_counts": [
                {"category": cat, "count": cumulative.get(cat, 0)} for cat in ["NO-Hardhat", "NO-Mask", "NO-Safety Vest"]
            ],
            "current_counts": [
                {"category": cat, "count": per_cat.get(cat, 0)} for cat in ["NO-Hardhat", "NO-Mask", "NO-Safety Vest"] if per_cat.get(cat, 0) > 0
            ],
            "detections": counting_summary.get("detections", []),
            "alerts": [],
            "alert_settings": [],
            "reset_settings": [
                {
                    "interval_type": "daily",
                    "reset_time": {"value": 9, "time_unit": "hour"}
                }
            ],
            "human_text": human_text
        }
        return tracking_stat

    def _count_categories(self, detections: list, config: PPEComplianceConfig) -> dict:
        """
        Count the number of detections per category and return a summary dict.
        The detections list is expected to have 'track_id' (from tracker), 'category', 'bounding_box', etc.
        Output structure will include 'track_id' for each detection as per AdvancedTracker output.
        """
        counts = {}
        for det in detections:
            cat = det.get('category', 'unknown')
            if cat in self.violation_categories:
                counts[cat] = counts.get(cat, 0) + 1
        # Each detection dict will now include 'track_id' (and possibly 'frame_id')
        filtered_detections = [
            {
                "bounding_box": det.get("bounding_box"),
                "category": det.get("category"),
                "confidence": det.get("confidence"),
                "track_id": det.get("track_id"),
                "frame_id": det.get("frame_id")
            }
            for det in detections if det.get('category') in self.violation_categories
        ]
        return {
            "total_count": sum(counts.values()),
            "per_category_count": counts,
            "detections": filtered_detections
        }

    # Human-friendly display names for violation categories
    CATEGORY_DISPLAY = {
        "NO-Hardhat": "No Hardhat Violations",
        "NO-Mask": "No Mask Violations",
        "NO-Safety Vest": "No Safety Vest Violations"
    }

    def _generate_insights(self, summary: dict, config: PPEComplianceConfig) -> List[str]:
        """
        Generate human-readable insights for each violation category.
        """
        insights = []
        per_cat = summary.get("per_category_count", {})
        for cat, count in per_cat.items():
            display = self.CATEGORY_DISPLAY.get(cat, cat)
            insights.append(f"{display}:{count}")
        return insights

    def _check_alerts(self, summary: dict, config: PPEComplianceConfig) -> List[Dict]:
        """
        Check if any alert thresholds are exceeded and return alert dicts.
        """
        alerts = []
        if not config.alert_config:
            return alerts
        total = summary.get("total_count", 0)
        if config.alert_config.count_thresholds:
            for category, threshold in config.alert_config.count_thresholds.items():
                if category == "all" and total >= threshold:
                    timestamp = datetime.now(timezone.utc).strftime('%Y-%m-%d-%H:%M:%S UTC')
                    alert_description = f"PPE violation count ({total}) exceeds threshold ({threshold})"
                    alerts.append({
                        "type": "count_threshold",
                        "severity": "warning",
                        "message": alert_description,
                        "category": category,
                        "current_count": total,
                        "threshold": threshold,
                        "human_text": f"Time: {timestamp}\n{alert_description}"
                    })
        return alerts

    def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
        """
        Extract prediction details for output (category, confidence, bounding box).
        """
        return [
            {
                "category": det.get("category", "unknown"),
                "confidence": det.get("confidence", 0.0),
                "bounding_box": det.get("bounding_box", {})
            }
            for det in detections
        ]

    def _generate_summary(self, summary: dict, alerts: List) -> str:
        """
        Generate a human_text string for the result, including per-category insights if available.
        Adds a tab before each violation label for better formatting.
        Also always includes the cumulative violation count so far.
        """
        total = summary.get("total_count", 0)
        per_cat = summary.get("per_category_count", {})
        cumulative = summary.get("total_violation_counts", {})
        cumulative_total = sum(cumulative.values()) if cumulative else 0
        lines = []
        if total > 0:
            lines.append(f"{total} PPE violation(s) detected")
            if per_cat:
                lines.append("violations:")
                for cat, count in per_cat.items():
                    display = self.CATEGORY_DISPLAY.get(cat, cat)
                    label = display.replace(" Violations", "").replace("No ", "No ").replace("Safety Vest", "safety vest").replace("Mask", "mask").replace("Hardhat", "hardhat")
                    if count == 1:
                        lines.append(f"\t{label}")
                    else:
                        lines.append(f"\t{label}:{count}")
        else:
            lines.append("No PPE violation detected")
        lines.append(f"Total PPE violations detected: {cumulative_total}")
        if alerts:
            lines.append(f"{len(alerts)} alert(s)")
        return "\n".join(lines)

    def _robust_apply_category_mapping(self, data, index_to_category):
        """
        Map detection indices to category names, robustly handling int or numeric string indices.
        Only for PPE use case to avoid affecting other use cases.
        Handles both int and str keys in index_to_category.
        """
        mapped = []
        for det in data:
            mapped_det = det.copy()
            cat = det.get("category")
            # Convert string numbers to int if possible
            if isinstance(cat, str) and cat.isdigit():
                cat_int = int(cat)
            else:
                cat_int = cat
            mapped_label = None
            if cat_int in index_to_category:
                mapped_label = index_to_category[cat_int]
            elif isinstance(cat_int, int) and str(cat_int) in index_to_category:
                mapped_label = index_to_category[str(cat_int)]
            if mapped_label is not None:
                mapped_det["category"] = mapped_label
            mapped.append(mapped_det)
        return mapped
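For orientation, the sketch below shows one way the `PPEComplianceUseCase` added in this release might be driven. It is a minimal, hypothetical example, not code shipped in the wheel: it assumes detector output as a list of dicts with an integer category index, a confidence, and an xmin/ymin/xmax/ymax `bounding_box` (the format the module's `_robust_apply_category_mapping` and `_iou` helpers read), and it assumes `PPEComplianceConfig` can be constructed with its defaults and that the returned `ProcessingResult` exposes `data` and `summary` as used in `process()`.

```python
# Hypothetical usage sketch; the input format and result-field access below are
# assumptions inferred from the ppe_compliance module, not documented package API.
from matrice_analytics.post_processing.usecases.ppe_compliance import (
    PPEComplianceConfig,
    PPEComplianceUseCase,
)

# Raw detections: integer class indices that config.index_to_category maps to
# labels such as "NO-Hardhat"; non-violation classes (e.g. index 5, "Person")
# are filtered out inside process().
detections = [
    {"category": 2, "confidence": 0.95,
     "bounding_box": {"xmin": 10, "ymin": 20, "xmax": 110, "ymax": 220}},
    {"category": 5, "confidence": 0.88,
     "bounding_box": {"xmin": 5, "ymin": 15, "xmax": 150, "ymax": 300}},
]

config = PPEComplianceConfig()      # defaults: smoothing enabled, thresholds as defined above
use_case = PPEComplianceUseCase()

stream_info = {"input_settings": {"camera_name": "cam-01", "camera_group": "site-a"}}
result = use_case.process(detections, config, stream_info=stream_info)

# Per-frame output is keyed by frame number under data["agg_summary"].
print(result.data["agg_summary"])
print(result.summary)

# Cumulative violation counts and canonical track IDs persist across calls,
# so clear them when switching to a new video or stream.
use_case.reset_all_tracking()
```

Note that the use case keeps cumulative per-category track IDs across `process()` calls, which is why the module also ships `reset_tracker()`, `reset_violation_tracking()`, and `reset_all_tracking()`.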