matrice_analytics-0.1.60-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/__init__.py +28 -0
- matrice_analytics/boundary_drawing_internal/README.md +305 -0
- matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
- matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
- matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
- matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
- matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
- matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
- matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
- matrice_analytics/post_processing/README.md +455 -0
- matrice_analytics/post_processing/__init__.py +732 -0
- matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
- matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
- matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
- matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
- matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
- matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
- matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
- matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
- matrice_analytics/post_processing/config.py +146 -0
- matrice_analytics/post_processing/core/__init__.py +63 -0
- matrice_analytics/post_processing/core/base.py +704 -0
- matrice_analytics/post_processing/core/config.py +3291 -0
- matrice_analytics/post_processing/core/config_utils.py +925 -0
- matrice_analytics/post_processing/face_reg/__init__.py +43 -0
- matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
- matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
- matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
- matrice_analytics/post_processing/ocr/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
- matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
- matrice_analytics/post_processing/post_processor.py +1175 -0
- matrice_analytics/post_processing/test_cases/__init__.py +1 -0
- matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
- matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
- matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
- matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
- matrice_analytics/post_processing/test_cases/test_config.py +852 -0
- matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
- matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
- matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
- matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
- matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
- matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
- matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
- matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
- matrice_analytics/post_processing/usecases/__init__.py +267 -0
- matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
- matrice_analytics/post_processing/usecases/age_detection.py +842 -0
- matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
- matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
- matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
- matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
- matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
- matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
- matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
- matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
- matrice_analytics/post_processing/usecases/car_service.py +1601 -0
- matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
- matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
- matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
- matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
- matrice_analytics/post_processing/usecases/color/clip.py +660 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
- matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
- matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
- matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
- matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
- matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
- matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
- matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
- matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
- matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
- matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
- matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
- matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
- matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
- matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
- matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
- matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
- matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
- matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
- matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
- matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
- matrice_analytics/post_processing/usecases/leaf.py +821 -0
- matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
- matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
- matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
- matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
- matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
- matrice_analytics/post_processing/usecases/parking.py +787 -0
- matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
- matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
- matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
- matrice_analytics/post_processing/usecases/people_counting.py +706 -0
- matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
- matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
- matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
- matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
- matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
- matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
- matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
- matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
- matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
- matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
- matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
- matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
- matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
- matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
- matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
- matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
- matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
- matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
- matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
- matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
- matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
- matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
- matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
- matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
- matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
- matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
- matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
- matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
- matrice_analytics/post_processing/utils/__init__.py +150 -0
- matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
- matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
- matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
- matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
- matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
- matrice_analytics/post_processing/utils/color_utils.py +592 -0
- matrice_analytics/post_processing/utils/counting_utils.py +182 -0
- matrice_analytics/post_processing/utils/filter_utils.py +261 -0
- matrice_analytics/post_processing/utils/format_utils.py +293 -0
- matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
- matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
- matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
- matrice_analytics/py.typed +0 -0
- matrice_analytics-0.1.60.dist-info/METADATA +481 -0
- matrice_analytics-0.1.60.dist-info/RECORD +196 -0
- matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
- matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
- matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/usecases/color/color_map_utils.py
@@ -0,0 +1,70 @@
import numpy as np
import cv2
from sklearn.cluster import KMeans
from collections import namedtuple

ColorInfo = namedtuple('ColorInfo', ['name', 'rgb', 'lab'])

def rgb_to_lab(rgb: tuple) -> np.ndarray:
    rgb = np.array(rgb, dtype=np.uint8).reshape(1, 1, 3)
    lab = cv2.cvtColor(rgb, cv2.COLOR_RGB2Lab)
    return lab[0, 0]

def lab_distance(c1: np.ndarray, c2: np.ndarray) -> float:
    # Cast to float first: subtracting two uint8 Lab vectors would wrap around.
    return float(np.linalg.norm(np.asarray(c1, dtype=np.float64) - np.asarray(c2, dtype=np.float64)))

PALETTE_RGB = {
    "red": (255, 0, 0), "dark red": (139, 0, 0), "brick": (203, 65, 84),
    "maroon": (128, 0, 0), "pink": (255, 192, 203), "hot pink": (255, 105, 180),
    "peach": (255, 229, 180), "beige": (245, 245, 220), "light tan": (210, 180, 140),
    "tan": (198, 144, 96), "brown": (165, 42, 42), "light brown": (181, 101, 29),
    "chocolate": (123, 63, 0), "white": (255, 255, 255), "light gray": (211, 211, 211),
    "gray": (128, 128, 128), "dark gray": (64, 64, 64), "charcoal": (54, 69, 79),
    "black": (0, 0, 0), "yellow": (255, 255, 0), "gold": (255, 215, 0),
    "orange": (255, 165, 0), "dark orange": (255, 140, 0), "green": (0, 128, 0),
    "lime": (0, 255, 0), "olive": (128, 128, 0), "teal": (0, 128, 128),
    "cyan": (0, 255, 255), "sky blue": (135, 206, 235), "light blue": (173, 216, 230),
    "blue": (0, 0, 255), "navy": (0, 0, 128), "denim": (21, 96, 189),
    "purple": (128, 0, 128), "violet": (238, 130, 238), "magenta": (255, 0, 255)
}

# Palette entries with precomputed Lab values for nearest-color lookup.
PALETTE = [
    ColorInfo(name, rgb, rgb_to_lab(rgb)) for name, rgb in PALETTE_RGB.items()
]

def find_nearest_color(lab_color: np.ndarray):
    min_dist = float('inf')
    nearest_color = None
    for color in PALETTE:
        dist = lab_distance(lab_color, color.lab)
        if dist < min_dist:
            min_dist = dist
            nearest_color = color
    return nearest_color, min_dist

def extract_major_colors(image: np.ndarray, k: int = 3):
    if image is None or image.size == 0:
        return []

    # Cluster pixels in Lab space, where Euclidean distance tracks
    # perceived color difference better than in RGB.
    lab_image = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
    pixels = lab_image.reshape(-1, 3)

    k = min(k, len(pixels))
    kmeans = KMeans(n_clusters=k, random_state=42, n_init=10)
    labels = kmeans.fit_predict(pixels)
    centers = kmeans.cluster_centers_

    counts = np.bincount(labels)
    total = pixels.shape[0]

    color_results = []
    for i, center_lab in enumerate(centers):
        nearest_color, dist = find_nearest_color(center_lab)
        coverage = counts[i] / total * 100
        confidence = max(0.0, 1 - dist / 40)
        # Drop clusters that match no palette color closely (dist > 30)
        # or cover less than 5% of the crop.
        if dist > 30 or coverage < 5:
            continue
        color_results.append((nearest_color.name, round(coverage, 2), round(confidence, 2)))

    color_results.sort(key=lambda x: x[1], reverse=True)
    return color_results
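A quick sanity check of the palette lookup and k-means extraction above (illustrative only, not shipped with the wheel; it assumes the file is importable as color_map_utils and that opencv-python and scikit-learn are installed):

# demo_color_map_utils.py -- illustrative only
import numpy as np
from color_map_utils import extract_major_colors, find_nearest_color, rgb_to_lab

# A 100x100 RGB patch: top half pure red, bottom half pure blue.
patch = np.zeros((100, 100, 3), dtype=np.uint8)
patch[:50, :] = (255, 0, 0)
patch[50:, :] = (0, 0, 255)

# Expect roughly [("red", 50.0, 1.0), ("blue", 50.0, 1.0)], sorted by coverage.
print(extract_major_colors(patch, k=2))

# Nearest-palette lookup for a single near-red color.
color, dist = find_nearest_color(rgb_to_lab((250, 5, 5)))
print(color.name, round(dist, 2))  # "red" with a small Lab distance

Each returned tuple is (palette name, coverage percent of the crop, confidence in [0, 1] derived from the Lab distance); clusters farther than 30 Lab units from every palette color, or covering under 5% of pixels, are discarded.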
matrice_analytics/post_processing/usecases/color/color_mapper.py
@@ -0,0 +1,468 @@
import cv2
import numpy as np
import json
import tempfile
import os
from typing import List, Dict, Optional, Tuple
from collections import defaultdict
from datetime import datetime
import logging

# Color extraction helpers shipped alongside this module; inside the
# installed package this would be the package-relative import
# "from .color_map_utils import extract_major_colors".
from color_map_utils import extract_major_colors

# Configure logging
logger = logging.getLogger(__name__)


class VideoColorClassifier:
    """
    A comprehensive system for processing video frames with YOLO predictions
    and extracting color information from detected objects.
    """

    def __init__(self, top_k_colors: int = 3, min_confidence: float = 0.5):
        """
        Initialize the video color classifier.

        Args:
            top_k_colors: Number of top colors to extract per detection
            min_confidence: Minimum confidence threshold for detections
        """
        self.top_k_colors = top_k_colors
        self.min_confidence = min_confidence
        self.detailed_results = []
        self.summary_results = defaultdict(lambda: defaultdict(list))

    def process_video_with_predictions(
        self,
        video_bytes: bytes,
        predictions: Dict[str, List[Dict]],
        output_dir: str = "./output",
        fps: Optional[float] = None
    ) -> Tuple[str, str]:
        """
        Main function to process video with YOLO predictions and extract colors.

        Args:
            video_bytes: Raw video file bytes
            predictions: Dict with frame_id -> list of detection dicts
            output_dir: Directory to save output files
            fps: Video FPS (will be auto-detected if not provided)

        Returns:
            Tuple of (detailed_results_path, summary_results_path)
        """
        # Create output directory
        os.makedirs(output_dir, exist_ok=True)

        # Create temporary video file
        with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as temp_video:
            temp_video.write(video_bytes)
            temp_video_path = temp_video.name

        try:
            # Process video frame by frame; capture the FPS actually used so
            # the summary metadata is correct when FPS was auto-detected.
            fps = self._process_video_frames(temp_video_path, predictions, fps)

            # Save detailed results
            detailed_path = os.path.join(output_dir, "detailed_color_results.json")
            self._save_detailed_results(detailed_path)

            # Generate and save summary results
            summary_path = os.path.join(output_dir, "color_summary_report.json")
            self._generate_summary_report(summary_path, fps)

            logger.info(f"Processing complete. Results saved to {output_dir}")
            return detailed_path, summary_path

        finally:
            # Clean up temporary video file
            if os.path.exists(temp_video_path):
                os.unlink(temp_video_path)

    def _process_video_frames(
        self,
        video_path: str,
        predictions: Dict[str, List[Dict]],
        fps: Optional[float] = None
    ) -> float:
        """
        Process video frame by frame and extract colors from detections.

        Returns the FPS used for timestamp computation.
        """
        cap = cv2.VideoCapture(video_path)

        if not cap.isOpened():
            raise ValueError(f"Could not open video file: {video_path}")

        # Get video properties
        if fps is None:
            fps = cap.get(cv2.CAP_PROP_FPS)
        if not fps or fps <= 0:
            fps = 30.0  # fallback when container metadata is missing

        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

        logger.info(f"Processing video: {total_frames} frames at {fps} FPS")

        frame_count = 0

        while True:
            ret, frame = cap.read()
            if not ret:
                break

            frame_id = str(frame_count)
            timestamp = frame_count / fps

            # Convert BGR (OpenCV's default) to RGB, which the color
            # extraction expects
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # Process detections for this frame
            if frame_id in predictions:
                frame_detections = predictions[frame_id]
                self._process_frame_detections(
                    rgb_frame, frame_detections, frame_id, timestamp
                )

            frame_count += 1

            # Log progress
            if frame_count % 100 == 0:
                logger.info(f"Processed {frame_count}/{total_frames} frames")

        cap.release()
        logger.info(f"Completed processing {frame_count} frames")
        return fps

    def _process_frame_detections(
        self,
        frame: np.ndarray,
        detections: List[Dict],
        frame_id: str,
        timestamp: float
    ):
        """
        Process all detections in a single frame.
        """
        for detection in detections:
            # Skip low-confidence detections
            if detection.get('confidence', 1.0) < self.min_confidence:
                continue

            # Extract detection information, accepting both key spellings
            bbox = detection.get('bounding_box', detection.get('bbox'))
            category = detection.get('category', detection.get('class', 'unknown'))
            track_id = detection.get('track_id', detection.get('id'))
            confidence = detection.get('confidence', 1.0)

            if bbox is None:
                continue

            # Crop object from frame
            cropped_obj = self._crop_bbox(frame, bbox)

            if cropped_obj.size == 0:
                logger.warning(f"Empty crop for bbox: {bbox} in frame {frame_id}")
                continue

            # Extract colors as (name, coverage %, confidence) tuples
            major_colors = extract_major_colors(cropped_obj, k=self.top_k_colors)
            main_color = major_colors[0][0] if major_colors else "unknown"

            # Create detailed result entry
            detailed_entry = {
                "frame_id": frame_id,
                "timestamp": round(timestamp, 2),
                "timestamp_formatted": self._format_timestamp(timestamp),
                "track_id": track_id,
                "category": category,
                "confidence": round(confidence, 3),
                "bbox": bbox,
                "major_colors": major_colors,
                "main_color": main_color,
                "color_confidence": major_colors[0][2] if major_colors else 0.0
            }

            self.detailed_results.append(detailed_entry)

            # Update summary data
            self._update_summary_data(detailed_entry)

    def _crop_bbox(self, image: np.ndarray, bbox: Dict[str, int]) -> np.ndarray:
        """
        Crop image using bbox coordinates with bounds checking.
        """
        h, w = image.shape[:2]

        # Handle different bbox formats
        if 'xmin' in bbox:
            xmin = max(0, int(bbox["xmin"]))
            ymin = max(0, int(bbox["ymin"]))
            xmax = min(w, int(bbox["xmax"]))
            ymax = min(h, int(bbox["ymax"]))
        elif 'x' in bbox:  # Alternative format: x/y/width/height
            x, y, width, height = bbox['x'], bbox['y'], bbox['width'], bbox['height']
            xmin = max(0, int(x))
            ymin = max(0, int(y))
            xmax = min(w, int(x + width))
            ymax = min(h, int(y + height))
        else:
            raise ValueError(f"Unsupported bbox format: {bbox}")

        return image[ymin:ymax, xmin:xmax]

    def _update_summary_data(self, detection_entry: Dict):
        """
        Update summary data structures with detection information.
        """
        category = detection_entry['category']
        main_color = detection_entry['main_color']
        timestamp = detection_entry['timestamp']

        # Store color occurrences with timestamps
        self.summary_results[category][main_color].append({
            'timestamp': timestamp,
            'timestamp_formatted': detection_entry['timestamp_formatted'],
            'track_id': detection_entry['track_id'],
            'confidence': detection_entry['confidence'],
            'color_confidence': detection_entry['color_confidence']
        })

    def _generate_summary_report(self, output_path: str, fps: float):
        """
        Generate a human-readable summary report.
        """
        summary_report = {
            "metadata": {
                "generated_at": datetime.now().isoformat(),
                "total_detections": len(self.detailed_results),
                "video_fps": fps,
                "categories_detected": list(self.summary_results.keys())
            },
            "category_color_analysis": {},
            "time_intervals": {},
            "color_distribution": {}
        }

        # Generate category-wise color analysis
        for category, color_data in self.summary_results.items():
            category_analysis = {
                "total_detections": sum(len(occurrences) for occurrences in color_data.values()),
                "colors_found": {},
                "dominant_color": None,
                "time_range": {"start": None, "end": None}
            }

            # Analyze each color for this category
            max_count = 0
            all_timestamps = []

            for color, occurrences in color_data.items():
                count = len(occurrences)
                timestamps = [occ['timestamp'] for occ in occurrences]
                all_timestamps.extend(timestamps)

                if count > max_count:
                    max_count = count
                    category_analysis["dominant_color"] = color

                # Calculate time intervals for this color
                time_intervals = self._calculate_time_intervals(timestamps)

                category_analysis["colors_found"][color] = {
                    "count": count,
                    "percentage": round((count / category_analysis["total_detections"]) * 100, 2),
                    "first_seen": self._format_timestamp(min(timestamps)),
                    "last_seen": self._format_timestamp(max(timestamps)),
                    "time_intervals": time_intervals,
                    "average_confidence": round(
                        float(np.mean([occ['color_confidence'] for occ in occurrences])), 3
                    )
                }

            # Set overall time range for category
            if all_timestamps:
                category_analysis["time_range"] = {
                    "start": self._format_timestamp(min(all_timestamps)),
                    "end": self._format_timestamp(max(all_timestamps)),
                    "duration_seconds": round(max(all_timestamps) - min(all_timestamps), 2)
                }

            summary_report["category_color_analysis"][category] = category_analysis

        # Generate overall color distribution
        color_counts = defaultdict(int)
        for category_data in self.summary_results.values():
            for color, occurrences in category_data.items():
                color_counts[color] += len(occurrences)

        total_detections = sum(color_counts.values())
        summary_report["color_distribution"] = {
            color: {
                "count": count,
                "percentage": round((count / total_detections) * 100, 2)
            }
            for color, count in sorted(color_counts.items(), key=lambda x: x[1], reverse=True)
        }

        # Generate readable insights
        summary_report["insights"] = self._generate_insights(summary_report)

        # Save summary report
        with open(output_path, 'w') as f:
            json.dump(summary_report, f, indent=2)

        logger.info(f"Summary report saved to {output_path}")

    def _calculate_time_intervals(self, timestamps: List[float]) -> List[Dict]:
        """
        Calculate continuous time intervals from timestamps.
        """
        if not timestamps:
            return []

        timestamps = sorted(timestamps)
        intervals = []
        start = timestamps[0]
        end = timestamps[0]

        for i in range(1, len(timestamps)):
            # If the gap exceeds 2 seconds, close the current interval
            # and start a new one
            if timestamps[i] - end > 2.0:
                intervals.append({
                    "start": self._format_timestamp(start),
                    "end": self._format_timestamp(end),
                    "duration": round(end - start, 2)
                })
                start = timestamps[i]
            end = timestamps[i]

        # Add final interval
        intervals.append({
            "start": self._format_timestamp(start),
            "end": self._format_timestamp(end),
            "duration": round(end - start, 2)
        })

        return intervals

    def _generate_insights(self, summary_report: Dict) -> List[str]:
        """
        Generate human-readable insights from the analysis.
        """
        insights = []

        # Most common category
        category_counts = {
            cat: data["total_detections"]
            for cat, data in summary_report["category_color_analysis"].items()
        }

        if category_counts:
            most_common_category = max(category_counts, key=category_counts.get)
            insights.append(
                f"Most frequently detected object: {most_common_category} "
                f"({category_counts[most_common_category]} detections)"
            )

        # Most common color overall
        color_dist = summary_report["color_distribution"]
        if color_dist:
            most_common_color = max(color_dist, key=lambda x: color_dist[x]["count"])
            insights.append(
                f"Most common color across all objects: {most_common_color} "
                f"({color_dist[most_common_color]['percentage']}%)"
            )

        # Category-specific insights
        for category, data in summary_report["category_color_analysis"].items():
            if data["dominant_color"]:
                dominant_color = data["dominant_color"]
                color_data = data["colors_found"][dominant_color]
                insights.append(
                    f"{category.title()} objects are predominantly {dominant_color} "
                    f"({color_data['percentage']}% of {category} detections)"
                )

        return insights

    def _save_detailed_results(self, output_path: str):
        """
        Save detailed frame-by-frame results.
        """
        detailed_output = {
            "metadata": {
                "generated_at": datetime.now().isoformat(),
                "total_frames_processed": len(set(r["frame_id"] for r in self.detailed_results)),
                "total_detections": len(self.detailed_results),
                "detection_categories": list(set(r["category"] for r in self.detailed_results))
            },
            "detections": self.detailed_results
        }

        with open(output_path, 'w') as f:
            json.dump(detailed_output, f, indent=2)

        logger.info(f"Detailed results saved to {output_path}")

    def _format_timestamp(self, seconds: float) -> str:
        """
        Format a timestamp in MM:SS.ss format.
        """
        minutes = int(seconds // 60)
        secs = seconds % 60
        return f"{minutes:02d}:{secs:05.2f}"

    def reset(self):
        """
        Reset the classifier for processing a new video.
        """
        self.detailed_results = []
        self.summary_results = defaultdict(lambda: defaultdict(list))


# Convenience function for direct usage
def process_video_with_color_detection(
    video_bytes: bytes,
    yolo_predictions: Dict[str, List[Dict]],
    output_dir: str = "./output",
    top_k_colors: int = 3,
    min_confidence: float = 0.5,
    fps: Optional[float] = None
) -> Tuple[str, str]:
    """
    Process video with YOLO predictions and extract color information.

    Args:
        video_bytes: Raw video file bytes
        yolo_predictions: Dict with frame_id -> list of YOLO detection dicts
        output_dir: Directory to save output files
        top_k_colors: Number of top colors to extract per detection
        min_confidence: Minimum confidence threshold for detections
        fps: Video FPS (auto-detected if not provided)

    Returns:
        Tuple of (detailed_results_path, summary_results_path)

    Example:
        >>> with open("video.mp4", "rb") as f:
        ...     video_bytes = f.read()
        >>>
        >>> # YOLO predictions format:
        >>> predictions = {
        ...     "0": [
        ...         {
        ...             "category": "car",
        ...             "bounding_box": {"xmin": 100, "ymin": 50, "xmax": 200, "ymax": 150},
        ...             "confidence": 0.95,
        ...             "track_id": "car_001"
        ...         }
        ...     ],
        ...     "1": [...]
        ... }
        >>>
        >>> detailed_path, summary_path = process_video_with_color_detection(
        ...     video_bytes, predictions, "./results"
        ... )
    """
    classifier = VideoColorClassifier(top_k_colors, min_confidence)
    return classifier.process_video_with_predictions(
        video_bytes, yolo_predictions, output_dir, fps
    )