matrice-analytics 0.1.60__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/__init__.py +28 -0
- matrice_analytics/boundary_drawing_internal/README.md +305 -0
- matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
- matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
- matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
- matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
- matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
- matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
- matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
- matrice_analytics/post_processing/README.md +455 -0
- matrice_analytics/post_processing/__init__.py +732 -0
- matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
- matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
- matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
- matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
- matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
- matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
- matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
- matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
- matrice_analytics/post_processing/config.py +146 -0
- matrice_analytics/post_processing/core/__init__.py +63 -0
- matrice_analytics/post_processing/core/base.py +704 -0
- matrice_analytics/post_processing/core/config.py +3291 -0
- matrice_analytics/post_processing/core/config_utils.py +925 -0
- matrice_analytics/post_processing/face_reg/__init__.py +43 -0
- matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
- matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
- matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
- matrice_analytics/post_processing/ocr/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
- matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
- matrice_analytics/post_processing/post_processor.py +1175 -0
- matrice_analytics/post_processing/test_cases/__init__.py +1 -0
- matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
- matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
- matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
- matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
- matrice_analytics/post_processing/test_cases/test_config.py +852 -0
- matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
- matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
- matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
- matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
- matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
- matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
- matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
- matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
- matrice_analytics/post_processing/usecases/__init__.py +267 -0
- matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
- matrice_analytics/post_processing/usecases/age_detection.py +842 -0
- matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
- matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
- matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
- matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
- matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
- matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
- matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
- matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
- matrice_analytics/post_processing/usecases/car_service.py +1601 -0
- matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
- matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
- matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
- matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
- matrice_analytics/post_processing/usecases/color/clip.py +660 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
- matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
- matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
- matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
- matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
- matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
- matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
- matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
- matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
- matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
- matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
- matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
- matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
- matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
- matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
- matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
- matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
- matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
- matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
- matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
- matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
- matrice_analytics/post_processing/usecases/leaf.py +821 -0
- matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
- matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
- matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
- matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
- matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
- matrice_analytics/post_processing/usecases/parking.py +787 -0
- matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
- matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
- matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
- matrice_analytics/post_processing/usecases/people_counting.py +706 -0
- matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
- matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
- matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
- matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
- matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
- matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
- matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
- matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
- matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
- matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
- matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
- matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
- matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
- matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
- matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
- matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
- matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
- matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
- matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
- matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
- matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
- matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
- matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
- matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
- matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
- matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
- matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
- matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
- matrice_analytics/post_processing/utils/__init__.py +150 -0
- matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
- matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
- matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
- matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
- matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
- matrice_analytics/post_processing/utils/color_utils.py +592 -0
- matrice_analytics/post_processing/utils/counting_utils.py +182 -0
- matrice_analytics/post_processing/utils/filter_utils.py +261 -0
- matrice_analytics/post_processing/utils/format_utils.py +293 -0
- matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
- matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
- matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
- matrice_analytics/py.typed +0 -0
- matrice_analytics-0.1.60.dist-info/METADATA +481 -0
- matrice_analytics-0.1.60.dist-info/RECORD +196 -0
- matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
- matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
- matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/usecases/color/clip.py
@@ -0,0 +1,660 @@
import requests
import sys
from pathlib import Path
import logging
import subprocess
import shutil
import os

# Bootstrap: install runtime dependencies at import time; pip output goes to a log file
log_file = open("pip_jetson_bti.log", "w")
cmd = ["pip", "install", "importlib-resources"]
subprocess.run(
    cmd,
    stdout=log_file,
    stderr=subprocess.STDOUT,
    # preexec_fn=os.setpgrp
)
cmd = ["pip", "install", "httpx", "aiohttp", "filterpy"]
subprocess.run(
    cmd,
    stdout=log_file,
    stderr=subprocess.STDOUT,
    # preexec_fn=os.setpgrp
)
log_file.close()
import numpy as np
from typing import List, Dict, Tuple, Optional
from dataclasses import dataclass, field
import cv2
import io
import threading

# Try to import optional CLIP dependencies (fail gracefully if missing)
# These will be None if not available, and auto-install will be attempted on first use
ort = None
Image = None
CLIPProcessor = None
ir_files = None
ir_as_file = None

# Track auto-installation status (singleton pattern to avoid repeated attempts)
_installation_attempted = False
_installation_successful = False
_installation_error = None

try:
    import onnxruntime as ort
    from PIL import Image
    from transformers import CLIPProcessor
    from importlib.resources import files as ir_files, as_file as ir_as_file
    print("✓ CLIP dependencies available (onnxruntime, PIL, transformers)")
    _installation_successful = True  # Already available
except ImportError as e:
    print(f"⚠ CLIP dependencies not available at import time: {e}")
    print("→ Will attempt auto-installation when color detection is first used")
except Exception as e:
    print(f"⚠ Error importing CLIP dependencies: {e}")
    print("→ Color detection may be disabled")
def try_install_clip_dependencies():
    """
    Attempt to install missing CLIP dependencies.
    Only called when ClipProcessor is actually instantiated (lazy installation).
    Uses a singleton pattern to ensure installation only happens ONCE per session.
    Returns True if successful, False otherwise.
    """
    global _installation_attempted, _installation_successful, _installation_error

    # Check if we already attempted installation
    if _installation_attempted:
        if _installation_successful:
            print("✓ CLIP dependencies already available (from previous installation)")
            return True
        else:
            print("✗ CLIP dependency installation already failed previously")
            if _installation_error:
                print(f"  Previous error: {_installation_error}")
            print("→ Skipping repeated installation attempt")
            return False

    # Mark that we're attempting installation (prevents concurrent attempts)
    _installation_attempted = True

    print("→ Color detection is being used but dependencies are missing")
    print("→ Attempting ONE-TIME auto-installation of missing packages...")

    import platform

    # Detect platform
    machine = platform.machine().lower()
    is_jetson = machine in ['aarch64', 'arm64'] or os.path.exists('/etc/nv_tegra_release')

    if is_jetson:
        print(f"→ Detected Jetson/ARM platform ({machine})")
    else:
        print(f"→ Detected non-ARM platform ({machine})")

    packages_to_install = []

    # Helper function to check whether a package is installed
    def is_package_installed(package_name):
        try:
            result = subprocess.run(
                [sys.executable, "-m", "pip", "show", package_name],
                capture_output=True,
                text=True,
                timeout=10
            )
            return result.returncode == 0
        except Exception:
            return False

    # Check which packages are missing
    try:
        import onnxruntime
        print("→ onnxruntime imported successfully")
    except ImportError as e:
        # Check if onnxruntime is installed but the import is failing
        if is_package_installed("onnxruntime"):
            print(f"→ onnxruntime is installed but import failed: {e}")
            print("→ This may be due to a NumPy version mismatch or CUDA library issues")
            print("→ Skipping installation, will check other dependencies")
        elif is_jetson and is_package_installed("onnxruntime-gpu"):
            print("→ onnxruntime-gpu found on Jetson (this may not work on ARM)")
            print("→ Attempting import with installed version")
        else:
            # Choose the appropriate onnxruntime package based on platform
            if is_jetson:
                print("→ onnxruntime not installed on Jetson platform")
                print("→ Note: Jetson usually has onnxruntime pre-installed via JetPack")
                print("→ If you see this, check your JetPack installation")
                # Try installing anyway
                packages_to_install.append("onnxruntime")
            else:
                packages_to_install.append("onnxruntime-gpu")

    try:
        from PIL import Image as PILImage
        print("→ PIL/Pillow imported successfully")
    except ImportError:
        if not is_package_installed("pillow") and not is_package_installed("PIL"):
            packages_to_install.append("pillow")
        else:
            print("→ pillow is installed but import failed")

    try:
        from transformers import CLIPProcessor as CLIP
        print("→ transformers imported successfully")
    except ImportError:
        if not is_package_installed("transformers"):
            packages_to_install.append("transformers")
        else:
            print("→ transformers is installed but import failed (may be incompatible with the torch version)")

    # Check for tqdm (required by transformers)
    try:
        import tqdm
        print("→ tqdm imported successfully")
    except ImportError:
        if not is_package_installed("tqdm"):
            print("→ tqdm not found, adding to install list")
            packages_to_install.append("tqdm")
        else:
            print("→ tqdm is installed but import failed")
            # Reinstall tqdm if it's broken
            packages_to_install.append("tqdm")

    if not packages_to_install:
        print("→ All packages are available, retrying import...")
    else:
        print(f"→ Installing: {', '.join(packages_to_install)}")

        try:
            for package in packages_to_install:
                print(f"  Installing {package}...")
                result = subprocess.run(
                    [sys.executable, "-m", "pip", "install", package, "--no-warn-script-location"],
                    capture_output=True,
                    text=True,
                    timeout=300
                )
                if result.returncode == 0:
                    print(f"  ✓ {package} installed successfully")
                else:
                    error_msg = f"Failed to install {package}: {result.stderr}"
                    print(f"  ✗ {error_msg}")
                    _installation_error = error_msg
                    return False
        except Exception as install_error:
            error_msg = f"Installation failed: {install_error}"
            print(f"✗ {error_msg}")
            _installation_error = error_msg
            return False

    # Retry imports after installation (or if packages were already installed)
    print("→ Attempting to import dependencies...")

    ort_module = None
    PILImage = None
    CLIPProc = None
    ir_files_module = None
    ir_as_file_module = None

    import_errors = []

    # Try importing each package individually
    try:
        import onnxruntime as ort_module
        print(" ✓ onnxruntime imported")
    except Exception as e:
        import_errors.append(f"onnxruntime: {e}")
        print(f" ✗ onnxruntime import failed: {e}")

    try:
        from PIL import Image as PILImage
        print(" ✓ PIL imported")
    except Exception as e:
        import_errors.append(f"PIL: {e}")
        print(f" ✗ PIL import failed: {e}")

    try:
        from transformers import CLIPProcessor as CLIPProc
        print(" ✓ transformers imported")
    except Exception as e:
        import_errors.append(f"transformers: {e}")
        print(f" ✗ transformers import failed: {e}")

    try:
        from importlib.resources import files as ir_files_module, as_file as ir_as_file_module
        print(" ✓ importlib.resources imported")
    except Exception as e:
        import_errors.append(f"importlib.resources: {e}")
        print(f" ✗ importlib.resources import failed: {e}")

    # Check that we have at least the critical dependencies
    if ort_module is None or PILImage is None or CLIPProc is None:
        error_msg = f"Critical dependencies missing: {'; '.join(import_errors)}"
        print(f"✗ {error_msg}")
        _installation_error = error_msg
        _installation_successful = False
        return False

    # Update the module globals with the successfully imported modules
    globals()['ort'] = ort_module
    globals()['Image'] = PILImage
    globals()['CLIPProcessor'] = CLIPProc
    globals()['ir_files'] = ir_files_module
    globals()['ir_as_file'] = ir_as_file_module

    print("✓ CLIP dependencies imported successfully!")
    _installation_successful = True
    _installation_error = None
    return True
def load_model_from_checkpoint(checkpoint_url: str, providers: Optional[List] = None):
    """
    Load an ONNX model from a URL directly into memory without writing it locally.
    Enforces the specified providers (e.g., CUDAExecutionProvider) for execution.
    """
    if ort is None:
        raise RuntimeError(
            "onnxruntime is not available. Cannot load ONNX model.\n"
            "Please install: pip install onnxruntime-gpu"
        )

    try:
        print(f"Loading model from checkpoint: {checkpoint_url}")

        # Download the checkpoint with streaming
        response = requests.get(checkpoint_url, stream=True, timeout=(30, 200))
        response.raise_for_status()

        # Read the content into bytes
        model_bytes = io.BytesIO()
        for chunk in response.iter_content(chunk_size=8192):
            if chunk:
                model_bytes.write(chunk)
        model_bytes.seek(0)  # reset pointer to start

        # Prepare session options for performance
        try:
            sess_options = ort.SessionOptions()
            # Enable all graph optimizations
            sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
            # Conservative thread usage – GPU work dominates
            sess_options.intra_op_num_threads = 1
            sess_options.inter_op_num_threads = 1
        except Exception:
            sess_options = None

        # Resolve providers
        available = ort.get_available_providers()
        print("Available providers:", available)

        # Use the given providers, or default to CUDA if available, else the first available
        if providers is None:
            if "CUDAExecutionProvider" in available:
                use_providers = ["CUDAExecutionProvider"]
            elif available:
                use_providers = [available[0]]
                print(f"CUDA not available, using: {use_providers[0]}")
            else:
                use_providers = ["CPUExecutionProvider"]
                print("No providers detected, using CPUExecutionProvider")
        else:
            use_providers = providers

        # Warn if a requested provider is not available
        for provider in use_providers:
            provider_name = provider[0] if isinstance(provider, tuple) else provider
            if provider_name not in available:
                print(f"Warning: Requested provider '{provider_name}' not in available providers: {available}")
                print("Will attempt to use it anyway; may fall back to available providers")

        # Load the ONNX model from bytes with the enforced providers
        model = ort.InferenceSession(
            model_bytes.read(),
            sess_options=sess_options,
            providers=use_providers,
        )

        print("Session providers:", model.get_providers())
        print("Model loaded successfully from checkpoint (in-memory)")
        return model

    except Exception as e:
        print(f"Error loading model from checkpoint: {e}")
        return None

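# Illustrative smoke test for load_model_from_checkpoint (hypothetical; assumes the
# checkpoint URL is reachable and a CPU-only host):
#
#     sess = load_model_from_checkpoint(
#         "https://s3.us-west-2.amazonaws.com/testing.resources/datasets/clip_text.onnx",
#         providers=["CPUExecutionProvider"],
#     )
#     if sess is not None:
#         print([inp.name for inp in sess.get_inputs()])  # expect input_ids / attention_mask
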
class ClipProcessor:
    def __init__(self,
                 image_model_path: str = 'https://s3.us-west-2.amazonaws.com/testing.resources/datasets/clip_image.onnx',
                 text_model_path: str = 'https://s3.us-west-2.amazonaws.com/testing.resources/datasets/clip_text.onnx',
                 processor_dir: Optional[str] = None,
                 providers: Optional[List[str]] = None):

        # Check that required dependencies are available; try auto-install if not
        if ort is None or CLIPProcessor is None or Image is None:
            print("⚠ Color detection dependencies missing, attempting auto-installation...")

            # Try to auto-install missing dependencies (lazy installation)
            if not try_install_clip_dependencies():
                raise RuntimeError(
                    "Required dependencies for ClipProcessor are not available.\n"
                    "Auto-installation failed. Missing: " +
                    (("onnxruntime " if ort is None else "") +
                     ("transformers(CLIPProcessor) " if CLIPProcessor is None else "") +
                     ("PIL(Image) " if Image is None else "")).strip() + "\n"
                    "Please install manually: pip install transformers onnxruntime-gpu pillow"
                )

            print("✓ Auto-installation successful, continuing with ClipProcessor initialization")

        self.color_category: List[str] = ["black", "white", "yellow", "gray", "red", "blue", "light blue",
                                          "green", "brown"]

        self.image_url: str = image_model_path
        self.text_url: str = text_model_path
        # Resolve processor_dir relative to this module, not the CWD
        self.processor_path: str = self._resolve_processor_dir(processor_dir)
        print("Processor path:", self.processor_path)
        cwd = os.getcwd()
        print("Current working directory:", cwd)

        # Background force-reinstall of tokenizer runtime deps; pip output goes to a log file
        log_file = open("pip_jetson_bti.log", "w")
        cmd = ["pip", "install", "--force-reinstall", "huggingface_hub", "regex", "safetensors"]
        subprocess.Popen(
            cmd,
            stdout=log_file,
            stderr=subprocess.STDOUT,
            # preexec_fn=os.setpgrp
        )

        # Determine and enforce providers (prefer CUDA only)
        try:
            available = ort.get_available_providers()
        except Exception:
            print("Error getting ONNX providers - this should not happen if the dependency check passed")
            available = []
        print("Available ONNX providers:", available, "requested providers (if any):", providers)

        if providers is None:
            if "CUDAExecutionProvider" in available:
                self.providers = ["CUDAExecutionProvider"]
            else:
                # CUDA not available, fall back to CPU or other available providers
                print("CUDAExecutionProvider not available; falling back to available providers")
                if available:
                    self.providers = [available[0]]  # Use the first available provider
                    print(f"Using provider: {self.providers[0]}")
                else:
                    self.providers = ["CPUExecutionProvider"]  # Ultimate fallback
                    print("No providers detected, using CPUExecutionProvider")
        else:
            self.providers = providers

        # Thread-safety: a lock to serialize processing
        self._lock = threading.Lock()
        print("Current providers:", self.providers)

        self.image_sess = load_model_from_checkpoint(self.image_url, providers=self.providers)
        self.text_sess = load_model_from_checkpoint(self.text_url, providers=self.providers)
        # load_model_from_checkpoint returns None on failure; fail fast with a clear error
        if self.image_sess is None or self.text_sess is None:
            raise RuntimeError("Failed to load CLIP ONNX models from checkpoint URLs")

        # Load the CLIPProcessor tokenizer/config from local package data if available
        self.processor = None

        # Double-check CLIPProcessor is available (should never be None here due to the check above)
        if CLIPProcessor is None:
            raise RuntimeError(
                "CRITICAL: CLIPProcessor is None despite the early check. This should never happen.\n"
                "The auto-installation may have failed. Please manually install: pip install transformers"
            )

        try:
            if self.processor_path and os.path.isdir(self.processor_path):
                self.processor = CLIPProcessor.from_pretrained(self.processor_path, local_files_only=True)
            else:
                # Fall back to the hub
                self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
        except Exception as e:
            print(f"Falling back to remote CLIPProcessor due to error loading local assets: {e}")
            try:
                self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
            except Exception as e2:
                raise RuntimeError(
                    f"Failed to load CLIPProcessor from both local and remote sources: {e2}\n"
                    "Please ensure the transformers package is properly installed and you have an internet connection."
                )

        # Precompute unit-normalized text embeddings for the color vocabulary
        tok = self.processor.tokenizer(self.color_category, padding=True, return_tensors="np")
        ort_inputs_text = {
            "input_ids": tok["input_ids"].astype(np.int64),
            "attention_mask": tok["attention_mask"].astype(np.int64)
        }
        text_out = self.text_sess.run(["text_embeds"], ort_inputs_text)[0].astype(np.float32)
        self.text_embeds = text_out / np.linalg.norm(text_out, axis=-1, keepdims=True)

        # Preallocate a pixel batch buffer shaped like the processor's output
        sample = self.processor(images=np.zeros((224, 224, 3), dtype=np.uint8), return_tensors="np")
        self.pixel_template = sample["pixel_values"].astype(np.float32)
        self.min_box_size = 32
        self.max_batch = 32
        # Classify every frame for stability unless changed by the caller
        self.frame_skip = 1
        self.batch_pixels = np.zeros((self.max_batch, *self.pixel_template.shape[1:]), dtype=np.float32)

        self.records: Dict[int, Dict[str, float]] = {}
        self.frame_idx = 0
        self.processed_frames = 0
    def _resolve_processor_dir(self, processor_dir: Optional[str]) -> str:
        """
        Find the absolute path to the bundled 'clip_processor' assets directory in the
        installed package, independent of the current working directory.

        Resolution order:
        1) Explicit processor_dir if provided.
        2) Directory next to this file: <module_dir>/clip_processor
        3) importlib.resources (Python 3.9+): matrice_analytics.post_processing.usecases.color/clip_processor
        """
        if processor_dir:
            return os.path.abspath(processor_dir)

        # 2) Try the path next to this file
        module_dir = Path(__file__).resolve().parent
        candidate = module_dir / "clip_processor"
        if candidate.is_dir():
            return str(candidate)

        # 3) Try importlib.resources if available
        try:
            if ir_files is not None:
                pkg = "matrice_analytics.post_processing.usecases.color"
                res = ir_files(pkg).joinpath("clip_processor")
                try:
                    # If packaged in a zip, materialize to a temp path
                    with ir_as_file(res) as p:
                        if Path(p).is_dir():
                            return str(p)
                except Exception:
                    # If it is already a concrete path
                    if res and str(res):
                        return str(res)
        except Exception:
            pass

        # Fall back to a CWD-relative path (last resort)
        return os.path.abspath("clip_processor")
    def process_color_in_frame(self, detections, input_bytes, zones: Optional[Dict[str, List[List[float]]]], stream_info):
        # Serialize processing to avoid concurrent access and potential frame drops
        with self._lock:
            print("=== process_color_in_frame called ===")
            print(f"Number of detections: {len(detections) if detections else 0}")
            print(f"Input bytes length: {len(input_bytes) if input_bytes else 0}")

            boxes = []
            tracked_ids: List[int] = []
            frame_number: Optional[int] = None
            print(detections)
            self.frame_idx += 1

            if not detections:
                print(f"Frame {self.frame_idx}: No detections provided")
                self.processed_frames += 1
                return {}

            nparr = np.frombuffer(input_bytes, np.uint8)  # convert bytes to a numpy array
            image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)  # decode image

            if image is None:
                print(f"Frame {self.frame_idx}: Failed to decode image")
                self.processed_frames += 1
                return {}

            # Use the decoded frame directly (BGR → RGB is performed at crop time)
            frame = image
            if stream_info:
                input_settings = stream_info.get("input_settings", {})
                start_frame = input_settings.get("start_frame")
                end_frame = input_settings.get("end_frame")
                if start_frame is not None and end_frame is not None and start_frame == end_frame:
                    frame_number = start_frame

            # Keep only tracked boxes that are large enough to classify
            for det in detections:
                bbox = det.get('bounding_box')
                tid = det.get('track_id')
                if not bbox or tid is None:  # track_id 0 is valid; only skip missing ids
                    continue
                w = bbox['xmax'] - bbox['xmin']
                h = bbox['ymax'] - bbox['ymin']
                if w >= self.min_box_size and h >= self.min_box_size:
                    boxes.append(bbox)
                    tracked_ids.append(tid)

            if not boxes:
                print(f"Frame {self.frame_idx}: No boxes large enough to classify")
                self.processed_frames += 1
                return {}

            crops_for_model = []
            map_trackidx_to_cropidx = []
            for bbox, tid in zip(boxes, tracked_ids):
                last_rec = self.records.get(tid)
                should_classify = False
                if last_rec is None:
                    should_classify = True
                else:
                    if (self.frame_idx - last_rec.get("last_classified_frame", -999)) >= self.frame_skip:
                        should_classify = True
                if should_classify:
                    x1, y1, x2, y2 = bbox['xmin'], bbox['ymin'], bbox['xmax'], bbox['ymax']
                    # Crop safely - clamp to the frame and convert to integers
                    y1c, y2c = max(0, int(y1)), min(frame.shape[0], int(y2))
                    x1c, x2c = max(0, int(x1)), min(frame.shape[1], int(x2))
                    print(f"Cropping bbox: x1c={x1c}, y1c={y1c}, x2c={x2c}, y2c={y2c}, frame_shape={frame.shape}")
                    if y2c - y1c <= 0 or x2c - x1c <= 0:
                        print(f"Skipping invalid crop: dimensions {x2c-x1c}x{y2c-y1c}")
                        continue
                    crop = cv2.cvtColor(frame[y1c:y2c, x1c:x2c], cv2.COLOR_BGR2RGB)
                    map_trackidx_to_cropidx.append((tid, len(crops_for_model)))
                    # Pass the raw numpy crop; resizing is handled in run_image_onnx_on_crops
                    crops_for_model.append(crop)

            record = {}  # Initialize record outside the if block
            if crops_for_model:
                img_embeds = self.run_image_onnx_on_crops(crops_for_model)  # [N, D]
                # Cosine similarity with text_embeds (shape [num_labels, D]); both sides are unit-normalized
                sims = img_embeds @ self.text_embeds.T  # [N, num_labels]
                # Softmax over labels (cosine similarities are bounded, so no max-subtraction is needed)
                probs = np.exp(sims) / np.exp(sims).sum(axis=-1, keepdims=True)

                # Assign results back to the corresponding tracks
                for (tid, crop_idx) in map_trackidx_to_cropidx:
                    prob = probs[crop_idx]
                    best_idx = int(np.argmax(prob))
                    best_label = self.color_category[best_idx]
                    best_score = float(prob[best_idx])

                    det_info = next((d for d in detections if d.get("track_id") == tid), {})
                    category_label = det_info.get("category", "unknown")
                    zone_name = det_info.get("zone_name", "Unknown_Zone")
                    record[tid] = {
                        "frame": self.frame_idx,
                        "color": best_label,
                        "confidence": best_score,
                        "track_id": tid,
                        "object_label": category_label,
                        "zone_name": zone_name,
                        "last_classified_frame": self.frame_idx,
                    }
            # Note: self.records is never written, so with frame_skip=1 every eligible
            # box is classified on every call
            print(record)

            return record

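    # Expected input/output shapes for process_color_in_frame (illustrative,
    # inferred from the lookups above rather than a published schema):
    #
    #     detections = [{
    #         "track_id": 7,
    #         "category": "car",
    #         "zone_name": "entry",
    #         "bounding_box": {"xmin": 100, "ymin": 50, "xmax": 300, "ymax": 220},
    #     }]
    #
    # The return value maps track_id -> {"frame", "color", "confidence",
    # "track_id", "object_label", "zone_name", "last_classified_frame"}.
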
    def run_image_onnx_on_crops(self, crops):
        # NOTE: crops that fail validation are skipped, which would desync the
        # caller's track→crop index map; crops built in process_color_in_frame
        # are always non-empty numpy arrays, so this is a defensive path only.
        valid_crops = []
        for i, crop in enumerate(crops):
            # Convert a PIL Image to numpy (only if PIL.Image is available)
            if Image is not None and isinstance(crop, Image.Image):
                crop = np.array(crop)
            if not isinstance(crop, np.ndarray):
                print(f"Skipping crop {i}: not a numpy array ({type(crop)})")
                continue
            if crop.size == 0:
                print(f"Skipping crop {i}: empty array")
                continue

            try:
                crop_resized = cv2.resize(crop, (224, 224), interpolation=cv2.INTER_LINEAR)
                valid_crops.append(crop_resized)
            except Exception as e:
                print(f"Skipping crop {i}: resize failed ({e})")

        if not valid_crops:
            print("No valid crops to process")
            return np.zeros((0, self.text_embeds.shape[-1]), dtype=np.float32)

        # Convert all valid crops at once
        # TODO: Check that the processor and model.run use a single thread and the GPU. Latency should be <100 ms.
        pixel_values = self.processor(images=valid_crops, return_tensors="np")["pixel_values"]
        n = pixel_values.shape[0]
        if n <= self.max_batch:
            # Reuse the preallocated buffer for typical batch sizes
            self.batch_pixels[:n] = pixel_values
            batch = self.batch_pixels[:n]
        else:
            # Oversized batches would overflow the preallocated buffer; run them directly
            batch = pixel_values

        ort_inputs = {"pixel_values": batch}
        img_out = self.image_sess.run(["image_embeds"], ort_inputs)[0].astype(np.float32)

        # Unit-normalize so downstream dot products are cosine similarities
        return img_out / np.linalg.norm(img_out, axis=-1, keepdims=True)

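    # cv2.pointPolygonTest with measureDist=False returns +1 inside, 0 on the edge,
    # and -1 outside, so the `>= 0` test below counts edge hits as in-zone.
    # Illustrative call (hypothetical square zone):
    #
    #     zone = [[0, 0], [100, 0], [100, 100], [0, 100]]
    #     self._is_in_zone({"xmin": 40, "ymin": 40, "xmax": 60, "ymax": 60}, zone)  # → True
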
    def _is_in_zone(self, bbox, polygon: List[List[float]]) -> bool:
        if not polygon:
            return False
        x1, y1, x2, y2 = bbox['xmin'], bbox['ymin'], bbox['xmax'], bbox['ymax']
        # Test the box center against the zone polygon
        cx, cy = int((x1 + x2) / 2), int((y1 + y2) / 2)
        polygon = np.array(polygon, dtype=np.int32)
        return cv2.pointPolygonTest(polygon, (cx, cy), False) >= 0
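
The hunk above is the whole of `usecases/color/clip.py`: it lazily installs its CLIP dependencies, pulls two ONNX checkpoints into memory, and classifies the dominant color of each tracked box against a fixed nine-color vocabulary. A minimal driver sketch (hypothetical file names and detection values; assumes the S3 checkpoints are reachable):

```python
import cv2

from matrice_analytics.post_processing.usecases.color.clip import ClipProcessor

# Force CPU for a local smoke test; omit `providers` to prefer CUDA when present.
clip = ClipProcessor(providers=["CPUExecutionProvider"])

frame = cv2.imread("frame_0001.jpg")   # hypothetical frame grabbed from a stream
ok, buf = cv2.imencode(".jpg", frame)  # process_color_in_frame expects encoded bytes
assert ok

detections = [{  # hypothetical tracker output for one frame
    "track_id": 7,
    "category": "car",
    "zone_name": "entry",
    "bounding_box": {"xmin": 100, "ymin": 50, "xmax": 300, "ymax": 220},
}]

record = clip.process_color_in_frame(detections, buf.tobytes(), zones=None, stream_info=None)
print(record)  # e.g. {7: {"color": ..., "confidence": ..., "zone_name": "entry", ...}}
```

Note that constructing `ClipProcessor` has side effects: it downloads both ONNX checkpoints and kicks off a background `pip install --force-reinstall` of `huggingface_hub`, `regex`, and `safetensors`.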