matrice_analytics-0.1.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of matrice-analytics might be problematic.
- matrice_analytics/__init__.py +28 -0
- matrice_analytics/boundary_drawing_internal/README.md +305 -0
- matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
- matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
- matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
- matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
- matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
- matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
- matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
- matrice_analytics/post_processing/README.md +455 -0
- matrice_analytics/post_processing/__init__.py +732 -0
- matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
- matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
- matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
- matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
- matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
- matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
- matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
- matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
- matrice_analytics/post_processing/config.py +142 -0
- matrice_analytics/post_processing/core/__init__.py +63 -0
- matrice_analytics/post_processing/core/base.py +704 -0
- matrice_analytics/post_processing/core/config.py +3188 -0
- matrice_analytics/post_processing/core/config_utils.py +925 -0
- matrice_analytics/post_processing/face_reg/__init__.py +43 -0
- matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
- matrice_analytics/post_processing/face_reg/embedding_manager.py +681 -0
- matrice_analytics/post_processing/face_reg/face_recognition.py +1870 -0
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +339 -0
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +283 -0
- matrice_analytics/post_processing/ocr/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/easyocr_extractor.py +248 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +271 -0
- matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
- matrice_analytics/post_processing/post_processor.py +1153 -0
- matrice_analytics/post_processing/test_cases/__init__.py +1 -0
- matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
- matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
- matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
- matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
- matrice_analytics/post_processing/test_cases/test_config.py +852 -0
- matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
- matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
- matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
- matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
- matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
- matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
- matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
- matrice_analytics/post_processing/usecases/__init__.py +267 -0
- matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
- matrice_analytics/post_processing/usecases/age_detection.py +842 -0
- matrice_analytics/post_processing/usecases/age_gender_detection.py +1043 -0
- matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
- matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
- matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
- matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
- matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
- matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
- matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
- matrice_analytics/post_processing/usecases/car_service.py +1601 -0
- matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
- matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
- matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
- matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
- matrice_analytics/post_processing/usecases/color/clip.py +232 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
- matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
- matrice_analytics/post_processing/usecases/color_detection.py +1835 -0
- matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
- matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
- matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
- matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
- matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +930 -0
- matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
- matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
- matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
- matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
- matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
- matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
- matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
- matrice_analytics/post_processing/usecases/fire_detection.py +1112 -0
- matrice_analytics/post_processing/usecases/flare_analysis.py +891 -0
- matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
- matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
- matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
- matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
- matrice_analytics/post_processing/usecases/leaf.py +821 -0
- matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
- matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/license_plate_detection.py +914 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1194 -0
- matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
- matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
- matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
- matrice_analytics/post_processing/usecases/parking.py +787 -0
- matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
- matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
- matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
- matrice_analytics/post_processing/usecases/people_counting.py +1728 -0
- matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
- matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
- matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
- matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
- matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
- matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
- matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
- matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
- matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
- matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
- matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
- matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
- matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
- matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
- matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
- matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
- matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
- matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
- matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
- matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +950 -0
- matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
- matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
- matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
- matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
- matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
- matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
- matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
- matrice_analytics/post_processing/utils/__init__.py +150 -0
- matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
- matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
- matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
- matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
- matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
- matrice_analytics/post_processing/utils/color_utils.py +592 -0
- matrice_analytics/post_processing/utils/counting_utils.py +182 -0
- matrice_analytics/post_processing/utils/filter_utils.py +261 -0
- matrice_analytics/post_processing/utils/format_utils.py +293 -0
- matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
- matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
- matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
- matrice_analytics/py.typed +0 -0
- matrice_analytics-0.1.2.dist-info/METADATA +481 -0
- matrice_analytics-0.1.2.dist-info/RECORD +160 -0
- matrice_analytics-0.1.2.dist-info/WHEEL +5 -0
- matrice_analytics-0.1.2.dist-info/licenses/LICENSE.txt +21 -0
- matrice_analytics-0.1.2.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/usecases/color/clip.py
@@ -0,0 +1,232 @@
+import numpy as np
+from typing import List, Dict, Tuple, Optional
+from dataclasses import dataclass, field
+import cv2
+from scipy.special import softmax
+import requests
+try:
+    from transformers import CLIPProcessor
+    import onnxruntime as ort
+    from PIL import Image
+except:
+    print("Unable to import onnxruntime")
+
+def load_model_from_checkpoint(checkpoint_path,local_path):
+    """
+    Load a model from checkpoint URL
+    """
+    try:
+        print(f"Loading model from checkpoint: {checkpoint_path}")
+
+        # Check if checkpoint is a URL
+        if checkpoint_path.startswith(('http://', 'https://')):
+            # Download checkpoint from URL
+            response = requests.get(checkpoint_path, timeout = (30,200))
+            if response.status_code == 200:
+                with open(local_path, 'wb') as f:
+                    f.write(response.content)
+                checkpoint_path = local_path
+                print(f"Downloaded checkpoint to {local_path}")
+            else:
+                print(f"Failed to download checkpoint from {checkpoint_path}")
+                return None
+
+        # Load the model from the checkpoint
+        model = ort.InferenceSession(checkpoint_path, providers=["CUDAExecutionProvider","CPUExecutionProvider"])
+        print(f"{local_path} Model loaded successfully from checkpoint")
+        return model
+
+    except Exception as e:
+        print(f"Error loading model from checkpoint: {e}")
+        return None
+
+
+
+class ClipProcessor:
+    def __init__(self,
+                 image_model_path: str = 'https://s3.us-west-2.amazonaws.com/testing.resources/datasets/clip_image.onnx',
+                 text_model_path: str = 'https://s3.us-west-2.amazonaws.com/testing.resources/datasets/clip_text.onnx',
+                 processor_dir: str = './clip_processor',
+                 providers: Optional[List[str]] = None):
+
+        self.color_category: List[str] = ["black", "white", "yellow", "gray", "red", "blue", "light blue",
+                                          "green", "brown"]
+
+        self.image_url: str = image_model_path
+        self.text_url: str = text_model_path
+        self.processor_path: str = processor_dir
+
+        self.image_sess = load_model_from_checkpoint(self.image_url,"clip_image.onnx")
+        self.text_sess = load_model_from_checkpoint(self.text_url,"clip_text.onnx")
+
+        self.processor = CLIPProcessor.from_pretrained(self.processor_path)
+
+        tok = self.processor.tokenizer(self.color_category, padding=True, return_tensors="np")
+        ort_inputs_text = {
+            "input_ids": tok["input_ids"].astype(np.int64),
+            "attention_mask": tok["attention_mask"].astype(np.int64)
+        }
+        text_out = self.text_sess.run(["text_embeds"], ort_inputs_text)[0].astype(np.float32)
+        self.text_embeds = text_out / np.linalg.norm(text_out, axis=-1, keepdims=True)
+
+        sample = self.processor(images=np.zeros((224, 224, 3), dtype=np.uint8), return_tensors="np")
+        self.pixel_template = sample["pixel_values"].astype(np.float32)
+        self.min_box_size = 32
+        self.max_batch = 32
+        self.frame_skip = 2
+        self.batch_pixels = np.zeros((self.max_batch, *self.pixel_template.shape[1:]), dtype=np.float32)
+
+        self.records: Dict[int, Dict[str, float]] = {}
+        self.frame_idx = 0
+        self.processed_frames = 0
+
+
+    def process_color_in_frame(self, detections, input_bytes, zones: Optional[Dict[str, List[List[float]]]], stream_info):
+        boxes = []
+        tracked_ids: List[int] = []
+        frame_number: Optional[int] = None
+        # print(detections)
+        self.frame_idx+=1
+        nparr = np.frombuffer(input_bytes, np.uint8) # convert bytes to numpy array
+        image = cv2.imdecode(nparr, cv2.IMREAD_COLOR) # decode image
+
+        # Step 2: Convert PIL → NumPy array
+        frame = np.array(image)
+        if stream_info:
+            input_settings = stream_info.get("input_settings", {})
+            start_frame = input_settings.get("start_frame")
+            end_frame = input_settings.get("end_frame")
+            if start_frame is not None and end_frame is not None and start_frame == end_frame:
+                frame_number = start_frame
+
+        for det in detections:
+            bbox = det.get('bounding_box')
+            tid = det.get('track_id')
+            zones = zones if zones else {}
+            for z_name, zone_polygon in zones.items():
+                if self._is_in_zone(bbox, zone_polygon):
+                    w = bbox['xmax'] - bbox['xmin']
+                    h = bbox['ymax'] - bbox['ymin']
+                    if w >= self.min_box_size and h >= self.max_batch:
+                        boxes.append(bbox)
+                        tracked_ids.append(tid)
+        # print(boxes)
+        # print(tracked_ids)
+        if not boxes:
+            print(f"Frame {self.frame_idx}: No cars in zone")
+            self.processed_frames += 1
+            # print(f"Frame {frame_idx} processedms\n")
+            return
+
+        # print(boxes)
+        # print(tracked_ids)
+        crops_for_model = []
+        map_trackidx_to_cropidx = []
+        for i,(bbox, tid) in enumerate(zip(boxes, tracked_ids)):
+            last_rec = self.records.get(tid)
+            should_classify = False
+            if last_rec is None:
+                should_classify = True
+            else:
+                if (self.frame_idx - last_rec.get("last_classified_frame", -999)) >= self.frame_skip:
+                    should_classify = True
+            if should_classify:
+                x1, y1, x2, y2 = bbox['xmin'], bbox['ymin'], bbox['xmax'], bbox['ymax']
+                # crop safely
+                y1c, y2c = max(0, y1), min(frame.shape[0], y2)
+                x1c, x2c = max(0, x1), min(frame.shape[1], x2)
+                if y2c - y1c <= 0 or x2c - x1c <= 0:
+                    continue
+                crop = cv2.cvtColor(frame[y1c:y2c, x1c:x2c], cv2.COLOR_BGR2RGB)
+                pil_img = Image.fromarray(crop).resize((224, 224))
+                map_trackidx_to_cropidx.append((tid, len(crops_for_model)))
+                crops_for_model.append(pil_img)
+        # print(crops_for_model)
+        # print(map_trackidx_to_cropidx)
+
+        if crops_for_model:
+            record = {}
+            img_embeds = self.run_image_onnx_on_crops(crops_for_model) # [N, D]
+            # compute similarity with text_embeds (shape [num_labels, D])
+            sims = img_embeds @ self.text_embeds.T # [N, num_labels]
+            # convert to probs
+            probs = np.exp(sims) / np.exp(sims).sum(axis=-1, keepdims=True) # softmax numerically simple
+            # print(probs)
+
+            # assign back to corresponding tracks
+            for (tid, crop_idx) in map_trackidx_to_cropidx:
+                prob = probs[crop_idx]
+                # print(prob)
+                best_idx = int(np.argmax(prob))
+                best_label = self.color_category[best_idx]
+                # print(best_label)
+                best_score = float(prob[best_idx])
+                # print(best_score)
+
+                rec = self.records.get(tid)
+                # if rec is None:
+                record[tid] = {
+                    "frame": self.frame_idx,
+                    "color": best_label,
+                    "confidence": best_score,
+                    "track_id": tid,
+                    "last_classified_frame": self.frame_idx,
+                }
+                # else:
+                #     # update only if confidence improves
+                #     if best_score > rec["confidence"]:
+                #         rec["color"] = best_label
+                #         rec["confidence"] = best_score
+                #         rec["frame"] = self.frame_idx
+                #         rec["last_classified_frame"] = self.frame_idx
+
+
+        return record
+
+
+    def run_image_onnx_on_crops(self, crops):
+        valid_crops = []
+        for i, crop in enumerate(crops):
+            if isinstance(crop, Image.Image): # PIL.Image
+                crop = np.array(crop)
+            if not isinstance(crop, np.ndarray):
+                print(f"Skipping crop {i}: not a numpy array ({type(crop)})")
+                continue
+            if crop.size == 0:
+                print(f"Skipping crop {i}: empty array")
+                continue
+
+            try:
+                crop_resized = cv2.resize(crop, (224, 224), interpolation=cv2.INTER_LINEAR)
+                valid_crops.append(crop_resized)
+            except Exception as e:
+                print(f"Skipping crop {i}: resize failed ({e})")
+
+        if not valid_crops:
+            print("⚠️ No valid crops to process")
+            return np.zeros((0, self.text_embeds.shape[-1]), dtype=np.float32)
+
+        # Convert all valid crops at once
+        pixel_values = self.processor(images=valid_crops, return_tensors="np")["pixel_values"]
+        n = pixel_values.shape[0]
+        self.batch_pixels[:n] = pixel_values
+
+        ort_inputs = {"pixel_values": self.batch_pixels[:n]}
+        img_out = self.image_sess.run(["image_embeds"], ort_inputs)[0].astype(np.float32)
+
+        return img_out / np.linalg.norm(img_out, axis=-1, keepdims=True)
+
+
+    def _is_in_zone(self, bbox, polygon: List[List[float]]) -> bool:
+        if not polygon:
+            return False
+        # print(bbox)
+        x1, y1, x2, y2 = bbox['xmin'], bbox['ymin'], bbox['xmax'], bbox['ymax']
+        # print(x1,x2,y1,y2)
+        # print(type(x1))
+        # print(polygon)
+        cx, cy = int((x1 + x2) / 2), int((y1 + y2) / 2)
+        polygon = np.array(polygon, dtype=np.int32)
+        return cv2.pointPolygonTest(polygon, (cx, cy), False) >= 0
+
+
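For context, below is a minimal usage sketch of the added ClipProcessor class. It is illustrative only: the import path is inferred from the file listing above, and the sample frame, detection dict, zone polygon, and local ./clip_processor assets are assumptions, not values shipped with the package.

# Hypothetical driver for ClipProcessor.process_color_in_frame (not part of the package).
import cv2

from matrice_analytics.post_processing.usecases.color.clip import ClipProcessor

# Instantiating downloads the ONNX image/text models from the default S3 URLs and
# loads the CLIP processor assets from ./clip_processor (assumed to be present locally).
clip = ClipProcessor()

# The method decodes raw JPEG/PNG bytes, so encode the frame before passing it in.
frame = cv2.imread("frame_0001.jpg")          # hypothetical input frame
ok, buf = cv2.imencode(".jpg", frame)
input_bytes = buf.tobytes()

# Detections in the shape the method reads: pixel-space boxes plus a track id.
detections = [
    {"bounding_box": {"xmin": 120, "ymin": 80, "xmax": 360, "ymax": 300}, "track_id": 7},
]

# Only boxes whose centre falls inside a named zone polygon are classified.
zones = {"entry": [[0, 0], [640, 0], [640, 480], [0, 480]]}

records = clip.process_color_in_frame(detections, input_bytes, zones, stream_info=None)
# Returns a dict keyed by track id, e.g. {7: {"color": "red", "confidence": 0.83, ...}},
# or None when no detection qualifies for classification in this frame.
print(records)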