coralnet-toolbox 0.0.74-py2.py3-none-any.whl → 0.0.76-py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- coralnet_toolbox/Annotations/QtPolygonAnnotation.py +57 -12
- coralnet_toolbox/Annotations/QtRectangleAnnotation.py +44 -14
- coralnet_toolbox/Explorer/QtDataItem.py +52 -22
- coralnet_toolbox/Explorer/QtExplorer.py +277 -1600
- coralnet_toolbox/Explorer/QtSettingsWidgets.py +101 -15
- coralnet_toolbox/Explorer/QtViewers.py +1568 -0
- coralnet_toolbox/Explorer/transformer_models.py +70 -0
- coralnet_toolbox/Explorer/yolo_models.py +112 -0
- coralnet_toolbox/IO/QtExportMaskAnnotations.py +538 -403
- coralnet_toolbox/Icons/system_monitor.png +0 -0
- coralnet_toolbox/MachineLearning/ImportDataset/QtBase.py +239 -147
- coralnet_toolbox/MachineLearning/VideoInference/YOLO3D/run.py +102 -16
- coralnet_toolbox/QtAnnotationWindow.py +16 -10
- coralnet_toolbox/QtEventFilter.py +4 -4
- coralnet_toolbox/QtImageWindow.py +3 -7
- coralnet_toolbox/QtMainWindow.py +104 -64
- coralnet_toolbox/QtProgressBar.py +1 -0
- coralnet_toolbox/QtSystemMonitor.py +370 -0
- coralnet_toolbox/Rasters/RasterTableModel.py +20 -0
- coralnet_toolbox/Results/ConvertResults.py +14 -8
- coralnet_toolbox/Results/ResultsProcessor.py +3 -2
- coralnet_toolbox/SAM/QtDeployGenerator.py +2 -5
- coralnet_toolbox/SAM/QtDeployPredictor.py +11 -3
- coralnet_toolbox/SeeAnything/QtDeployGenerator.py +146 -116
- coralnet_toolbox/SeeAnything/QtDeployPredictor.py +55 -9
- coralnet_toolbox/Tile/QtTileBatchInference.py +4 -4
- coralnet_toolbox/Tools/QtPolygonTool.py +42 -3
- coralnet_toolbox/Tools/QtRectangleTool.py +30 -0
- coralnet_toolbox/Tools/QtSAMTool.py +140 -91
- coralnet_toolbox/Transformers/Models/GroundingDINO.py +72 -0
- coralnet_toolbox/Transformers/Models/OWLViT.py +72 -0
- coralnet_toolbox/Transformers/Models/OmDetTurbo.py +68 -0
- coralnet_toolbox/Transformers/Models/QtBase.py +120 -0
- coralnet_toolbox/{AutoDistill → Transformers}/Models/__init__.py +1 -1
- coralnet_toolbox/{AutoDistill → Transformers}/QtBatchInference.py +15 -15
- coralnet_toolbox/{AutoDistill → Transformers}/QtDeployModel.py +18 -16
- coralnet_toolbox/{AutoDistill → Transformers}/__init__.py +1 -1
- coralnet_toolbox/__init__.py +1 -1
- coralnet_toolbox/utilities.py +21 -15
- {coralnet_toolbox-0.0.74.dist-info → coralnet_toolbox-0.0.76.dist-info}/METADATA +13 -10
- {coralnet_toolbox-0.0.74.dist-info → coralnet_toolbox-0.0.76.dist-info}/RECORD +45 -40
- coralnet_toolbox/AutoDistill/Models/GroundingDINO.py +0 -81
- coralnet_toolbox/AutoDistill/Models/OWLViT.py +0 -76
- coralnet_toolbox/AutoDistill/Models/OmDetTurbo.py +0 -75
- coralnet_toolbox/AutoDistill/Models/QtBase.py +0 -112
- {coralnet_toolbox-0.0.74.dist-info → coralnet_toolbox-0.0.76.dist-info}/WHEEL +0 -0
- {coralnet_toolbox-0.0.74.dist-info → coralnet_toolbox-0.0.76.dist-info}/entry_points.txt +0 -0
- {coralnet_toolbox-0.0.74.dist-info → coralnet_toolbox-0.0.76.dist-info}/licenses/LICENSE.txt +0 -0
- {coralnet_toolbox-0.0.74.dist-info → coralnet_toolbox-0.0.76.dist-info}/top_level.txt +0 -0
coralnet_toolbox/SeeAnything/QtDeployGenerator.py

@@ -10,10 +10,10 @@ import torch
 from torch.cuda import empty_cache
 
 import pyqtgraph as pg
-from pyqtgraph.Qt import QtGui
 
 from ultralytics import YOLOE
 from ultralytics.models.yolo.yoloe import YOLOEVPSegPredictor
+from ultralytics.models.yolo.yoloe import YOLOEVPDetectPredictor
 
 from PyQt5.QtCore import Qt
 from PyQt5.QtWidgets import (QMessageBox, QVBoxLayout, QApplication, QFileDialog,

@@ -90,7 +90,7 @@ class DeployGeneratorDialog(QDialog):
         self.imported_vpes = []  # VPEs loaded from file
         self.reference_vpes = []  # VPEs created from reference images
 
-        self.device =
+        self.device = None  # Will be set in showEvent
 
         # Main vertical layout for the dialog
         self.layout = QVBoxLayout(self)

@@ -195,6 +195,8 @@ class DeployGeneratorDialog(QDialog):
         self.initialize_iou_threshold()
         self.initialize_area_threshold()
 
+        # Update the device
+        self.device = self.main_window.device
         # Configure the image window's UI elements for this specific dialog
         self.configure_image_window_for_dialog()
         # Sync with main window's images BEFORE updating labels

@@ -416,7 +418,7 @@ class DeployGeneratorDialog(QDialog):
 
         # Image size control
         self.imgsz_spinbox = QSpinBox()
-        self.imgsz_spinbox.setRange(
+        self.imgsz_spinbox.setRange(1024, 65536)
         self.imgsz_spinbox.setSingleStep(1024)
         self.imgsz_spinbox.setValue(self.imgsz)
         layout.addRow("Image Size (imgsz):", self.imgsz_spinbox)

@@ -502,8 +504,13 @@ class DeployGeneratorDialog(QDialog):
 
         main_layout.addLayout(button_row)
 
-        # Second row:
+        # Second row: VPE action buttons
         vpe_row = QHBoxLayout()
+
+        generate_vpe_button = QPushButton("Generate VPEs")
+        generate_vpe_button.clicked.connect(self.generate_vpes_from_references)
+        vpe_row.addWidget(generate_vpe_button)
+
         save_vpe_button = QPushButton("Save VPE")
         save_vpe_button.clicked.connect(self.save_vpe)
         vpe_row.addWidget(save_vpe_button)

@@ -762,20 +769,20 @@ class DeployGeneratorDialog(QDialog):
         try:
             # Load the VPE file
             loaded_data = torch.load(file_path)
-
-            #
-
+
+            # Move tensors to the appropriate device
+            device = self.main_window.device
 
             # Check format type and handle appropriately
             if isinstance(loaded_data, list):
                 # New format: list of VPE tensors
-                self.imported_vpes = [vpe.to(
+                self.imported_vpes = [vpe.to(device) for vpe in loaded_data]
                 vpe_count = len(self.imported_vpes)
                 self.status_bar.setText(f"Loaded {vpe_count} VPE tensors from file")
 
             elif isinstance(loaded_data, torch.Tensor):
                 # Legacy format: single tensor - convert to list for consistency
-                loaded_vpe = loaded_data.to(
+                loaded_vpe = loaded_data.to(device)
                 # Store as a single-item list
                 self.imported_vpes = [loaded_vpe]
                 self.status_bar.setText("Loaded 1 VPE tensor from file (legacy format)")
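The loader above accepts two on-disk layouts: the current format (a list of VPE tensors) and the legacy format (one bare tensor), normalizing both to a list and moving every tensor onto the active device. A minimal standalone sketch of the same branching; the `load_vpes` helper name and file path are ours, not the toolbox's:

```python
import torch

def load_vpes(file_path: str, device: str = "cpu") -> list:
    """Load VPE tensors saved either as a list (new format) or a single tensor (legacy)."""
    loaded_data = torch.load(file_path, map_location="cpu")

    if isinstance(loaded_data, list):
        # New format: a list of VPE tensors
        return [vpe.to(device) for vpe in loaded_data]
    if isinstance(loaded_data, torch.Tensor):
        # Legacy format: wrap the single tensor in a list for consistency
        return [loaded_data.to(device)]
    raise ValueError(f"Unrecognized VPE file format: {type(loaded_data)!r}")

# Example with a hypothetical file:
# vpes = load_vpes("reference_vpes.pt", device="cuda:0")
```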
@@ -810,80 +817,75 @@ class DeployGeneratorDialog(QDialog):
 
     def save_vpe(self):
         """
-
+        Saves the combined collection of VPEs (imported and pre-generated from references) to disk.
         """
-
-        self.update_stashed_references_from_ui()
-
-        # Create a list to hold all VPEs
-        all_vpes = []
-
-        # Add imported VPEs if available
-        if self.imported_vpes:
-            all_vpes.extend(self.imported_vpes)
+        QApplication.setOverrideCursor(Qt.WaitCursor)
 
-
-
-
-        # Reload the model to ensure clean state
-        self.reload_model()
+        try:
+            # Create a list to hold all VPEs to be saved
+            all_vpes = []
 
-        #
-
+            # Add imported VPEs if available
+            if self.imported_vpes:
+                all_vpes.extend(self.imported_vpes)
 
-        if
-            # Add new VPEs to collection
-            all_vpes.extend(new_vpes)
-            # Update reference_vpes with the new ones
-            self.reference_vpes = new_vpes
-        else:
-            # Include existing reference VPEs if we have them
+            # Add pre-generated reference VPEs if available
             if self.reference_vpes:
                 all_vpes.extend(self.reference_vpes)
-
-
-
-
+
+            # Check if we have any VPEs to save
+            if not all_vpes:
+                QApplication.restoreOverrideCursor()
+                QMessageBox.warning(
+                    self,
+                    "No VPEs Available",
+                    "No VPEs available to save. "
+                    "Please either load a VPE file or generate VPEs from reference images first."
+                )
+                return
+
+            QApplication.restoreOverrideCursor()
+
+            file_path, _ = QFileDialog.getSaveFileName(
                 self,
-            "
-            "
+                "Save VPE Collection",
+                "",
+                "PyTorch Tensor (*.pt);;All Files (*)"
             )
-
-
-
-
-
-
-
-
-
-
-        if not file_path:
-            return  # User canceled the dialog
-
-        # Add .pt extension if not present
-        if not file_path.endswith('.pt'):
-            file_path += '.pt'
-
-        try:
-            # Move tensors to CPU before saving
+
+            if not file_path:
+                return
+
+            QApplication.setOverrideCursor(Qt.WaitCursor)
+
+            if not file_path.endswith('.pt'):
+                file_path += '.pt'
+
             vpe_list_cpu = [vpe.cpu() for vpe in all_vpes]
 
-            # Save the list of tensors
             torch.save(vpe_list_cpu, file_path)
 
             self.status_bar.setText(f"Saved {len(all_vpes)} VPE tensors to {os.path.basename(file_path)}")
+
+            QApplication.restoreOverrideCursor()
             QMessageBox.information(
                 self,
                 "VPE Saved",
                 f"Saved {len(all_vpes)} VPE tensors to {file_path}"
             )
+
         except Exception as e:
+            QApplication.restoreOverrideCursor()
             QMessageBox.critical(
                 self,
                 "Error Saving VPE",
                 f"Failed to save VPE: {str(e)}"
             )
+        finally:
+            try:
+                QApplication.restoreOverrideCursor()
+            except:
+                pass
 
     def load_model(self):
         """
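The rewritten `save_vpe` moves every tensor to the CPU before `torch.save`, so the resulting `.pt` file can be reloaded on a machine without the original GPU. A small sketch of that save path; the `save_vpes` helper and the `/tmp` path are illustrative:

```python
import torch

def save_vpes(vpes: list, file_path: str) -> None:
    """Serialize a list of VPE tensors, detached and on CPU, so the file is device-portable."""
    if not file_path.endswith(".pt"):
        file_path += ".pt"
    vpe_list_cpu = [vpe.detach().cpu() for vpe in vpes]
    torch.save(vpe_list_cpu, file_path)

# Round trip with dummy 512-dim embeddings (the width is an assumption):
dummy = [torch.randn(1, 512) for _ in range(3)]
save_vpes(dummy, "/tmp/example_vpes")
assert len(torch.load("/tmp/example_vpes.pt")) == 3
```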
@@ -948,7 +950,7 @@ class DeployGeneratorDialog(QDialog):
         self.model_path = self.model_combo.currentText()
 
         # Load model using registry
-        self.loaded_model = YOLOE(self.model_path, verbose=False).to(self.device)
+        self.loaded_model = YOLOE(self.model_path, verbose=False).to(self.device)
 
         # Create a dummy visual dictionary for standard model loading
         visual_prompts = dict(

@@ -966,7 +968,7 @@ class DeployGeneratorDialog(QDialog):
         self.loaded_model.predict(
             np.zeros((640, 640, 3), dtype=np.uint8),
             visual_prompts=visual_prompts.copy(),  # This needs to happen to properly initialize the predictor
-            predictor=
+            predictor=YOLOEVPDetectPredictor if self.task == "detect" else YOLOEVPSegPredictor,
             imgsz=640,
             conf=0.99,
         )
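Both dialogs now choose the visual-prompt predictor class from the task string at load time. A hedged sketch of that warm-up call against the public ultralytics YOLOE API; the checkpoint name, task value, and prompt coordinates are placeholders:

```python
import numpy as np
from ultralytics import YOLOE
from ultralytics.models.yolo.yoloe import YOLOEVPDetectPredictor, YOLOEVPSegPredictor

task = "detect"  # the dialog reads this from its task setting; "segment" picks the other class
model = YOLOE("yoloe-v8s-seg.pt")  # hypothetical checkpoint name

# One throwaway prediction on a black frame initializes the visual-prompt predictor,
# mirroring the warm-up call in the hunk above.
model.predict(
    np.zeros((640, 640, 3), dtype=np.uint8),
    visual_prompts=dict(bboxes=np.array([[10, 10, 100, 100]]), cls=np.array([0])),
    predictor=YOLOEVPDetectPredictor if task == "detect" else YOLOEVPSegPredictor,
    imgsz=640,
    conf=0.99,
)
```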
@@ -1108,6 +1110,9 @@ class DeployGeneratorDialog(QDialog):
         # We will predict on the first work area/full image.
         input_image = inputs[0]
 
+        # Set the predictor
+        predictor = YOLOEVPDetectPredictor if self.task == "detect" else YOLOEVPSegPredictor
+
         # Iterate through each reference image and its annotations
         for ref_path, ref_annotations in reference_dict.items():
             # The 'refer_image' parameter is the path to the current reference image

@@ -1123,7 +1128,7 @@ class DeployGeneratorDialog(QDialog):
             results = self.loaded_model.predict(input_image,
                                                 refer_image=ref_path,
                                                 visual_prompts=visual_prompts,
-                                                predictor=
+                                                predictor=predictor,
                                                 imgsz=self.imgsz_spinbox.value(),
                                                 conf=self.main_window.get_uncertainty_thresh(),
                                                 iou=self.main_window.get_iou_thresh(),

@@ -1156,6 +1161,59 @@ class DeployGeneratorDialog(QDialog):
 
         return [[combined_results]]
 
+    def generate_vpes_from_references(self):
+        """
+        Calculates VPEs from the currently highlighted reference images and
+        stores them in self.reference_vpes, overwriting any previous ones.
+        """
+        if not self.loaded_model:
+            QMessageBox.warning(self, "No Model Loaded", "A model must be loaded before generating VPEs.")
+            return
+
+        # Always sync with the live UI selection before generating.
+        self.update_stashed_references_from_ui()
+        references_dict = self._get_references()
+
+        if not references_dict:
+            QMessageBox.information(
+                self,
+                "No References Selected",
+                "Please highlight one or more reference images in the table to generate VPEs."
+            )
+            return
+
+        QApplication.setOverrideCursor(Qt.WaitCursor)
+        progress_bar = ProgressBar(self, title="Generating VPEs")
+        progress_bar.show()
+
+        try:
+            # Make progress bar busy
+            progress_bar.set_busy_mode("Generating VPEs...")
+            # Reload the model to ensure a clean state for VPE generation
+            self.reload_model()
+
+            # The references_to_vpe method will calculate and update self.reference_vpes
+            new_vpes = self.references_to_vpe(references_dict, update_reference_vpes=True)
+
+            if new_vpes:
+                num_vpes = len(new_vpes)
+                num_images = len(references_dict)
+                message = f"Successfully generated {num_vpes} VPEs from {num_images} reference image(s)."
+                self.status_bar.setText(message)
+                QMessageBox.information(self, "VPEs Generated", message)
+            else:
+                message = "Could not generate VPEs. Ensure annotations are valid."
+                self.status_bar.setText(message)
+                QMessageBox.warning(self, "Generation Failed", message)
+
+        except Exception as e:
+            QMessageBox.critical(self, "Error Generating VPEs", f"An unexpected error occurred: {str(e)}")
+            self.status_bar.setText("Error during VPE generation.")
+        finally:
+            QApplication.restoreOverrideCursor()
+            progress_bar.stop_progress()
+            progress_bar.close()
+
     def references_to_vpe(self, reference_dict, update_reference_vpes=True):
         """
         Converts the contents of a reference dictionary to VPEs (Visual Prompt Embeddings).
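The new `generate_vpes_from_references` method wraps its slow work in try/finally so the override cursor and progress bar are always torn down, even when VPE generation raises. The cursor half of that pattern, reduced to a reusable helper (the helper name is ours):

```python
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication

def run_with_busy_cursor(work):
    """Run a callable under the wait cursor, guaranteeing the cursor is restored."""
    QApplication.setOverrideCursor(Qt.WaitCursor)
    try:
        return work()
    finally:
        # Runs on success and on exception alike, so the UI never sticks in the busy state.
        QApplication.restoreOverrideCursor()

# Usage inside any slot, assuming a references_dict built elsewhere:
# new_vpes = run_with_busy_cursor(lambda: self.references_to_vpe(references_dict))
```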
@@ -1197,14 +1255,13 @@ class DeployGeneratorDialog(QDialog):
 
         return vpe_list
 
-    def _apply_model_using_vpe(self, inputs
+    def _apply_model_using_vpe(self, inputs):
         """
-        Apply the model to the inputs using
-        and reference annotations.
+        Apply the model to the inputs using pre-calculated VPEs from imported files
+        and/or generated from reference annotations.
 
         Args:
             inputs (list): List of input images.
-            references_dict (dict): Dictionary containing reference annotations for each image.
 
         Returns:
             list: List of prediction results.

@@ -1219,23 +1276,17 @@ class DeployGeneratorDialog(QDialog):
         if self.imported_vpes:
             combined_vpes.extend(self.imported_vpes)
 
-        #
-        if
-
-            reference_vpes = self.references_to_vpe(references_dict, update_reference_vpes=True)
-            if reference_vpes:
-                combined_vpes.extend(reference_vpes)
-        else:
-            # Use existing reference_vpes if we have them
-            if self.reference_vpes:
-                combined_vpes.extend(self.reference_vpes)
+        # Add pre-generated reference VPEs if available
+        if self.reference_vpes:
+            combined_vpes.extend(self.reference_vpes)
 
         # Check if we have any VPEs to use
         if not combined_vpes:
             QMessageBox.warning(
                 self,
                 "No VPEs Available",
-                "No VPEs available for prediction.
+                "No VPEs are available for prediction. "
+                "Please either load a VPE file or generate VPEs from reference images."
             )
             return []
 

@@ -1260,7 +1311,7 @@ class DeployGeneratorDialog(QDialog):
                                            retina_masks=self.task == "segment")
 
         return [results]
-
+
 
     def _apply_model(self, inputs):
         """
         Apply the model to the target inputs. This method handles both image-based

@@ -1278,21 +1329,9 @@ class DeployGeneratorDialog(QDialog):
 
         # Check if the user is using VPE or Reference Images
         if self.reference_method_combo_box.currentText() == "VPE":
-            #
-
-
-            # If we have reference images selected but no imported VPEs yet,
-            # warn the user only if we also don't have any reference images
-            if not has_vpes and not references_dict:
-                QMessageBox.warning(
-                    self,
-                    "No VPEs Available",
-                    "No VPEs available for prediction. Please either load a VPE file or select reference images."
-                )
-                return []
-
-            # Use the VPE method, which will combine imported and reference VPEs
-            results = self._apply_model_using_vpe(inputs, references_dict)
+            # The VPE method will use pre-loaded/pre-generated VPEs.
+            # The internal checks for whether any VPEs exist are now inside _apply_model_using_vpe.
+            results = self._apply_model_using_vpe(inputs)
         else:
             # Use Reference Images method - requires reference images
             if not references_dict:

@@ -1405,16 +1444,9 @@ class DeployGeneratorDialog(QDialog):
 
     def show_vpe(self):
         """
-        Show a visualization of the VPEs using PyQtGraph.
-        This method now always recalculates VPEs from the currently highlighted reference images.
+        Show a visualization of the currently stored VPEs using PyQtGraph.
         """
-        # Set cursor to busy while loading VPEs
-        QApplication.setOverrideCursor(Qt.WaitCursor)
-
         try:
-            # Always sync with the live UI selection before visualizing.
-            self.update_stashed_references_from_ui()
-
             vpes_with_source = []
 
             # 1. Add any VPEs that were loaded from a file

@@ -1422,37 +1454,35 @@ class DeployGeneratorDialog(QDialog):
                 for vpe in self.imported_vpes:
                     vpes_with_source.append((vpe, "Import"))
 
-            # 2.
-
-
-
-            if references_dict:
-                self.reload_model()
-                new_reference_vpes = self.references_to_vpe(references_dict, update_reference_vpes=True)
-                if new_reference_vpes:
-                    for vpe in new_reference_vpes:
-                        vpes_with_source.append((vpe, "Reference"))
+            # 2. Add any pre-generated VPEs from reference images
+            if self.reference_vpes:
+                for vpe in self.reference_vpes:
+                    vpes_with_source.append((vpe, "Reference"))
 
-            #
+            # 3. Check if there is anything to visualize
             if not vpes_with_source:
                 QMessageBox.warning(
                     self,
                     "No VPEs Available",
-                    "No VPEs available to visualize. Please
+                    "No VPEs available to visualize. Please load a VPE file or generate VPEs from references first."
                 )
                 return
 
-            #
+            # 4. Create the visualization dialog
             all_vpe_tensors = [vpe for vpe, source in vpes_with_source]
             averaged_vpe = torch.cat(all_vpe_tensors).mean(dim=0, keepdim=True)
             final_vpe = torch.nn.functional.normalize(averaged_vpe, p=2, dim=-1)
 
+            QApplication.setOverrideCursor(Qt.WaitCursor)
+
             dialog = VPEVisualizationDialog(vpes_with_source, final_vpe, self)
+            QApplication.restoreOverrideCursor()
+
             dialog.exec_()
 
-
-        # Always restore cursor, even if an exception occurs
+        except Exception as e:
             QApplication.restoreOverrideCursor()
+            QMessageBox.critical(self, "Error Visualizing VPE", f"An error occurred: {str(e)}")
 
     def deactivate_model(self):
         """
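`show_vpe` collapses all collected VPEs into one prototype embedding: concatenate, average over the prompt axis, then L2-normalize. The same reduction in isolation, assuming a 512-wide embedding for the dummy data:

```python
import torch
import torch.nn.functional as F

# Three dummy VPEs, each of shape (1, 512); the width is an assumption for the demo.
vpes = [torch.randn(1, 512) for _ in range(3)]

averaged = torch.cat(vpes).mean(dim=0, keepdim=True)  # (3, 512) -> (1, 512)
final_vpe = F.normalize(averaged, p=2, dim=-1)        # unit L2 norm per embedding

print(final_vpe.shape, final_vpe.norm(dim=-1))        # torch.Size([1, 512]) tensor([1.0000])
```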
coralnet_toolbox/SeeAnything/QtDeployPredictor.py

@@ -1,6 +1,5 @@
 import warnings
 
-import os
 import gc
 
 import numpy as np

@@ -14,7 +13,6 @@ from ultralytics.models.yolo.yoloe import YOLOEVPSegPredictor
 from ultralytics.models.yolo.yoloe import YOLOEVPDetectPredictor
 
 from PyQt5.QtCore import Qt
-from PyQt5.QtGui import QColor
 from PyQt5.QtWidgets import (QApplication, QComboBox, QDialog, QFormLayout,
                              QHBoxLayout, QLabel, QMessageBox, QPushButton,
                              QSlider, QSpinBox, QVBoxLayout, QGroupBox,

@@ -171,7 +169,7 @@ class DeployPredictorDialog(QDialog):
         # Image size control
         self.imgsz_spinbox = QSpinBox()
         self.imgsz_spinbox.setRange(512, 65536)
-        self.imgsz_spinbox.setSingleStep(
+        self.imgsz_spinbox.setSingleStep(1024)
         self.imgsz_spinbox.setValue(self.imgsz)
         layout.addRow("Image Size (imgsz)", self.imgsz_spinbox)
 

@@ -322,6 +320,7 @@ class DeployPredictorDialog(QDialog):
     def is_sam_model_deployed(self):
         """
         Check if the SAM model is deployed and update the checkbox state accordingly.
+        If SAM is enabled for polygons, sync and disable the imgsz spinbox.
 
         :return: Boolean indicating whether the SAM model is deployed
         """

@@ -334,9 +333,48 @@ class DeployPredictorDialog(QDialog):
             self.use_sam_dropdown.setCurrentText("False")
             QMessageBox.critical(self, "Error", "Please deploy the SAM model first.")
             return False
+
+        # Check if SAM polygons are enabled
+        if self.use_sam_dropdown.currentText() == "True":
+            # Sync the imgsz spinbox with SAM's value
+            self.imgsz_spinbox.setValue(self.sam_dialog.imgsz_spinbox.value())
+            # Disable the spinbox
+            self.imgsz_spinbox.setEnabled(False)
+
+            # Connect SAM's imgsz_spinbox valueChanged signal to update our value
+            # First disconnect any existing connection to avoid duplicates
+            try:
+                self.sam_dialog.imgsz_spinbox.valueChanged.disconnect(self.update_from_sam_imgsz)
+            except TypeError:
+                # No connection exists yet
+                pass
+
+            # Connect the signal
+            self.sam_dialog.imgsz_spinbox.valueChanged.connect(self.update_from_sam_imgsz)
+        else:
+            # Re-enable the spinbox when SAM polygons are disabled
+            self.imgsz_spinbox.setEnabled(True)
+
+            # Disconnect the signal when SAM is disabled
+            try:
+                self.sam_dialog.imgsz_spinbox.valueChanged.disconnect(self.update_from_sam_imgsz)
+            except TypeError:
+                # No connection exists
+                pass
 
         return True
 
+    def update_from_sam_imgsz(self, value):
+        """
+        Update the SeeAnything image size when SAM's image size changes.
+        Only takes effect when SAM polygons are enabled.
+
+        Args:
+            value (int): The new image size value from SAM dialog
+        """
+        if self.use_sam_dropdown.currentText() == "True":
+            self.imgsz_spinbox.setValue(value)
+
     def load_model(self):
         """
         Load the selected model.
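Disconnecting before connecting is the standard PyQt idiom for keeping a slot from being attached twice when `is_sam_model_deployed` runs repeatedly; in PyQt5, `disconnect()` raises `TypeError` when the slot is not attached, which the code treats as "nothing to do". A self-contained demonstration that repeated calls do not stack connections (widget names are illustrative):

```python
from PyQt5.QtWidgets import QApplication, QSpinBox

app = QApplication([])
source, mirror = QSpinBox(), QSpinBox()
source.setRange(0, 65536)
mirror.setRange(0, 65536)

calls = []

def sync(value: int) -> None:
    calls.append(value)
    mirror.setValue(value)

for _ in range(3):  # repeated calls must not stack connections
    try:
        source.valueChanged.disconnect(sync)
    except TypeError:
        pass  # nothing connected yet
    source.valueChanged.connect(sync)

source.setValue(2048)
assert calls == [2048]  # the slot fired once, not three times
assert mirror.value() == 2048
```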
@@ -368,11 +406,13 @@ class DeployPredictorDialog(QDialog):
         self.loaded_model.predict(
             np.zeros((640, 640, 3), dtype=np.uint8),
             visual_prompts=visuals.copy(),  # This needs to happen to properly initialize the predictor
-            predictor=
+            predictor=YOLOEVPDetectPredictor if self.task == 'detect' else YOLOEVPSegPredictor,
             imgsz=640,
             conf=0.99,
         )
-
+        # Finish the progress bar
+        progress_bar.finish_progress()
+        # Update the status bar
         self.status_bar.setText(f"Loaded ({self.model_path}")
         QMessageBox.information(self.annotation_window, "Model Loaded", "Model loaded successfully")
 

@@ -388,7 +428,7 @@ class DeployPredictorDialog(QDialog):
         progress_bar.stop_progress()
         progress_bar.close()
         progress_bar = None
-
+
     def resize_image(self, image):
         """
         Resize the image to the specified size.

@@ -503,12 +543,15 @@ class DeployPredictorDialog(QDialog):
 
         # Get the scaled visual prompts
         visual_prompts = self.scale_prompts(bboxes, masks)
+
+        # Set the predictor
+        predictor=YOLOEVPDetectPredictor if self.task == 'detect' else YOLOEVPSegPredictor
 
         try:
             # Make predictions
             results = self.loaded_model.predict(self.resized_image,
-                                                visual_prompts=visual_prompts.copy(),
-                                                predictor=
+                                                visual_prompts=visual_prompts.copy(),
+                                                predictor=predictor,
                                                 imgsz=max(self.resized_image.shape[:2]),
                                                 conf=self.main_window.get_uncertainty_thresh(),
                                                 iou=self.main_window.get_iou_thresh(),

@@ -573,6 +616,9 @@ class DeployPredictorDialog(QDialog):
         progress_bar = ProgressBar(self.annotation_window, title="Making Predictions")
         progress_bar.show()
         progress_bar.start_progress(len(target_images))
+
+        # Set the predictor
+        predictor = YOLOEVPDetectPredictor if self.task == 'detect' else YOLOEVPSegPredictor
 
         for target_image in target_images:
 

@@ -581,7 +627,7 @@ class DeployPredictorDialog(QDialog):
             results = self.loaded_model.predict(target_image,
                                                 refer_image=refer_image,
                                                 visual_prompts=visual_prompts.copy(),
-                                                predictor=
+                                                predictor=predictor,
                                                 imgsz=self.imgsz_spinbox.value(),
                                                 conf=self.main_window.get_uncertainty_thresh(),
                                                 iou=self.main_window.get_iou_thresh(),
coralnet_toolbox/Tile/QtTileBatchInference.py

@@ -37,8 +37,8 @@ class TileBatchInference(QDialog):
         self.detect_dialog = main_window.detect_deploy_model_dialog
         self.segment_dialog = main_window.segment_deploy_model_dialog
         self.sam_dialog = main_window.sam_deploy_generator_dialog
-        self.
-
+        self.transformers_dialog = main_window.transformers_deploy_model_dialog
+
 
         # Create a dictionary of the different model dialogs and their loaded models
         self.model_dialogs = {}

@@ -167,8 +167,8 @@ class TileBatchInference(QDialog):
             self.model_dialogs["Segment"] = self.segment_dialog
         if self.sam_dialog and getattr(self.sam_dialog, "loaded_model", None):
             self.model_dialogs["SAM Generator"] = self.sam_dialog
-        if self.
-            self.model_dialogs["
+        if self.transformers_dialog and getattr(self.transformers_dialog, "loaded_model", None):
+            self.model_dialogs["Transformers"] = self.transformers_dialog
 
         # Update the model combo box with the available models
         self.update_model_combo()
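The registry only lists dialogs whose model is actually loaded, using `getattr` with a default so a dialog that was never constructed (or lacks the attribute) is skipped instead of raising. A minimal sketch of the same guard with stand-in dialog objects:

```python
def build_model_registry(dialogs: dict) -> dict:
    """Keep only dialogs that exist and expose a loaded model."""
    return {
        name: dialog
        for name, dialog in dialogs.items()
        if dialog is not None and getattr(dialog, "loaded_model", None)
    }

class FakeDialog:
    def __init__(self, loaded_model=None):
        self.loaded_model = loaded_model

available = build_model_registry({
    "Detect": FakeDialog(loaded_model=object()),
    "Transformers": FakeDialog(),   # dialog exists but nothing is loaded
    "SAM Generator": None,          # dialog was never created
})
assert list(available) == ["Detect"]
```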
coralnet_toolbox/Tools/QtPolygonTool.py

@@ -133,12 +133,51 @@ class PolygonTool(Tool):
             return None
 
         # Create the annotation with current points
-        # The polygon simplification is now handled inside the PolygonAnnotation class
         if finished and len(self.points) > 2:
             # Close the polygon
             self.points.append(self.points[0])
-
-
+
+            # --- Validation for polygon size and shape ---
+            # Step 1: Remove duplicate or near-duplicate points
+            filtered_points = []
+            MIN_DISTANCE = 2.0  # Minimum distance between points in pixels
+
+            for i, point in enumerate(self.points):
+                # Skip if this point is too close to the previous one
+                if i > 0:
+                    prev_point = filtered_points[-1]
+                    distance = ((point.x() - prev_point.x())**2 + (point.y() - prev_point.y())**2)**0.5
+                    if distance < MIN_DISTANCE:
+                        continue
+                filtered_points.append(point)
+
+            # Step 2: Ensure we have enough points for a valid polygon
+            if len(filtered_points) < 4:  # Need at least 3 + 1 closing point
+                # Create a small triangle/square if we don't have enough points
+                if len(filtered_points) > 0:
+                    center_x = sum(p.x() for p in filtered_points) / len(filtered_points)
+                    center_y = sum(p.y() for p in filtered_points) / len(filtered_points)
+
+                    # Create a small polygon centered on the average of existing points
+                    MIN_SIZE = 5.0
+                    filtered_points = [
+                        QPointF(center_x - MIN_SIZE, center_y - MIN_SIZE),
+                        QPointF(center_x + MIN_SIZE, center_y - MIN_SIZE),
+                        QPointF(center_x + MIN_SIZE, center_y + MIN_SIZE),
+                        QPointF(center_x - MIN_SIZE, center_y + MIN_SIZE),
+                        QPointF(center_x - MIN_SIZE, center_y - MIN_SIZE)  # Close the polygon
+                    ]
+
+                    QMessageBox.information(
+                        self.annotation_window,
+                        "Polygon Adjusted",
+                        "The polygon had too few unique points and has been adjusted to a minimum size."
+                    )
+
+            # Use the filtered points list instead of the original
+            self.points = filtered_points
+
+            # Create the annotation with validated points
             annotation = PolygonAnnotation(self.points,
                                            self.annotation_window.selected_label.short_label_code,
                                            self.annotation_window.selected_label.long_label_code,