coralnet-toolbox 0.0.71-py2.py3-none-any.whl → 0.0.73-py2.py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
Files changed (39)
  1. coralnet_toolbox/Annotations/QtRectangleAnnotation.py +31 -2
  2. coralnet_toolbox/AutoDistill/QtDeployModel.py +23 -12
  3. coralnet_toolbox/Explorer/QtDataItem.py +53 -21
  4. coralnet_toolbox/Explorer/QtExplorer.py +581 -276
  5. coralnet_toolbox/Explorer/QtFeatureStore.py +15 -0
  6. coralnet_toolbox/Explorer/QtSettingsWidgets.py +49 -7
  7. coralnet_toolbox/MachineLearning/DeployModel/QtDetect.py +22 -11
  8. coralnet_toolbox/MachineLearning/DeployModel/QtSegment.py +22 -10
  9. coralnet_toolbox/MachineLearning/ExportDataset/QtBase.py +61 -24
  10. coralnet_toolbox/MachineLearning/ExportDataset/QtClassify.py +5 -1
  11. coralnet_toolbox/MachineLearning/ExportDataset/QtDetect.py +19 -6
  12. coralnet_toolbox/MachineLearning/ExportDataset/QtSegment.py +21 -8
  13. coralnet_toolbox/QtAnnotationWindow.py +52 -16
  14. coralnet_toolbox/QtEventFilter.py +8 -2
  15. coralnet_toolbox/QtImageWindow.py +17 -18
  16. coralnet_toolbox/QtLabelWindow.py +1 -1
  17. coralnet_toolbox/QtMainWindow.py +203 -8
  18. coralnet_toolbox/Rasters/QtRaster.py +59 -7
  19. coralnet_toolbox/Rasters/RasterTableModel.py +34 -6
  20. coralnet_toolbox/SAM/QtBatchInference.py +0 -2
  21. coralnet_toolbox/SAM/QtDeployGenerator.py +22 -11
  22. coralnet_toolbox/SeeAnything/QtBatchInference.py +19 -221
  23. coralnet_toolbox/SeeAnything/QtDeployGenerator.py +1016 -0
  24. coralnet_toolbox/SeeAnything/QtDeployPredictor.py +69 -53
  25. coralnet_toolbox/SeeAnything/QtTrainModel.py +115 -45
  26. coralnet_toolbox/SeeAnything/__init__.py +2 -0
  27. coralnet_toolbox/Tools/QtResizeSubTool.py +6 -1
  28. coralnet_toolbox/Tools/QtSAMTool.py +150 -7
  29. coralnet_toolbox/Tools/QtSeeAnythingTool.py +220 -55
  30. coralnet_toolbox/Tools/QtSelectSubTool.py +6 -4
  31. coralnet_toolbox/Tools/QtSelectTool.py +48 -6
  32. coralnet_toolbox/Tools/QtWorkAreaTool.py +25 -13
  33. coralnet_toolbox/__init__.py +1 -1
  34. {coralnet_toolbox-0.0.71.dist-info → coralnet_toolbox-0.0.73.dist-info}/METADATA +1 -1
  35. {coralnet_toolbox-0.0.71.dist-info → coralnet_toolbox-0.0.73.dist-info}/RECORD +39 -38
  36. {coralnet_toolbox-0.0.71.dist-info → coralnet_toolbox-0.0.73.dist-info}/WHEEL +0 -0
  37. {coralnet_toolbox-0.0.71.dist-info → coralnet_toolbox-0.0.73.dist-info}/entry_points.txt +0 -0
  38. {coralnet_toolbox-0.0.71.dist-info → coralnet_toolbox-0.0.73.dist-info}/licenses/LICENSE.txt +0 -0
  39. {coralnet_toolbox-0.0.71.dist-info → coralnet_toolbox-0.0.73.dist-info}/top_level.txt +0 -0
coralnet_toolbox/Explorer/QtFeatureStore.py
@@ -149,6 +149,21 @@ class FeatureStore:
         index_path = f"{self.index_path_base}_{model_key}.faiss"
         print(f"Saving FAISS index for '{model_key}' to {index_path}")
         faiss.write_index(index_to_save, index_path)
+
+    def remove_features_for_annotation(self, annotation_id):
+        """
+        Removes an annotation's feature metadata from the SQLite database.
+        This effectively orphans the vector in the FAISS index, invalidating it.
+        """
+        try:
+            self.cursor.execute(
+                "DELETE FROM features WHERE annotation_id = ?",
+                (annotation_id,)
+            )
+            self.conn.commit()
+            print(f"Invalidated features for annotation_id: {annotation_id}")
+        except sqlite3.Error as e:
+            print(f"Error removing feature for annotation {annotation_id}: {e}")
 
     def close(self):
         """Closes the database connection."""
coralnet_toolbox/Explorer/QtSettingsWidgets.py
@@ -4,7 +4,7 @@ import warnings
 from PyQt5.QtCore import Qt, pyqtSignal, pyqtSlot
 from PyQt5.QtWidgets import (QVBoxLayout, QHBoxLayout, QPushButton, QComboBox, QLabel,
                              QWidget, QGroupBox, QSlider, QListWidget, QTabWidget,
-                             QLineEdit, QFileDialog, QFormLayout, QSpinBox)
+                             QLineEdit, QFileDialog, QFormLayout, QSpinBox, QDoubleSpinBox)
 
 from coralnet_toolbox.MachineLearning.Community.cfg import get_available_configs
 
@@ -189,6 +189,48 @@ class SimilaritySettingsWidget(QWidget):
             'k': self.k_spinbox.value()
         }
 
+
+class DuplicateSettingsWidget(QWidget):
+    """Widget for configuring duplicate detection parameters."""
+    parameters_changed = pyqtSignal(dict)
+
+    def __init__(self, parent=None):
+        super(DuplicateSettingsWidget, self).__init__(parent)
+        layout = QVBoxLayout(self)
+        layout.setContentsMargins(10, 10, 10, 10)
+
+        # Using a DoubleSpinBox for the distance threshold
+        self.threshold_spinbox = QDoubleSpinBox()
+        self.threshold_spinbox.setDecimals(3)
+        self.threshold_spinbox.setRange(0.0, 10.0)
+        self.threshold_spinbox.setSingleStep(0.01)
+        self.threshold_spinbox.setValue(0.1)  # Default value for squared L2 distance
+        self.threshold_spinbox.setToolTip(
+            "Similarity Threshold (Squared L2 Distance).\n"
+            "Lower values mean more similar.\n"
+            "A value of 0 means identical features."
+        )
+
+        self.threshold_spinbox.valueChanged.connect(self._emit_parameters)
+
+        form_layout = QHBoxLayout()
+        form_layout.addWidget(QLabel("Threshold:"))
+        form_layout.addWidget(self.threshold_spinbox)
+        layout.addLayout(form_layout)
+
+    def _emit_parameters(self):
+        """Emits the current parameters."""
+        params = {
+            'threshold': self.threshold_spinbox.value()
+        }
+        self.parameters_changed.emit(params)
+
+    def get_parameters(self):
+        """Returns the current parameters as a dictionary."""
+        return {
+            'threshold': self.threshold_spinbox.value()
+        }
+
 
 class AnnotationSettingsWidget(QGroupBox):
     """Widget for filtering annotations by image, type, and label in a multi-column layout."""
@@ -213,7 +255,7 @@ class AnnotationSettingsWidget(QGroupBox):
         images_column.addWidget(images_label)
 
         self.images_list = QListWidget()
-        self.images_list.setSelectionMode(QListWidget.MultiSelection)
+        self.images_list.setSelectionMode(QListWidget.ExtendedSelection)
         self.images_list.setMaximumHeight(50)
 
         if hasattr(self.main_window, 'image_window') and hasattr(self.main_window.image_window, 'raster_manager'):
@@ -241,7 +283,7 @@ class AnnotationSettingsWidget(QGroupBox):
         type_column.addWidget(type_label)
 
         self.annotation_type_list = QListWidget()
-        self.annotation_type_list.setSelectionMode(QListWidget.MultiSelection)
+        self.annotation_type_list.setSelectionMode(QListWidget.ExtendedSelection)
         self.annotation_type_list.setMaximumHeight(50)
         self.annotation_type_list.addItems(["PatchAnnotation",
                                             "RectangleAnnotation",
@@ -269,7 +311,7 @@ class AnnotationSettingsWidget(QGroupBox):
         label_column.addWidget(label_label)
 
         self.label_list = QListWidget()
-        self.label_list.setSelectionMode(QListWidget.MultiSelection)
+        self.label_list.setSelectionMode(QListWidget.ExtendedSelection)
         self.label_list.setMaximumHeight(50)
 
         if hasattr(self.main_window, 'label_window') and hasattr(self.main_window.label_window, 'labels'):
@@ -670,8 +712,8 @@ class EmbeddingSettingsWidget(QGroupBox):
 
     def apply_embedding(self):
         if self.explorer_window and hasattr(self.explorer_window, 'run_embedding_pipeline'):
-            # Clear all selections before running embedding pipeline
-            if hasattr(self.explorer_window, 'handle_selection_change'):
-                self.explorer_window.handle_selection_change([])
+            # Clear all selections before running a new embedding pipeline.
+            if hasattr(self.explorer_window, '_clear_selections'):
+                self.explorer_window._clear_selections()
 
             self.explorer_window.run_embedding_pipeline()
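The new DuplicateSettingsWidget only collects a threshold on squared L2 distance; the duplicate search itself lives in QtExplorer.py and is not part of this hunk. The snippet below is a small sketch of what the default threshold of 0.1 means for a set of feature vectors, using toy numbers and NumPy (assumed available); it is illustrative only, not the Explorer's code.

import numpy as np

# Toy 4-D feature vectors standing in for extracted embeddings.
feats = np.array([[0.10, 0.20, 0.30, 0.40],
                  [0.11, 0.19, 0.30, 0.41],   # near-duplicate of row 0
                  [0.90, 0.10, 0.05, 0.70]])

threshold = 0.1  # same default as the widget's spinbox

# Pairwise squared L2 distances: ||a - b||^2
diff = feats[:, None, :] - feats[None, :, :]
sq_l2 = (diff ** 2).sum(axis=-1)

pairs = [(i, j) for i in range(len(feats)) for j in range(i + 1, len(feats))
         if sq_l2[i, j] <= threshold]
print(pairs)  # [(0, 1)]: only the near-identical pair falls under the threshold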
coralnet_toolbox/MachineLearning/DeployModel/QtDetect.py
@@ -123,18 +123,29 @@ class Detect(Base):
 
     def update_sam_task_state(self):
         """
-        Centralized method to check if SAM is loaded and update task and dropdown accordingly.
+        Centralized method to check if SAM is loaded and update task accordingly.
+        If the user has selected to use SAM, this function ensures the task is set to 'segment'.
+        Crucially, it does NOT alter the task if SAM is not selected, respecting the
+        user's choice from the 'Task' dropdown.
         """
-        sam_active = (
-            self.sam_dialog is not None and
-            self.sam_dialog.loaded_model is not None and
-            self.use_sam_dropdown.currentText() == "True"
-        )
-        if sam_active:
-            self.task = 'segment'
-        else:
-            self.task = 'detect'
-            self.use_sam_dropdown.setCurrentText("False")
+        # Check if the user wants to use the SAM model
+        if self.use_sam_dropdown.currentText() == "True":
+            # SAM is requested. Check if it's actually available.
+            sam_is_available = (
+                hasattr(self, 'sam_dialog') and
+                self.sam_dialog is not None and
+                self.sam_dialog.loaded_model is not None
+            )
+
+            if sam_is_available:
+                # If SAM is wanted and available, the task must be segmentation.
+                self.task = 'segment'
+            else:
+                # If SAM is wanted but not available, revert the dropdown and do nothing else.
+                # The 'is_sam_model_deployed' function already handles showing an error message.
+                self.use_sam_dropdown.setCurrentText("False")
+
+        # If use_sam_dropdown is "False", do nothing. Let self.task be whatever the user set.
 
     def load_model(self):
         """
coralnet_toolbox/MachineLearning/DeployModel/QtSegment.py
@@ -123,17 +123,29 @@ class Segment(Base):
 
     def update_sam_task_state(self):
         """
-        Centralized method to check if SAM is loaded and update task and dropdown accordingly.
+        Centralized method to check if SAM is loaded and update task accordingly.
+        If the user has selected to use SAM, this function ensures the task is set to 'segment'.
+        Crucially, it does NOT alter the task if SAM is not selected, respecting the
+        user's choice from the 'Task' dropdown.
         """
-        sam_active = (
-            self.sam_dialog is not None and
-            self.sam_dialog.loaded_model is not None and
-            self.use_sam_dropdown.currentText() == "True"
-        )
-        if sam_active:
-            self.task = 'segment'
-        else:
-            self.use_sam_dropdown.setCurrentText("False")
+        # Check if the user wants to use the SAM model
+        if self.use_sam_dropdown.currentText() == "True":
+            # SAM is requested. Check if it's actually available.
+            sam_is_available = (
+                hasattr(self, 'sam_dialog') and
+                self.sam_dialog is not None and
+                self.sam_dialog.loaded_model is not None
+            )
+
+            if sam_is_available:
+                # If SAM is wanted and available, the task must be segmentation.
+                self.task = 'segment'
+            else:
+                # If SAM is wanted but not available, revert the dropdown and do nothing else.
+                # The 'is_sam_model_deployed' function already handles showing an error message.
+                self.use_sam_dropdown.setCurrentText("False")
+
+        # If use_sam_dropdown is "False", do nothing. Let self.task be whatever the user set.
 
     def load_model(self):
         """
coralnet_toolbox/MachineLearning/ExportDataset/QtBase.py
@@ -42,7 +42,7 @@ class Base(QDialog):
         self.annotation_window = main_window.annotation_window
         self.image_window = main_window.image_window
 
-        self.resize(1000, 600)
+        self.resize(1000, 800)
         self.setWindowIcon(get_icon("coral.png"))
         self.setWindowTitle("Export Dataset")
 
@@ -64,10 +64,8 @@ class Base(QDialog):
         self.setup_output_layout()
         # Setup the ratio layout
         self.setup_ratio_layout()
-        # Setup the annotation layout
-        self.setup_annotation_layout()
-        # Setup the options layout
-        self.setup_options_layout()
+        # Setup the data selection layout
+        self.setup_data_selection_layout()
         # Setup the table layout
         self.setup_table_layout()
         # Setup the status layout
@@ -147,10 +145,25 @@ class Base(QDialog):
         group_box.setLayout(layout)
         self.layout.addWidget(group_box)
 
-    def setup_annotation_layout(self):
-        """Setup the annotation type checkboxes layout."""
+    def setup_data_selection_layout(self):
+        """Setup the layout for data selection options in a horizontal arrangement."""
+        options_layout = QHBoxLayout()
+
+        # Create and add the group boxes
+        annotation_types_group = self.create_annotation_layout()
+        image_options_group = self.create_image_source_layout()
+        negative_samples_group = self.create_negative_samples_layout()
+
+        options_layout.addWidget(annotation_types_group)
+        options_layout.addWidget(image_options_group)
+        options_layout.addWidget(negative_samples_group)
+
+        self.layout.addLayout(options_layout)
+
+    def create_annotation_layout(self):
+        """Creates the annotation type checkboxes layout group box."""
         group_box = QGroupBox("Annotation Types")
-        layout = QHBoxLayout()
+        layout = QVBoxLayout()
 
         self.include_patches_checkbox = QCheckBox("Include Patch Annotations")
         self.include_rectangles_checkbox = QCheckBox("Include Rectangle Annotations")
@@ -161,30 +174,24 @@
         layout.addWidget(self.include_polygons_checkbox)
 
         group_box.setLayout(layout)
-        self.layout.addWidget(group_box)
+        return group_box
 
-    def setup_options_layout(self):
-        """Setup the image options layout."""
-        group_box = QGroupBox("Image Options")
-        layout = QHBoxLayout()  # Changed from QVBoxLayout to QHBoxLayout
+    def create_image_source_layout(self):
+        """Creates the image source options layout group box."""
+        group_box = QGroupBox("Image Source")
+        layout = QVBoxLayout()
 
-        # Create a button group for the image checkboxes
         self.image_options_group = QButtonGroup(self)
 
         self.all_images_radio = QRadioButton("All Images")
         self.filtered_images_radio = QRadioButton("Filtered Images")
 
-        # Add the radio buttons to the button group
         self.image_options_group.addButton(self.all_images_radio)
         self.image_options_group.addButton(self.filtered_images_radio)
-
-        # Ensure only one radio button can be checked at a time
         self.image_options_group.setExclusive(True)
 
-        # Set the default radio button
         self.all_images_radio.setChecked(True)
 
-        # Connect radio button signals
         self.all_images_radio.toggled.connect(self.update_image_selection)
         self.filtered_images_radio.toggled.connect(self.update_image_selection)
 
@@ -192,7 +199,32 @@
         layout.addWidget(self.filtered_images_radio)
 
         group_box.setLayout(layout)
-        self.layout.addWidget(group_box)
+        return group_box
+
+    def create_negative_samples_layout(self):
+        """Creates the negative sample options layout group box."""
+        group_box = QGroupBox("Negative Samples")
+        layout = QVBoxLayout()
+
+        self.negative_samples_group = QButtonGroup(self)
+
+        self.include_negatives_radio = QRadioButton("Include Negatives")
+        self.exclude_negatives_radio = QRadioButton("Exclude Negatives")
+
+        self.negative_samples_group.addButton(self.include_negatives_radio)
+        self.negative_samples_group.addButton(self.exclude_negatives_radio)
+        self.negative_samples_group.setExclusive(True)
+
+        self.exclude_negatives_radio.setChecked(True)
+
+        # Connect to update stats when changed. Only one needed for the group.
+        self.include_negatives_radio.toggled.connect(self.update_summary_statistics)
+
+        layout.addWidget(self.include_negatives_radio)
+        layout.addWidget(self.exclude_negatives_radio)
+
+        group_box.setLayout(layout)
+        return group_box
 
     def setup_table_layout(self):
         """Setup the label counts table layout."""
@@ -424,6 +456,11 @@
         else:
            images = self.image_window.raster_manager.image_paths
 
+        # If "Exclude Negatives" is checked, only use images that have selected annotations.
+        if self.exclude_negatives_radio.isChecked():
+            image_paths_with_annotations = {a.image_path for a in self.selected_annotations}
+            images = [img for img in images if img in image_paths_with_annotations]
+
         random.shuffle(images)
 
         train_split = int(len(images) * self.train_ratio)
@@ -551,9 +588,6 @@
 
         self.updating_summary_statistics = True
 
-        # Split the data by images
-        self.split_data()
-
         # Selected labels based on user's selection
         self.selected_labels = []
         for row in range(self.label_counts_table.rowCount()):
@@ -564,6 +598,9 @@
 
         # Filter annotations based on the selected annotation types and current tab
         self.selected_annotations = self.filter_annotations()
+
+        # Split the data by images
+        self.split_data()
 
         # Split the data by annotations
         self.determine_splits()
@@ -704,4 +741,4 @@
         raise NotImplementedError("Method must be implemented in the subclass.")
 
     def process_annotations(self, annotations, split_dir, split):
-        raise NotImplementedError("Method must be implemented in the subclass.")
+        raise NotImplementedError("Method must be implemented in the subclass.")
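With the QtBase changes above, update_summary_statistics() now filters annotations before calling split_data(), and split_data() drops images without any selected annotations when "Exclude Negatives" is checked. A self-contained sketch of that ordering is shown below, with made-up paths, a dict stand-in for annotation objects, and a fixed seed; it mirrors the logic only and is not the toolbox's code.

import random

images = [f"img_{i}.jpg" for i in range(10)]
# Hypothetical: only half of the images actually contain selected annotations.
selected_annotations = [{"image_path": p} for p in images[:5] for _ in range(2)]

def split_images(images, selected_annotations, exclude_negatives,
                 train_ratio=0.7, val_ratio=0.2, seed=42):
    # Mirror of split_data(): optionally keep only images that own annotations.
    if exclude_negatives:
        annotated = {a["image_path"] for a in selected_annotations}
        images = [p for p in images if p in annotated]
    images = images[:]  # don't shuffle the caller's list in place
    random.Random(seed).shuffle(images)
    n_train = int(len(images) * train_ratio)
    n_val = int(len(images) * (train_ratio + val_ratio))
    return images[:n_train], images[n_train:n_val], images[n_val:]

train, val, test = split_images(images, selected_annotations, exclude_negatives=True)
print(len(train), len(val), len(test))  # 3 1 1 -- only the 5 annotated images are split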
coralnet_toolbox/MachineLearning/ExportDataset/QtClassify.py
@@ -60,6 +60,10 @@ class Classify(Base):
         self.include_polygons_checkbox.setChecked(True)
         self.include_polygons_checkbox.setEnabled(True)
 
+        # Disable negative sample options for classification
+        self.include_negatives_radio.setEnabled(False)
+        self.exclude_negatives_radio.setEnabled(False)
+
     def create_dataset(self, output_dir_path):
         """
         Create an image classification dataset.
@@ -219,4 +223,4 @@ class Classify(Base):
         progress_bar.stop_progress()
         progress_bar.close()
         progress_bar = None
-        gc.collect()
+        gc.collect()
coralnet_toolbox/MachineLearning/ExportDataset/QtDetect.py
@@ -53,6 +53,10 @@ class Detect(Base):
         self.include_rectangles_checkbox.setEnabled(True)  # Enable user to uncheck rectangles if desired
         self.include_polygons_checkbox.setChecked(True)
         self.include_polygons_checkbox.setEnabled(True)  # Already enabled
+
+        # Explicitly enable negative sample options for detection
+        self.include_negatives_radio.setEnabled(True)
+        self.exclude_negatives_radio.setEnabled(True)
 
     def create_dataset(self, output_dir_path):
         """
@@ -106,12 +110,20 @@
         Process and save detection annotations.
 
         Args:
-            annotations (list): List of annotations.
+            annotations (list): List of annotations for this split.
            split_dir (str): Path to the split directory.
            split (str): Split name (e.g., "Training", "Validation", "Testing").
        """
-        # Get unique image paths
-        image_paths = list(set(a.image_path for a in annotations))
+        # Determine the full list of images for this split (including negatives)
+        if split == "Training":
+            image_paths = self.train_images
+        elif split == "Validation":
+            image_paths = self.val_images
+        elif split == "Testing":
+            image_paths = self.test_images
+        else:
+            image_paths = []
+
         if not image_paths:
             return
 
@@ -124,6 +136,7 @@
         for image_path in image_paths:
             yolo_annotations = []
             image_height, image_width = rasterio_open(image_path).shape
+            # Filter the annotations passed to this function to get only those for the current image
            image_annotations = [a for a in annotations if a.image_path == image_path]
 
             for image_annotation in image_annotations:
@@ -132,11 +145,11 @@
                 yolo_annotations.append(f"{class_number} {annotation}")
 
             # Save the annotations to a text file
-            file_ext = image_path.split(".")[-1]
-            text_file = os.path.basename(image_path).replace(f".{file_ext}", ".txt")
+            file_ext = os.path.splitext(image_path)[1]
+            text_file = os.path.basename(image_path).replace(file_ext, ".txt")
             text_path = os.path.join(f"{split_dir}/labels", text_file)
 
-            # Write the annotations to the text file
+            # Write the annotations to the text file (creates an empty file for negatives)
             with open(text_path, 'w') as f:
                 for annotation in yolo_annotations:
                     f.write(annotation + '\n')
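Two details of the process_annotations() changes above are easy to miss: the label filename now comes from os.path.splitext(...)[1] rather than splitting the full path on dots, and negative images now produce an empty labels/*.txt file, which is how YOLO-style trainers learn that an image was seen but contains no objects. A quick sketch with hypothetical paths:

import os
import tempfile

def label_file_for(image_path):
    # New naming scheme from the diff: swap the real extension for ".txt".
    file_ext = os.path.splitext(image_path)[1]
    return os.path.basename(image_path).replace(file_ext, ".txt")

print(label_file_for("/data/site_01/IMG_0001.JPG"))  # IMG_0001.txt
print(label_file_for("/data/reef.mosaic.v2.png"))    # reef.mosaic.v2.txt

# Negative sample: an empty label file, no annotation lines.
labels_dir = tempfile.mkdtemp()
with open(os.path.join(labels_dir, label_file_for("negative_frame.png")), "w") as f:
    pass
print(os.listdir(labels_dir))  # ['negative_frame.txt']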
coralnet_toolbox/MachineLearning/ExportDataset/QtSegment.py
@@ -53,6 +53,10 @@ class Segment(Base):
         self.include_rectangles_checkbox.setEnabled(True)  # Enable rectangles for segmentation
         self.include_polygons_checkbox.setChecked(True)
         self.include_polygons_checkbox.setEnabled(True)  # Enable user to uncheck polygons if desired
+
+        # Explicitly enable negative sample options for segmentation
+        self.include_negatives_radio.setEnabled(True)
+        self.exclude_negatives_radio.setEnabled(True)
 
     def create_dataset(self, output_dir_path):
         """
@@ -106,12 +110,20 @@
         Process and save segmentation annotations.
 
         Args:
-            annotations (list): List of annotations.
+            annotations (list): List of annotations for this split.
            split_dir (str): Path to the split directory.
            split (str): Split name (e.g., "Training", "Validation", "Testing").
        """
-        # Get unique image paths
-        image_paths = list(set(a.image_path for a in annotations))
+        # Determine the full list of images for this split (including negatives)
+        if split == "Training":
+            image_paths = self.train_images
+        elif split == "Validation":
+            image_paths = self.val_images
+        elif split == "Testing":
+            image_paths = self.test_images
+        else:
+            image_paths = []
+
         if not image_paths:
             return
 
@@ -124,6 +136,7 @@
         for image_path in image_paths:
             yolo_annotations = []
             image_height, image_width = rasterio_open(image_path).shape
+            # Filter the annotations passed to this function to get only those for the current image
            image_annotations = [a for a in annotations if a.image_path == image_path]
 
             for image_annotation in image_annotations:
@@ -132,11 +145,11 @@
                 yolo_annotations.append(f"{class_number} {annotation}")
 
             # Save the annotations to a text file
-            file_ext = image_path.split(".")[-1]
-            text_file = os.path.basename(image_path).replace(f".{file_ext}", ".txt")
+            file_ext = os.path.splitext(image_path)[1]
+            text_file = os.path.basename(image_path).replace(file_ext, ".txt")
             text_path = os.path.join(f"{split_dir}/labels", text_file)
 
-            # Write the annotations to the text file
+            # Write the annotations to the text file (creates an empty file for negatives)
             with open(text_path, 'w') as f:
                 for annotation in yolo_annotations:
                     f.write(annotation + '\n')
@@ -146,7 +159,7 @@
 
             progress_bar.update_progress()
 
-        # Make cursor normal
+        # Reset cursor
         QApplication.restoreOverrideCursor()
         progress_bar.stop_progress()
-        progress_bar.close()
+        progress_bar.close()
coralnet_toolbox/QtAnnotationWindow.py
@@ -48,6 +48,7 @@ class AnnotationWindow(QGraphicsView):
     annotationSelected = pyqtSignal(int)  # Signal to emit when annotation is selected
     annotationDeleted = pyqtSignal(str)  # Signal to emit when annotation is deleted
     annotationCreated = pyqtSignal(str)  # Signal to emit when annotation is created
+    annotationModified = pyqtSignal(str)  # Signal to emit when annotation is modified
 
     def __init__(self, main_window, parent=None):
         """Initialize the annotation window with the main window and parent widget."""
@@ -374,6 +375,9 @@
 
     def set_image(self, image_path):
         """Set and display an image at the given path."""
+        # Calculate GDIs for Windows if needed
+        self.main_window.check_windows_gdi_count()
+
         # Clean up
         self.clear_scene()
 
@@ -404,10 +408,6 @@
 
         self.toggle_cursor_annotation()
 
-        # Set the image dimensions, and current view in status bar
-        self.imageLoaded.emit(self.pixmap_image.width(), self.pixmap_image.height())
-        self.viewChanged.emit(self.pixmap_image.width(), self.pixmap_image.height())
-
         # Load all associated annotations
         self.load_annotations()
         # Update the image window's image annotations
@@ -417,6 +417,10 @@
 
         QApplication.processEvents()
 
+        # Set the image dimensions, and current view in status bar
+        self.imageLoaded.emit(self.pixmap_image.width(), self.pixmap_image.height())
+        self.viewChanged.emit(self.pixmap_image.width(), self.pixmap_image.height())
+
     def update_current_image_path(self, image_path):
         """Update the current image path being displayed."""
         self.current_image_path = image_path
@@ -462,29 +466,57 @@
         self.centerOn(annotation_center)
 
     def center_on_annotation(self, annotation):
-        """Center and zoom in to focus on the specified annotation."""
+        """Center and zoom in to focus on the specified annotation with dynamic padding."""
         # Create graphics item if it doesn't exist
         if not annotation.graphics_item:
             annotation.create_graphics_item(self.scene)
 
         # Get the bounding rect of the annotation in scene coordinates
         annotation_rect = annotation.graphics_item.boundingRect()
-
-        # Add some padding around the annotation (20% on each side)
-        padding_x = annotation_rect.width() * 0.2
-        padding_y = annotation_rect.height() * 0.2
+
+        # Step 1: Calculate annotation and image area
+        annotation_area = annotation_rect.width() * annotation_rect.height()
+        if self.pixmap_image:
+            image_width = self.pixmap_image.width()
+            image_height = self.pixmap_image.height()
+        else:
+            # Fallback to scene rect if image not loaded
+            image_width = self.scene.sceneRect().width()
+            image_height = self.scene.sceneRect().height()
+        image_area = image_width * image_height
+
+        # Step 2: Compute the relative area ratio (avoid division by zero)
+        if image_area > 0:
+            relative_area = annotation_area / image_area
+        else:
+            relative_area = 1.0  # fallback, treat as full image
+
+        # Step 3: Map ratio to padding factor (smaller annotation = more padding)
+        # Example: padding_factor = clamp(0.5 * (1/relative_area)**0.5, 0.1, 0.5)
+        # - For very small annotations, padding approaches 0.5 (50%)
+        # - For large annotations, padding approaches 0.1 (10%)
+        import math
+        min_padding = 0.1  # 10%
+        max_padding = 0.5  # 50%
+        if relative_area > 0:
+            padding_factor = max(min(0.5 * (1 / math.sqrt(relative_area)), max_padding), min_padding)
+        else:
+            padding_factor = min_padding
+
+        # Step 4: Apply dynamic padding
+        padding_x = annotation_rect.width() * padding_factor
+        padding_y = annotation_rect.height() * padding_factor
         padded_rect = annotation_rect.adjusted(-padding_x, -padding_y, padding_x, padding_y)
-
+
         # Fit the padded annotation rect in the view
         self.fitInView(padded_rect, Qt.KeepAspectRatio)
-
+
         # Update the zoom factor based on the new view transformation
-        # We can calculate this by comparing the viewport size to the scene rect size
         view_rect = self.viewport().rect()
-        zoom_x = view_rect.width() / padded_rect.width()
+        zoom_x = view_rect.width() / padded_rect.width()
         zoom_y = view_rect.height() / padded_rect.height()
         self.zoom_factor = min(zoom_x, zoom_y)
-
+
         # Signal that the view has changed
         self.viewChanged.emit(*self.get_image_dimensions())
 
@@ -543,7 +575,7 @@
             return type(self.selected_annotations[0])
         return None
 
-    def select_annotation(self, annotation, multi_select=False):
+    def select_annotation(self, annotation, multi_select=False, quiet_mode=False):
         """Select an annotation and update the UI accordingly."""
         # If the annotation is already selected and Ctrl is pressed, unselect it
         if annotation in self.selected_annotations and multi_select:
@@ -569,7 +601,11 @@
 
         # If this is the only selected annotation, update label window and confidence window
         if len(self.selected_annotations) == 1:
-            self.labelSelected.emit(annotation.label.id)
+
+            if not quiet_mode:
+                # Emit the label selected signal, unless in quiet mode.
+                # This is in Explorer to avoid overwriting preview label.
+                self.labelSelected.emit(annotation.label.id)
 
         # Make sure we have a cropped image
         if not annotation.cropped_image:
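center_on_annotation() now derives its padding from the annotation's area relative to the image: padding_factor = clamp(0.5 / sqrt(annotation_area / image_area), 0.1, 0.5). Below is a worked sketch of just that mapping as a standalone function (not toolbox code), with a few example values for a 1000x1000 image.

import math

def padding_factor(annotation_area, image_area, lo=0.1, hi=0.5):
    # Same mapping as center_on_annotation() in the diff above.
    if image_area <= 0 or annotation_area <= 0:
        return lo
    relative_area = annotation_area / image_area
    return max(min(0.5 / math.sqrt(relative_area), hi), lo)

print(padding_factor(50 * 50, 1_000_000))    # 0.5  (tiny box: 0.5/sqrt(0.0025) = 10, clamped to hi)
print(padding_factor(800 * 800, 1_000_000))  # 0.5  (large box: 0.5/sqrt(0.64) = 0.625, clamped to hi)
print(padding_factor(4_000_000, 1_000_000))  # 0.25 (bounding rect 4x the image area)
# Observation: whenever the bounding rect is no larger than the image, relative_area <= 1,
# so 0.5/sqrt(relative_area) >= 0.5 and the factor clamps to the 0.5 ceiling.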
coralnet_toolbox/QtEventFilter.py
@@ -23,6 +23,7 @@ class GlobalEventFilter(QObject):
         self.detect_deploy_model_dialog = main_window.detect_deploy_model_dialog
         self.segment_deploy_model_dialog = main_window.segment_deploy_model_dialog
         self.sam_deploy_generator_dialog = main_window.sam_deploy_generator_dialog
+        self.see_anything_deploy_generator_dialog = main_window.see_anything_deploy_generator_dialog
         self.auto_distill_deploy_model_dialog = main_window.auto_distill_deploy_model_dialog
 
     def eventFilter(self, obj, event):
@@ -69,9 +70,14 @@
            if event.key() == Qt.Key_4:
                self.sam_deploy_generator_dialog.predict()
                return True
-
-            # Handle hotkey for auto distill prediction
+
+            # Handle hotkey for see anything (YOLOE) generator
            if event.key() == Qt.Key_5:
+                self.see_anything_deploy_generator_dialog.predict()
+                return True
+
+            # Handle hotkey for auto distill prediction
+            if event.key() == Qt.Key_6:
                self.auto_distill_deploy_model_dialog.predict()
                return True
 