coralnet-toolbox 0.0.72__py2.py3-none-any.whl → 0.0.74__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. coralnet_toolbox/Annotations/QtAnnotation.py +28 -69
  2. coralnet_toolbox/Annotations/QtMaskAnnotation.py +408 -0
  3. coralnet_toolbox/Annotations/QtMultiPolygonAnnotation.py +72 -56
  4. coralnet_toolbox/Annotations/QtPatchAnnotation.py +165 -216
  5. coralnet_toolbox/Annotations/QtPolygonAnnotation.py +497 -353
  6. coralnet_toolbox/Annotations/QtRectangleAnnotation.py +126 -116
  7. coralnet_toolbox/AutoDistill/QtDeployModel.py +23 -12
  8. coralnet_toolbox/CoralNet/QtDownload.py +2 -1
  9. coralnet_toolbox/Explorer/QtDataItem.py +1 -1
  10. coralnet_toolbox/Explorer/QtExplorer.py +159 -17
  11. coralnet_toolbox/Explorer/QtSettingsWidgets.py +160 -86
  12. coralnet_toolbox/IO/QtExportTagLabAnnotations.py +30 -10
  13. coralnet_toolbox/IO/QtImportTagLabAnnotations.py +21 -15
  14. coralnet_toolbox/IO/QtOpenProject.py +46 -78
  15. coralnet_toolbox/IO/QtSaveProject.py +18 -43
  16. coralnet_toolbox/MachineLearning/DeployModel/QtDetect.py +22 -11
  17. coralnet_toolbox/MachineLearning/DeployModel/QtSegment.py +22 -10
  18. coralnet_toolbox/MachineLearning/ExportDataset/QtBase.py +61 -24
  19. coralnet_toolbox/MachineLearning/ExportDataset/QtClassify.py +5 -1
  20. coralnet_toolbox/MachineLearning/ExportDataset/QtDetect.py +19 -6
  21. coralnet_toolbox/MachineLearning/ExportDataset/QtSegment.py +21 -8
  22. coralnet_toolbox/MachineLearning/ImportDataset/QtBase.py +42 -22
  23. coralnet_toolbox/MachineLearning/VideoInference/QtBase.py +0 -4
  24. coralnet_toolbox/QtAnnotationWindow.py +42 -14
  25. coralnet_toolbox/QtEventFilter.py +19 -2
  26. coralnet_toolbox/QtImageWindow.py +134 -86
  27. coralnet_toolbox/QtLabelWindow.py +14 -2
  28. coralnet_toolbox/QtMainWindow.py +122 -9
  29. coralnet_toolbox/QtProgressBar.py +52 -27
  30. coralnet_toolbox/Rasters/QtRaster.py +59 -7
  31. coralnet_toolbox/Rasters/RasterTableModel.py +42 -14
  32. coralnet_toolbox/SAM/QtBatchInference.py +0 -2
  33. coralnet_toolbox/SAM/QtDeployGenerator.py +22 -11
  34. coralnet_toolbox/SAM/QtDeployPredictor.py +10 -0
  35. coralnet_toolbox/SeeAnything/QtBatchInference.py +19 -221
  36. coralnet_toolbox/SeeAnything/QtDeployGenerator.py +1634 -0
  37. coralnet_toolbox/SeeAnything/QtDeployPredictor.py +107 -154
  38. coralnet_toolbox/SeeAnything/QtTrainModel.py +115 -45
  39. coralnet_toolbox/SeeAnything/__init__.py +2 -0
  40. coralnet_toolbox/Tools/QtCutSubTool.py +18 -2
  41. coralnet_toolbox/Tools/QtResizeSubTool.py +19 -2
  42. coralnet_toolbox/Tools/QtSAMTool.py +222 -57
  43. coralnet_toolbox/Tools/QtSeeAnythingTool.py +223 -55
  44. coralnet_toolbox/Tools/QtSelectSubTool.py +6 -4
  45. coralnet_toolbox/Tools/QtSelectTool.py +27 -3
  46. coralnet_toolbox/Tools/QtSubtractSubTool.py +66 -0
  47. coralnet_toolbox/Tools/QtWorkAreaTool.py +25 -13
  48. coralnet_toolbox/Tools/__init__.py +2 -0
  49. coralnet_toolbox/__init__.py +1 -1
  50. coralnet_toolbox/utilities.py +137 -47
  51. coralnet_toolbox-0.0.74.dist-info/METADATA +375 -0
  52. {coralnet_toolbox-0.0.72.dist-info → coralnet_toolbox-0.0.74.dist-info}/RECORD +56 -53
  53. coralnet_toolbox-0.0.72.dist-info/METADATA +0 -341
  54. {coralnet_toolbox-0.0.72.dist-info → coralnet_toolbox-0.0.74.dist-info}/WHEEL +0 -0
  55. {coralnet_toolbox-0.0.72.dist-info → coralnet_toolbox-0.0.74.dist-info}/entry_points.txt +0 -0
  56. {coralnet_toolbox-0.0.72.dist-info → coralnet_toolbox-0.0.74.dist-info}/licenses/LICENSE.txt +0 -0
  57. {coralnet_toolbox-0.0.72.dist-info → coralnet_toolbox-0.0.74.dist-info}/top_level.txt +0 -0
@@ -53,6 +53,10 @@ class Detect(Base):
53
53
  self.include_rectangles_checkbox.setEnabled(True) # Enable user to uncheck rectangles if desired
54
54
  self.include_polygons_checkbox.setChecked(True)
55
55
  self.include_polygons_checkbox.setEnabled(True) # Already enabled
56
+
57
+ # Explicitly enable negative sample options for detection
58
+ self.include_negatives_radio.setEnabled(True)
59
+ self.exclude_negatives_radio.setEnabled(True)
56
60
 
57
61
  def create_dataset(self, output_dir_path):
58
62
  """
@@ -106,12 +110,20 @@ class Detect(Base):
106
110
  Process and save detection annotations.
107
111
 
108
112
  Args:
109
- annotations (list): List of annotations.
113
+ annotations (list): List of annotations for this split.
110
114
  split_dir (str): Path to the split directory.
111
115
  split (str): Split name (e.g., "Training", "Validation", "Testing").
112
116
  """
113
- # Get unique image paths
114
- image_paths = list(set(a.image_path for a in annotations))
117
+ # Determine the full list of images for this split (including negatives)
118
+ if split == "Training":
119
+ image_paths = self.train_images
120
+ elif split == "Validation":
121
+ image_paths = self.val_images
122
+ elif split == "Testing":
123
+ image_paths = self.test_images
124
+ else:
125
+ image_paths = []
126
+
115
127
  if not image_paths:
116
128
  return
117
129
 
@@ -124,6 +136,7 @@ class Detect(Base):
124
136
  for image_path in image_paths:
125
137
  yolo_annotations = []
126
138
  image_height, image_width = rasterio_open(image_path).shape
139
+ # Filter the annotations passed to this function to get only those for the current image
127
140
  image_annotations = [a for a in annotations if a.image_path == image_path]
128
141
 
129
142
  for image_annotation in image_annotations:
@@ -132,11 +145,11 @@ class Detect(Base):
132
145
  yolo_annotations.append(f"{class_number} {annotation}")
133
146
 
134
147
  # Save the annotations to a text file
135
- file_ext = image_path.split(".")[-1]
136
- text_file = os.path.basename(image_path).replace(f".{file_ext}", ".txt")
148
+ file_ext = os.path.splitext(image_path)[1]
149
+ text_file = os.path.basename(image_path).replace(file_ext, ".txt")
137
150
  text_path = os.path.join(f"{split_dir}/labels", text_file)
138
151
 
139
- # Write the annotations to the text file
152
+ # Write the annotations to the text file (creates an empty file for negatives)
140
153
  with open(text_path, 'w') as f:
141
154
  for annotation in yolo_annotations:
142
155
  f.write(annotation + '\n')
@@ -53,6 +53,10 @@ class Segment(Base):
53
53
  self.include_rectangles_checkbox.setEnabled(True) # Enable rectangles for segmentation
54
54
  self.include_polygons_checkbox.setChecked(True)
55
55
  self.include_polygons_checkbox.setEnabled(True) # Enable user to uncheck polygons if desired
56
+
57
+ # Explicitly enable negative sample options for segmentation
58
+ self.include_negatives_radio.setEnabled(True)
59
+ self.exclude_negatives_radio.setEnabled(True)
56
60
 
57
61
  def create_dataset(self, output_dir_path):
58
62
  """
@@ -106,12 +110,20 @@ class Segment(Base):
106
110
  Process and save segmentation annotations.
107
111
 
108
112
  Args:
109
- annotations (list): List of annotations.
113
+ annotations (list): List of annotations for this split.
110
114
  split_dir (str): Path to the split directory.
111
115
  split (str): Split name (e.g., "Training", "Validation", "Testing").
112
116
  """
113
- # Get unique image paths
114
- image_paths = list(set(a.image_path for a in annotations))
117
+ # Determine the full list of images for this split (including negatives)
118
+ if split == "Training":
119
+ image_paths = self.train_images
120
+ elif split == "Validation":
121
+ image_paths = self.val_images
122
+ elif split == "Testing":
123
+ image_paths = self.test_images
124
+ else:
125
+ image_paths = []
126
+
115
127
  if not image_paths:
116
128
  return
117
129
 
@@ -124,6 +136,7 @@ class Segment(Base):
124
136
  for image_path in image_paths:
125
137
  yolo_annotations = []
126
138
  image_height, image_width = rasterio_open(image_path).shape
139
+ # Filter the annotations passed to this function to get only those for the current image
127
140
  image_annotations = [a for a in annotations if a.image_path == image_path]
128
141
 
129
142
  for image_annotation in image_annotations:
@@ -132,11 +145,11 @@ class Segment(Base):
132
145
  yolo_annotations.append(f"{class_number} {annotation}")
133
146
 
134
147
  # Save the annotations to a text file
135
- file_ext = image_path.split(".")[-1]
136
- text_file = os.path.basename(image_path).replace(f".{file_ext}", ".txt")
148
+ file_ext = os.path.splitext(image_path)[1]
149
+ text_file = os.path.basename(image_path).replace(file_ext, ".txt")
137
150
  text_path = os.path.join(f"{split_dir}/labels", text_file)
138
151
 
139
- # Write the annotations to the text file
152
+ # Write the annotations to the text file (creates an empty file for negatives)
140
153
  with open(text_path, 'w') as f:
141
154
  for annotation in yolo_annotations:
142
155
  f.write(annotation + '\n')
@@ -146,7 +159,7 @@ class Segment(Base):
146
159
 
147
160
  progress_bar.update_progress()
148
161
 
149
- # Make cursor normal
162
+ # Reset cursor
150
163
  QApplication.restoreOverrideCursor()
151
164
  progress_bar.stop_progress()
152
- progress_bar.close()
165
+ progress_bar.close()
@@ -35,7 +35,7 @@ class DatasetProcessor(QObject):
35
35
  """
36
36
  status_changed = pyqtSignal(str, int)
37
37
  progress_updated = pyqtSignal(int)
38
- processing_complete = pyqtSignal(list, list)
38
+ processing_complete = pyqtSignal(list, list, list)
39
39
  error = pyqtSignal(str)
40
40
  finished = pyqtSignal()
41
41
 
@@ -47,6 +47,7 @@ class DatasetProcessor(QObject):
47
47
  self.import_as = import_as # 'rectangle' or 'polygon' (target format)
48
48
  self.rename_on_conflict = rename_on_conflict
49
49
  self.is_running = True
50
+ self.parsing_errors = [] # To collect errors instead of printing
50
51
 
51
52
  def stop(self):
52
53
  self.is_running = False
@@ -81,7 +82,7 @@ class DatasetProcessor(QObject):
81
82
 
82
83
  # Step 4: Emit results for GUI to consume
83
84
  image_paths = list(image_label_paths.keys())
84
- self.processing_complete.emit(raw_annotations, image_paths)
85
+ self.processing_complete.emit(raw_annotations, image_paths, self.parsing_errors)
85
86
 
86
87
  except Exception as e:
87
88
  # Catch-all for any error during processing
@@ -146,7 +147,7 @@ class DatasetProcessor(QObject):
146
147
  with open(label_path, 'r') as file:
147
148
  lines = file.readlines()
148
149
 
149
- for line in lines:
150
+ for line_num, line in enumerate(lines):
150
151
  try:
151
152
  parts = list(map(float, line.split()))
152
153
  class_id = int(parts[0])
@@ -195,8 +196,11 @@ class DatasetProcessor(QObject):
195
196
 
196
197
  all_raw_annotations.append(raw_ann_data)
197
198
  except (ValueError, IndexError) as e:
198
- # Skip malformed lines and print a warning
199
- print(f"Skipping malformed line in {label_path}: {line.strip()} ({e})")
199
+ # Log the malformed line error instead of printing
200
+ error_msg = (f"In file '{os.path.basename(label_path)}' on line {line_num + 1}:\n"
201
+ f"Skipped malformed content: '{line.strip()}'\nReason: {e}\n")
202
+ self.parsing_errors.append(error_msg)
203
+
200
204
 
201
205
  # Update progress after each image
202
206
  self.progress_updated.emit(i + 1)
@@ -290,11 +294,14 @@ class Base(QDialog):
290
294
  )
291
295
  if file_path:
292
296
  self.yaml_path_label.setText(file_path)
293
- # Auto-fill output directory and folder name if not set
297
+ # Auto-fill output directory to be the PARENT of the yaml's directory
294
298
  if not self.output_dir_label.text():
295
- self.output_dir_label.setText(os.path.dirname(file_path))
299
+ parent_dir = os.path.dirname(os.path.dirname(file_path))
300
+ self.output_dir_label.setText(parent_dir)
296
301
  if not self.output_folder_name.text():
297
- self.output_folder_name.setText("project")
302
+ # Suggest a folder name based on the yaml file's parent folder
303
+ project_name = os.path.basename(os.path.dirname(file_path))
304
+ self.output_folder_name.setText(f"{project_name}_imported")
298
305
 
299
306
  def browse_output_dir(self):
300
307
  """Open a dialog to select the output directory."""
@@ -309,6 +316,16 @@ class Base(QDialog):
309
316
  if not all([self.yaml_path_label.text(), self.output_dir_label.text(), self.output_folder_name.text()]):
310
317
  QMessageBox.warning(self, "Error", "Please fill in all fields.")
311
318
  return
319
+
320
+ # This check for existing output is still relevant
321
+ self.output_folder = os.path.join(self.output_dir_label.text(), self.output_folder_name.text())
322
+ if os.path.exists(self.output_folder) and os.listdir(self.output_folder):
323
+ reply = QMessageBox.question(self,
324
+ 'Directory Not Empty',
325
+ f"The directory '{self.output_folder}' is not empty. Continue?",
326
+ QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
327
+ if reply == QMessageBox.No:
328
+ return
312
329
 
313
330
  # Pre-scan for duplicates
314
331
  yaml_path = self.yaml_path_label.text()
@@ -358,16 +375,7 @@ class Base(QDialog):
358
375
  rename_files = False
359
376
  else: # User closed the dialog
360
377
  return
361
-
362
- self.output_folder = os.path.join(self.output_dir_label.text(), self.output_folder_name.text())
363
- if os.path.exists(self.output_folder) and os.listdir(self.output_folder):
364
- reply = QMessageBox.question(self,
365
- 'Directory Not Empty',
366
- f"The directory '{self.output_folder}' is not empty. Continue?",
367
- QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
368
- if reply == QMessageBox.No:
369
- return
370
-
378
+
371
379
  self.button_box.setEnabled(False)
372
380
  QApplication.setOverrideCursor(Qt.WaitCursor)
373
381
 
@@ -393,6 +401,7 @@ class Base(QDialog):
393
401
  self.worker.error.connect(self.on_error)
394
402
  self.worker.status_changed.connect(self.on_status_changed)
395
403
  self.worker.progress_updated.connect(self.on_progress_update)
404
+ # Connect to the updated signal
396
405
  self.worker.processing_complete.connect(self.on_processing_complete)
397
406
  self.thread.start()
398
407
 
@@ -403,7 +412,7 @@ class Base(QDialog):
403
412
  def on_progress_update(self, value):
404
413
  self.progress_bar.set_value(value)
405
414
 
406
- def on_processing_complete(self, raw_annotations, image_paths):
415
+ def on_processing_complete(self, raw_annotations, image_paths, parsing_errors):
407
416
  added_paths = []
408
417
  for path in image_paths:
409
418
  if self.image_window.add_image(path):
@@ -447,9 +456,20 @@ class Base(QDialog):
447
456
  self.image_window.update_image_annotations(added_paths[-1])
448
457
  self.annotation_window.load_annotations()
449
458
 
450
- QMessageBox.information(self,
451
- "Dataset Imported",
452
- "Dataset has been successfully imported.")
459
+ # --- Display a summary message, including any parsing errors ---
460
+ summary_message = "Dataset has been successfully imported."
461
+ if parsing_errors:
462
+ # If there were errors, show a more detailed dialog
463
+ QMessageBox.warning(self,
464
+ "Import Complete with Warnings",
465
+ f"{summary_message}\n\nHowever, {len(parsing_errors)} issue(s) were found "
466
+ "in the label files. Please review them below.",
467
+ details='\n'.join(parsing_errors))
468
+ else:
469
+ # Otherwise, show a simple info box
470
+ QMessageBox.information(self,
471
+ "Dataset Imported",
472
+ summary_message)
453
473
 
454
474
  def export_annotations_to_json(self, annotations_list, output_dir):
455
475
  """
@@ -320,10 +320,6 @@ class Base(QDialog):
320
320
  # If video already loaded, update output dir for widget
321
321
  if self.video_path:
322
322
  self.video_region_widget.load_video(self.video_path, dir_name)
323
- else:
324
- self.update_record_buttons()
325
- else:
326
- self.update_record_buttons()
327
323
 
328
324
  def browse_model(self):
329
325
  """Open file dialog to select model file (filtered to .pt, .pth)."""
@@ -408,10 +408,6 @@ class AnnotationWindow(QGraphicsView):
408
408
 
409
409
  self.toggle_cursor_annotation()
410
410
 
411
- # Set the image dimensions, and current view in status bar
412
- self.imageLoaded.emit(self.pixmap_image.width(), self.pixmap_image.height())
413
- self.viewChanged.emit(self.pixmap_image.width(), self.pixmap_image.height())
414
-
415
411
  # Load all associated annotations
416
412
  self.load_annotations()
417
413
  # Update the image window's image annotations
@@ -421,6 +417,10 @@ class AnnotationWindow(QGraphicsView):
421
417
 
422
418
  QApplication.processEvents()
423
419
 
420
+ # Set the image dimensions, and current view in status bar
421
+ self.imageLoaded.emit(self.pixmap_image.width(), self.pixmap_image.height())
422
+ self.viewChanged.emit(self.pixmap_image.width(), self.pixmap_image.height())
423
+
424
424
  def update_current_image_path(self, image_path):
425
425
  """Update the current image path being displayed."""
426
426
  self.current_image_path = image_path
@@ -466,29 +466,57 @@ class AnnotationWindow(QGraphicsView):
466
466
  self.centerOn(annotation_center)
467
467
 
468
468
  def center_on_annotation(self, annotation):
469
- """Center and zoom in to focus on the specified annotation."""
469
+ """Center and zoom in to focus on the specified annotation with dynamic padding."""
470
470
  # Create graphics item if it doesn't exist
471
471
  if not annotation.graphics_item:
472
472
  annotation.create_graphics_item(self.scene)
473
473
 
474
474
  # Get the bounding rect of the annotation in scene coordinates
475
475
  annotation_rect = annotation.graphics_item.boundingRect()
476
-
477
- # Add some padding around the annotation (20% on each side)
478
- padding_x = annotation_rect.width() * 0.2
479
- padding_y = annotation_rect.height() * 0.2
476
+
477
+ # Step 1: Calculate annotation and image area
478
+ annotation_area = annotation_rect.width() * annotation_rect.height()
479
+ if self.pixmap_image:
480
+ image_width = self.pixmap_image.width()
481
+ image_height = self.pixmap_image.height()
482
+ else:
483
+ # Fallback to scene rect if image not loaded
484
+ image_width = self.scene.sceneRect().width()
485
+ image_height = self.scene.sceneRect().height()
486
+ image_area = image_width * image_height
487
+
488
+ # Step 2: Compute the relative area ratio (avoid division by zero)
489
+ if image_area > 0:
490
+ relative_area = annotation_area / image_area
491
+ else:
492
+ relative_area = 1.0 # fallback, treat as full image
493
+
494
+ # Step 3: Map ratio to padding factor (smaller annotation = more padding)
495
+ # Example: padding_factor = clamp(0.5 * (1/relative_area)**0.5, 0.1, 0.5)
496
+ # - For very small annotations, padding approaches 0.5 (50%)
497
+ # - For large annotations, padding approaches 0.1 (10%)
498
+ import math
499
+ min_padding = 0.1 # 10%
500
+ max_padding = 0.5 # 50%
501
+ if relative_area > 0:
502
+ padding_factor = max(min(0.5 * (1 / math.sqrt(relative_area)), max_padding), min_padding)
503
+ else:
504
+ padding_factor = min_padding
505
+
506
+ # Step 4: Apply dynamic padding
507
+ padding_x = annotation_rect.width() * padding_factor
508
+ padding_y = annotation_rect.height() * padding_factor
480
509
  padded_rect = annotation_rect.adjusted(-padding_x, -padding_y, padding_x, padding_y)
481
-
510
+
482
511
  # Fit the padded annotation rect in the view
483
512
  self.fitInView(padded_rect, Qt.KeepAspectRatio)
484
-
513
+
485
514
  # Update the zoom factor based on the new view transformation
486
- # We can calculate this by comparing the viewport size to the scene rect size
487
515
  view_rect = self.viewport().rect()
488
- zoom_x = view_rect.width() / padded_rect.width()
516
+ zoom_x = view_rect.width() / padded_rect.width()
489
517
  zoom_y = view_rect.height() / padded_rect.height()
490
518
  self.zoom_factor = min(zoom_x, zoom_y)
491
-
519
+
492
520
  # Signal that the view has changed
493
521
  self.viewChanged.emit(*self.get_image_dimensions())
494
522
 
@@ -23,6 +23,7 @@ class GlobalEventFilter(QObject):
23
23
  self.detect_deploy_model_dialog = main_window.detect_deploy_model_dialog
24
24
  self.segment_deploy_model_dialog = main_window.segment_deploy_model_dialog
25
25
  self.sam_deploy_generator_dialog = main_window.sam_deploy_generator_dialog
26
+ self.see_anything_deploy_generator_dialog = main_window.see_anything_deploy_generator_dialog
26
27
  self.auto_distill_deploy_model_dialog = main_window.auto_distill_deploy_model_dialog
27
28
 
28
29
  def eventFilter(self, obj, event):
@@ -69,9 +70,14 @@ class GlobalEventFilter(QObject):
69
70
  if event.key() == Qt.Key_4:
70
71
  self.sam_deploy_generator_dialog.predict()
71
72
  return True
72
-
73
- # Handle hotkey for auto distill prediction
73
+
74
+ # Handle hotkey for see anything (YOLOE) generator
74
75
  if event.key() == Qt.Key_5:
76
+ self.see_anything_deploy_generator_dialog.predict()
77
+ return True
78
+
79
+ # Handle hotkey for auto distill prediction
80
+ if event.key() == Qt.Key_6:
75
81
  self.auto_distill_deploy_model_dialog.predict()
76
82
  return True
77
83
 
@@ -85,9 +91,20 @@ class GlobalEventFilter(QObject):
85
91
 
86
92
  # Delete (backspace or delete key) selected annotations when select tool is active
87
93
  if event.key() == Qt.Key_Delete or event.key() == Qt.Key_Backspace:
94
+ # First check if the select tool is active
88
95
  if self.main_window.select_tool_action.isChecked():
96
+ selected_tool = self.annotation_window.selected_tool
97
+ select_tool = self.annotation_window.tools[selected_tool]
98
+ # Get the active subtool if it exists, pass to its keyPressEvent
99
+ if hasattr(select_tool, 'active_subtool') and select_tool.active_subtool:
100
+ select_tool.active_subtool.keyPressEvent(event)
101
+ return True
102
+
103
+ # Otherwise, proceed with deletion if there are selected annotations
89
104
  if self.annotation_window.selected_annotations:
90
105
  self.annotation_window.delete_selected_annotations()
106
+ return True
107
+
91
108
  # Consume the event so it doesn't do anything else
92
109
  return True
93
110