coralnet-toolbox 0.0.74__py2.py3-none-any.whl → 0.0.76__py2.py3-none-any.whl

Files changed (49)
  1. coralnet_toolbox/Annotations/QtPolygonAnnotation.py +57 -12
  2. coralnet_toolbox/Annotations/QtRectangleAnnotation.py +44 -14
  3. coralnet_toolbox/Explorer/QtDataItem.py +52 -22
  4. coralnet_toolbox/Explorer/QtExplorer.py +277 -1600
  5. coralnet_toolbox/Explorer/QtSettingsWidgets.py +101 -15
  6. coralnet_toolbox/Explorer/QtViewers.py +1568 -0
  7. coralnet_toolbox/Explorer/transformer_models.py +70 -0
  8. coralnet_toolbox/Explorer/yolo_models.py +112 -0
  9. coralnet_toolbox/IO/QtExportMaskAnnotations.py +538 -403
  10. coralnet_toolbox/Icons/system_monitor.png +0 -0
  11. coralnet_toolbox/MachineLearning/ImportDataset/QtBase.py +239 -147
  12. coralnet_toolbox/MachineLearning/VideoInference/YOLO3D/run.py +102 -16
  13. coralnet_toolbox/QtAnnotationWindow.py +16 -10
  14. coralnet_toolbox/QtEventFilter.py +4 -4
  15. coralnet_toolbox/QtImageWindow.py +3 -7
  16. coralnet_toolbox/QtMainWindow.py +104 -64
  17. coralnet_toolbox/QtProgressBar.py +1 -0
  18. coralnet_toolbox/QtSystemMonitor.py +370 -0
  19. coralnet_toolbox/Rasters/RasterTableModel.py +20 -0
  20. coralnet_toolbox/Results/ConvertResults.py +14 -8
  21. coralnet_toolbox/Results/ResultsProcessor.py +3 -2
  22. coralnet_toolbox/SAM/QtDeployGenerator.py +2 -5
  23. coralnet_toolbox/SAM/QtDeployPredictor.py +11 -3
  24. coralnet_toolbox/SeeAnything/QtDeployGenerator.py +146 -116
  25. coralnet_toolbox/SeeAnything/QtDeployPredictor.py +55 -9
  26. coralnet_toolbox/Tile/QtTileBatchInference.py +4 -4
  27. coralnet_toolbox/Tools/QtPolygonTool.py +42 -3
  28. coralnet_toolbox/Tools/QtRectangleTool.py +30 -0
  29. coralnet_toolbox/Tools/QtSAMTool.py +140 -91
  30. coralnet_toolbox/Transformers/Models/GroundingDINO.py +72 -0
  31. coralnet_toolbox/Transformers/Models/OWLViT.py +72 -0
  32. coralnet_toolbox/Transformers/Models/OmDetTurbo.py +68 -0
  33. coralnet_toolbox/Transformers/Models/QtBase.py +120 -0
  34. coralnet_toolbox/{AutoDistill → Transformers}/Models/__init__.py +1 -1
  35. coralnet_toolbox/{AutoDistill → Transformers}/QtBatchInference.py +15 -15
  36. coralnet_toolbox/{AutoDistill → Transformers}/QtDeployModel.py +18 -16
  37. coralnet_toolbox/{AutoDistill → Transformers}/__init__.py +1 -1
  38. coralnet_toolbox/__init__.py +1 -1
  39. coralnet_toolbox/utilities.py +21 -15
  40. {coralnet_toolbox-0.0.74.dist-info → coralnet_toolbox-0.0.76.dist-info}/METADATA +13 -10
  41. {coralnet_toolbox-0.0.74.dist-info → coralnet_toolbox-0.0.76.dist-info}/RECORD +45 -40
  42. coralnet_toolbox/AutoDistill/Models/GroundingDINO.py +0 -81
  43. coralnet_toolbox/AutoDistill/Models/OWLViT.py +0 -76
  44. coralnet_toolbox/AutoDistill/Models/OmDetTurbo.py +0 -75
  45. coralnet_toolbox/AutoDistill/Models/QtBase.py +0 -112
  46. {coralnet_toolbox-0.0.74.dist-info → coralnet_toolbox-0.0.76.dist-info}/WHEEL +0 -0
  47. {coralnet_toolbox-0.0.74.dist-info → coralnet_toolbox-0.0.76.dist-info}/entry_points.txt +0 -0
  48. {coralnet_toolbox-0.0.74.dist-info → coralnet_toolbox-0.0.76.dist-info}/licenses/LICENSE.txt +0 -0
  49. {coralnet_toolbox-0.0.74.dist-info → coralnet_toolbox-0.0.76.dist-info}/top_level.txt +0 -0
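The headline change in this release is the rename of the coralnet_toolbox/AutoDistill subpackage to coralnet_toolbox/Transformers (entries 34-37), with the old AutoDistill/Models implementations removed outright (entries 42-45; two of the deleted files appear in full below). A minimal migration sketch for downstream imports follows; it assumes the moved modules keep their 0.0.74 class names, which this diff does not confirm (only the directory move is visible in the manifest):

# 0.0.74 import path, removed in 0.0.76:
# from coralnet_toolbox.AutoDistill.Models.QtBase import QtBaseModel

# Assumed 0.0.76 equivalent, based on the {AutoDistill → Transformers} renames:
from coralnet_toolbox.Transformers.Models.QtBase import QtBaseModel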
coralnet_toolbox/AutoDistill/Models/OmDetTurbo.py (deleted)
@@ -1,75 +0,0 @@
- from dataclasses import dataclass
-
- import cv2
- import numpy as np
-
- import supervision as sv
-
- from transformers import AutoProcessor, OmDetTurboForObjectDetection
-
- from autodistill.detection import CaptionOntology
- from autodistill.helpers import load_image
-
- from coralnet_toolbox.AutoDistill.Models.QtBase import QtBaseModel
-
-
- # ----------------------------------------------------------------------------------------------------------------------
- # Classes
- # ----------------------------------------------------------------------------------------------------------------------
-
-
- @dataclass
- class OmDetTurboModel(QtBaseModel):
-     def __init__(self, ontology: CaptionOntology, device: str = "cpu"):
-         super().__init__(ontology, device)
-
-         model_name = "omlab/omdet-turbo-swin-tiny-hf"
-         self.processor = AutoProcessor.from_pretrained(model_name, use_fast=True)
-         self.model = OmDetTurboForObjectDetection.from_pretrained(model_name).to(self.device)
-
-     def _process_predictions(self, image, texts, class_idx_mapper, confidence):
-         """Process model predictions for a single image."""
-         inputs = self.processor(text=texts, images=image, return_tensors="pt").to(self.device)
-         outputs = self.model(**inputs)
-
-         results = self.processor.post_process_grounded_object_detection(
-             outputs,
-             threshold=confidence,
-             target_sizes=[image.shape[:2]],
-             text_labels=texts,
-         )[0]
-
-         boxes, scores, labels = (
-             results["boxes"],
-             results["scores"],
-             results["text_labels"],
-         )
-
-         final_boxes, final_scores, final_labels = [], [], []
-
-         for box, score, label in zip(boxes, scores, labels):
-             try:
-                 box = box.detach().cpu().numpy().astype(int).tolist()
-                 score = score.item()
-                 label = class_idx_mapper[label]
-
-                 # Skip detections below the confidence threshold
-                 if score < confidence:
-                     continue
-
-                 final_boxes.append(box)
-                 final_scores.append(score)
-                 final_labels.append(label)
-
-             except Exception as e:
-                 print(f"Error: Issue converting predictions:\n{e}")
-                 continue
-
-         if len(final_boxes) == 0:
-             return None
-
-         return sv.Detections(
-             xyxy=np.array(final_boxes),
-             class_id=np.array(final_labels),
-             confidence=np.array(final_scores)
-         )
coralnet_toolbox/AutoDistill/Models/QtBase.py (deleted)
@@ -1,112 +0,0 @@
- from dataclasses import dataclass
- from abc import ABC, abstractmethod
-
- import cv2
- import numpy as np
-
- from autodistill.detection import CaptionOntology, DetectionBaseModel
- from autodistill.helpers import load_image
-
-
- # ----------------------------------------------------------------------------------------------------------------------
- # Classes
- # ----------------------------------------------------------------------------------------------------------------------
-
-
- @dataclass
- class QtBaseModel(DetectionBaseModel, ABC):
-     """
-     Base class for CoralNet foundation models that provides common functionality for
-     handling inputs, processing image data, and formatting detection results.
-     """
-     ontology: CaptionOntology
-
-     def __init__(self, ontology: CaptionOntology, device: str = "cpu"):
-         """
-         Initialize the base model with ontology and device.
-
-         Args:
-             ontology: The CaptionOntology containing class labels
-             device: The compute device (cpu, cuda, etc.)
-         """
-         self.ontology = ontology
-         self.device = device
-         self.processor = None
-         self.model = None
-
-     @abstractmethod
-     def _process_predictions(self, image, texts, class_idx_mapper, confidence):
-         """
-         Process model predictions for a single image.
-
-         Args:
-             image: The input image
-             texts: The text prompts from the ontology
-             class_idx_mapper: Mapping from text labels to class indices
-             confidence: Confidence threshold
-
-         Returns:
-             sv.Detections object or None if no detections
-         """
-         pass
-
-     def predict(self, input, confidence=0.01):
-         """
-         Run inference on input images.
-
-         Args:
-             input: Can be an image path, a list of image paths, a numpy array, or a list of numpy arrays
-             confidence: Detection confidence threshold
-
-         Returns:
-             Either a single sv.Detections object or a list of sv.Detections objects
-         """
-         # Normalize input into a list of CV2-format images
-         images = []
-         if isinstance(input, str):
-             # Single image path
-             images = [load_image(input, return_format="cv2")]
-         elif isinstance(input, np.ndarray):
-             # Single image numpy array or batch of images
-             if input.ndim == 3:
-                 images = [cv2.cvtColor(input, cv2.COLOR_RGB2BGR)]
-             elif input.ndim == 4:
-                 for img in input:
-                     images.append(cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
-             else:
-                 raise ValueError("Unsupported numpy array dimensions.")
-         elif isinstance(input, list):
-             if all(isinstance(i, str) for i in input):
-                 # List of image paths
-                 for path in input:
-                     images.append(load_image(path, return_format="cv2"))
-             elif all(isinstance(i, np.ndarray) for i in input):
-                 # List of image arrays
-                 for img in input:
-                     images.append(cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
-             else:
-                 raise ValueError("List must contain all image paths or all numpy arrays.")
-         else:
-             raise ValueError(
-                 "Input must be an image path, a list of image paths, a numpy array, or a list/array of numpy arrays."
-             )
-
-         detections_result = []
-
-         # Get text prompts and create class index mapper
-         texts = self.ontology.prompts()
-         class_idx_mapper = {label: idx for idx, label in enumerate(texts)}
-
-         # Loop through images
-         for image in images:
-             # Process predictions for this image
-             detection = self._process_predictions(image, texts, class_idx_mapper, confidence)
-             if detection is not None:
-                 detections_result.append(detection)
-
-         # Return detections for a single image directly,
-         # or a list of detections if multiple images were passed
-         if len(detections_result) == 1:
-             return detections_result[0]
-         else:
-             return detections_result
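
For context on what callers lose, here is a minimal usage sketch of the removed 0.0.74 API; the ontology contents, image, and threshold are hypothetical, and it assumes autodistill, transformers, supervision, numpy, and opencv-python are installed, as the deleted code required:

import numpy as np

from autodistill.detection import CaptionOntology
from coralnet_toolbox.AutoDistill.Models.OmDetTurbo import OmDetTurboModel

# Hypothetical ontology: maps a text prompt to the class label it should produce.
ontology = CaptionOntology({"a coral colony": "coral"})
model = OmDetTurboModel(ontology, device="cpu")

# QtBaseModel.predict() normalizes a single 3-D RGB array into a one-image batch
# (converting RGB to BGR), so this returns a single sv.Detections object,
# or an empty list when nothing clears the confidence threshold.
image = np.zeros((480, 640, 3), dtype=np.uint8)
detections = model.predict(image, confidence=0.3)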