coralnet-toolbox 0.0.75-py2.py3-none-any.whl → 0.0.76-py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- coralnet_toolbox/Annotations/QtPolygonAnnotation.py +57 -12
- coralnet_toolbox/Annotations/QtRectangleAnnotation.py +44 -14
- coralnet_toolbox/Explorer/transformer_models.py +13 -2
- coralnet_toolbox/IO/QtExportMaskAnnotations.py +538 -403
- coralnet_toolbox/Icons/system_monitor.png +0 -0
- coralnet_toolbox/QtEventFilter.py +4 -4
- coralnet_toolbox/QtMainWindow.py +104 -64
- coralnet_toolbox/QtProgressBar.py +1 -0
- coralnet_toolbox/QtSystemMonitor.py +370 -0
- coralnet_toolbox/Results/ConvertResults.py +14 -8
- coralnet_toolbox/Results/ResultsProcessor.py +3 -2
- coralnet_toolbox/SAM/QtDeployGenerator.py +1 -1
- coralnet_toolbox/SAM/QtDeployPredictor.py +10 -0
- coralnet_toolbox/SeeAnything/QtDeployGenerator.py +15 -10
- coralnet_toolbox/SeeAnything/QtDeployPredictor.py +10 -6
- coralnet_toolbox/Tile/QtTileBatchInference.py +4 -4
- coralnet_toolbox/Tools/QtSAMTool.py +140 -91
- coralnet_toolbox/Transformers/Models/GroundingDINO.py +72 -0
- coralnet_toolbox/Transformers/Models/OWLViT.py +72 -0
- coralnet_toolbox/Transformers/Models/OmDetTurbo.py +68 -0
- coralnet_toolbox/Transformers/Models/QtBase.py +120 -0
- coralnet_toolbox/{AutoDistill → Transformers}/Models/__init__.py +1 -1
- coralnet_toolbox/{AutoDistill → Transformers}/QtBatchInference.py +15 -15
- coralnet_toolbox/{AutoDistill → Transformers}/QtDeployModel.py +18 -16
- coralnet_toolbox/{AutoDistill → Transformers}/__init__.py +1 -1
- coralnet_toolbox/__init__.py +1 -1
- coralnet_toolbox/utilities.py +0 -15
- {coralnet_toolbox-0.0.75.dist-info → coralnet_toolbox-0.0.76.dist-info}/METADATA +9 -9
- {coralnet_toolbox-0.0.75.dist-info → coralnet_toolbox-0.0.76.dist-info}/RECORD +33 -31
- coralnet_toolbox/AutoDistill/Models/GroundingDINO.py +0 -81
- coralnet_toolbox/AutoDistill/Models/OWLViT.py +0 -76
- coralnet_toolbox/AutoDistill/Models/OmDetTurbo.py +0 -75
- coralnet_toolbox/AutoDistill/Models/QtBase.py +0 -112
- {coralnet_toolbox-0.0.75.dist-info → coralnet_toolbox-0.0.76.dist-info}/WHEEL +0 -0
- {coralnet_toolbox-0.0.75.dist-info → coralnet_toolbox-0.0.76.dist-info}/entry_points.txt +0 -0
- {coralnet_toolbox-0.0.75.dist-info → coralnet_toolbox-0.0.76.dist-info}/licenses/LICENSE.txt +0 -0
- {coralnet_toolbox-0.0.75.dist-info → coralnet_toolbox-0.0.76.dist-info}/top_level.txt +0 -0
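The structural change in this release is the rename of the `AutoDistill` subpackage to `Transformers`: the old `AutoDistill/Models/*.py` implementations listed above are deleted and replaced by new `Transformers/Models/*.py` files, and a new `QtSystemMonitor` module is added. A minimal sketch of what the rename means for downstream imports, assuming module paths simply follow the renamed files listed above (only modules are imported here, since the diff does not show the public names inside them):

# Hypothetical import migration, inferred only from the renamed paths above.

# 0.0.75 and earlier:
from coralnet_toolbox.AutoDistill import QtDeployModel, QtBatchInference

# 0.0.76 and later:
from coralnet_toolbox.Transformers import QtDeployModel, QtBatchInference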
@@ -1,112 +0,0 @@
|
|
1
|
-
from dataclasses import dataclass
|
2
|
-
from abc import ABC, abstractmethod
|
3
|
-
|
4
|
-
import cv2
|
5
|
-
import numpy as np
|
6
|
-
|
7
|
-
from autodistill.detection import CaptionOntology, DetectionBaseModel
|
8
|
-
from autodistill.helpers import load_image
|
9
|
-
|
10
|
-
|
11
|
-
# ----------------------------------------------------------------------------------------------------------------------
|
12
|
-
# Classes
|
13
|
-
# ----------------------------------------------------------------------------------------------------------------------
|
14
|
-
|
15
|
-
|
16
|
-
@dataclass
|
17
|
-
class QtBaseModel(DetectionBaseModel, ABC):
|
18
|
-
"""
|
19
|
-
Base class for CoralNet foundation models that provides common functionality for
|
20
|
-
handling inputs, processing image data, and formatting detection results.
|
21
|
-
"""
|
22
|
-
ontology: CaptionOntology
|
23
|
-
|
24
|
-
def __init__(self, ontology: CaptionOntology, device: str = "cpu"):
|
25
|
-
"""
|
26
|
-
Initialize the base model with ontology and device.
|
27
|
-
|
28
|
-
Args:
|
29
|
-
ontology: The CaptionOntology containing class labels
|
30
|
-
device: The compute device (cpu, cuda, etc.)
|
31
|
-
"""
|
32
|
-
self.ontology = ontology
|
33
|
-
self.device = device
|
34
|
-
self.processor = None
|
35
|
-
self.model = None
|
36
|
-
|
37
|
-
@abstractmethod
|
38
|
-
def _process_predictions(self, image, texts, class_idx_mapper, confidence):
|
39
|
-
"""
|
40
|
-
Process model predictions for a single image.
|
41
|
-
|
42
|
-
Args:
|
43
|
-
image: The input image
|
44
|
-
texts: The text prompts from the ontology
|
45
|
-
class_idx_mapper: Mapping from text labels to class indices
|
46
|
-
confidence: Confidence threshold
|
47
|
-
|
48
|
-
Returns:
|
49
|
-
sv.Detections object or None if no detections
|
50
|
-
"""
|
51
|
-
pass
|
52
|
-
|
53
|
-
def predict(self, input, confidence=0.01):
|
54
|
-
"""
|
55
|
-
Run inference on input images.
|
56
|
-
|
57
|
-
Args:
|
58
|
-
input: Can be an image path, a list of image paths, a numpy array, or a list of numpy arrays
|
59
|
-
confidence: Detection confidence threshold
|
60
|
-
|
61
|
-
Returns:
|
62
|
-
Either a single sv.Detections object or a list of sv.Detections objects
|
63
|
-
"""
|
64
|
-
# Normalize input into a list of CV2-format images
|
65
|
-
images = []
|
66
|
-
if isinstance(input, str):
|
67
|
-
# Single image path
|
68
|
-
images = [load_image(input, return_format="cv2")]
|
69
|
-
elif isinstance(input, np.ndarray):
|
70
|
-
# Single image numpy array or batch of images
|
71
|
-
if input.ndim == 3:
|
72
|
-
images = [cv2.cvtColor(input, cv2.COLOR_RGB2BGR)]
|
73
|
-
elif input.ndim == 4:
|
74
|
-
for img in input:
|
75
|
-
images.append(cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
|
76
|
-
else:
|
77
|
-
raise ValueError("Unsupported numpy array dimensions.")
|
78
|
-
elif isinstance(input, list):
|
79
|
-
if all(isinstance(i, str) for i in input):
|
80
|
-
# List of image paths
|
81
|
-
for path in input:
|
82
|
-
images.append(load_image(path, return_format="cv2"))
|
83
|
-
elif all(isinstance(i, np.ndarray) for i in input):
|
84
|
-
# List of image arrays
|
85
|
-
for img in input:
|
86
|
-
images.append(cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
|
87
|
-
else:
|
88
|
-
raise ValueError("List must contain all image paths or all numpy arrays.")
|
89
|
-
else:
|
90
|
-
raise ValueError(
|
91
|
-
"Input must be an image path, a list of image paths, a numpy array, or a list/array of numpy arrays."
|
92
|
-
)
|
93
|
-
|
94
|
-
detections_result = []
|
95
|
-
|
96
|
-
# Get text prompts and create class index mapper
|
97
|
-
texts = self.ontology.prompts()
|
98
|
-
class_idx_mapper = {label: idx for idx, label in enumerate(texts)}
|
99
|
-
|
100
|
-
# Loop through images
|
101
|
-
for image in images:
|
102
|
-
# Process predictions for this image
|
103
|
-
detection = self._process_predictions(image, texts, class_idx_mapper, confidence)
|
104
|
-
if detection is not None:
|
105
|
-
detections_result.append(detection)
|
106
|
-
|
107
|
-
# Return detections for a single image directly,
|
108
|
-
# or a list of detections if multiple images were passed
|
109
|
-
if len(detections_result) == 1:
|
110
|
-
return detections_result[0]
|
111
|
-
else:
|
112
|
-
return detections_result
|