maite-datasets 0.0.6__py3-none-any.whl → 0.0.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
maite_datasets/_base.py CHANGED
@@ -8,20 +8,22 @@ from abc import abstractmethod
8
8
  from collections import namedtuple
9
9
  from collections.abc import Iterator, Sequence
10
10
  from pathlib import Path
11
- from typing import Any, Callable, Generic, Literal, NamedTuple, TypeVar, cast
11
+ from typing import Any, Callable, Generic, Literal, NamedTuple, Protocol, TypeVar, cast
12
12
 
13
13
  import numpy as np
14
14
  from maite.protocols import DatasetMetadata, DatumMetadata
15
+ from maite.protocols import image_classification as ic
16
+ from maite.protocols import object_detection as od
15
17
  from numpy.typing import NDArray
16
18
  from PIL import Image
17
19
 
18
20
  from maite_datasets._fileio import _ensure_exists
19
21
  from maite_datasets.protocols import Array
20
22
 
21
- _T = TypeVar("_T")
22
23
  _T_co = TypeVar("_T_co", covariant=True)
23
24
  _TArray = TypeVar("_TArray", bound=Array)
24
25
  _TTarget = TypeVar("_TTarget")
26
+ _TODTarget = TypeVar("_TODTarget", bound=od.ObjectDetectionTarget)
25
27
  _TRawTarget = TypeVar(
26
28
  "_TRawTarget",
27
29
  Sequence[int],
@@ -30,8 +32,7 @@ _TRawTarget = TypeVar(
30
32
  )
31
33
  _TAnnotation = TypeVar("_TAnnotation", int, str, tuple[list[int], list[list[float]]])
32
34
 
33
-
34
- ObjectDetectionTarget = namedtuple("ObjectDetectionTarget", ["boxes", "labels", "scores"])
35
+ ObjectDetectionTargetTuple = namedtuple("ObjectDetectionTargetTuple", ["boxes", "labels", "scores"])
35
36
 
36
37
 
37
38
  class BaseDatasetMixin(Generic[_TArray]):
@@ -249,6 +250,7 @@ class BaseICDataset(
249
250
  BaseDownloadedDataset[_TArray, _TArray, list[int], int],
250
251
  BaseDatasetMixin[_TArray],
251
252
  BaseDataset[_TArray, _TArray],
253
+ ic.Dataset,
252
254
  ):
253
255
  """
254
256
  Base class for image classification datasets.
@@ -278,9 +280,10 @@ class BaseICDataset(
278
280
 
279
281
 
280
282
  class BaseODDataset(
281
- BaseDownloadedDataset[_TArray, ObjectDetectionTarget, _TRawTarget, _TAnnotation],
283
+ BaseDownloadedDataset[_TArray, _TODTarget, _TRawTarget, _TAnnotation],
282
284
  BaseDatasetMixin[_TArray],
283
- BaseDataset[_TArray, ObjectDetectionTarget],
285
+ BaseDataset[_TArray, _TODTarget],
286
+ od.Dataset,
284
287
  ):
285
288
  """
286
289
  Base class for object detection datasets.
@@ -288,7 +291,7 @@ class BaseODDataset(
288
291
 
289
292
  _bboxes_per_size: bool = False
290
293
 
291
- def __getitem__(self, index: int) -> tuple[_TArray, ObjectDetectionTarget, DatumMetadata]:
294
+ def __getitem__(self, index: int) -> tuple[_TArray, _TODTarget, DatumMetadata]:
292
295
  """
293
296
  Args
294
297
  ----
@@ -310,7 +313,9 @@ class BaseODDataset(
310
313
  if self._bboxes_per_size and boxes:
311
314
  boxes = boxes * np.asarray([[img_size[1], img_size[2], img_size[1], img_size[2]]])
312
315
  # Create the Object Detection Target
313
- target = ObjectDetectionTarget(self._as_array(boxes), self._as_array(labels), self._one_hot_encode(labels))
316
+ target = ObjectDetectionTargetTuple(self._as_array(boxes), self._as_array(labels), self._one_hot_encode(labels))
317
+ # Cast target explicitly to ODTarget as namedtuple does not provide any typing metadata
318
+ target = cast(_TODTarget, target)
314
319
 
315
320
  img_metadata = {key: val[index] for key, val in self._datum_metadata.items()}
316
321
  img_metadata = img_metadata | additional_metadata
@@ -324,6 +329,15 @@ class BaseODDataset(
324
329
  NumpyArray = NDArray[np.floating[Any]] | NDArray[np.integer[Any]]
325
330
 
326
331
 
332
class NumpyObjectDetectionTarget(od.ObjectDetectionTarget, Protocol):
    """Structural type for object-detection targets whose fields are NumPy arrays.

    Narrows ``od.ObjectDetectionTarget`` so annotations can promise NumPy-backed
    boxes/labels/scores without naming a concrete implementation.
    """

    @property
    def boxes(self) -> NumpyArray: ...
    @property
    def labels(self) -> NumpyArray: ...
    @property
    def scores(self) -> NumpyArray: ...
339
+
340
+
327
341
  class BaseDatasetNumpyMixin(BaseDatasetMixin[NumpyArray]):
328
342
  def _as_array(self, raw: list[Any]) -> NumpyArray:
329
343
  return np.asarray(raw)
@@ -347,8 +361,8 @@ NumpyImageClassificationDatumTransform = Callable[
347
361
  tuple[NumpyArray, NumpyArray, DatumMetadata],
348
362
  ]
349
363
  NumpyObjectDetectionDatumTransform = Callable[
350
- [tuple[NumpyArray, ObjectDetectionTarget, DatumMetadata]],
351
- tuple[NumpyArray, ObjectDetectionTarget, DatumMetadata],
364
+ [tuple[NumpyArray, NumpyObjectDetectionTarget, DatumMetadata]],
365
+ tuple[NumpyArray, NumpyObjectDetectionTarget, DatumMetadata],
352
366
  ]
353
367
  NumpyImageClassificationTransform = NumpyImageTransform | NumpyImageClassificationDatumTransform
354
368
  NumpyObjectDetectionTransform = NumpyImageTransform | NumpyObjectDetectionDatumTransform
@@ -0,0 +1,3 @@
1
+ from ._huggingface import HFImageClassificationDataset, HFObjectDetectionDataset, from_huggingface
2
+
3
+ __all__ = ["HFImageClassificationDataset", "HFObjectDetectionDataset", "from_huggingface"]
@@ -0,0 +1,391 @@
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import Mapping
4
+ from dataclasses import dataclass
5
+ from functools import lru_cache
6
+ from typing import Any, Literal, TypeAlias, overload
7
+
8
+ import maite.protocols.image_classification as ic
9
+ import maite.protocols.object_detection as od
10
+ import numpy as np
11
+ from maite.protocols import DatasetMetadata, DatumMetadata
12
+
13
+ from maite_datasets._base import BaseDataset, NumpyArray, ObjectDetectionTargetTuple
14
+ from maite_datasets.protocols import HFArray, HFClassLabel, HFDataset, HFImage, HFList, HFValue
15
+ from maite_datasets.wrappers._torch import TTarget
16
+
17
+ # Constants for image processing
18
+ MAX_VALID_CHANNELS = 10
19
+
20
+ FeatureDict: TypeAlias = Mapping[str, Any]
21
+
22
+
23
@dataclass
class HFDatasetInfo:
    """Detected feature keys for a Hugging Face image dataset."""

    # Name of the feature column containing the image data.
    image_key: str
26
+
27
+
28
@dataclass
class HFImageClassificationDatasetInfo(HFDatasetInfo):
    """Feature keys for an image-classification dataset."""

    # Name of the class-label feature column.
    label_key: str
31
+
32
+
33
@dataclass
class HFObjectDetectionDatasetInfo(HFDatasetInfo):
    """Feature keys for an object-detection dataset."""

    # Column holding per-image object annotations.
    objects_key: str
    # Key within the objects feature holding bounding boxes.
    bbox_key: str
    # Key within the objects feature holding class labels.
    label_key: str
38
+
39
+
40
class HFBaseDataset(BaseDataset[NumpyArray, TTarget]):
    """Base wrapper for Hugging Face datasets, handling common logic.

    Validates the image feature up front, collects scalar metadata keys, and
    converts images to CHW numpy arrays with a bounded per-instance cache.
    """

    # Maximum number of converted images cached per instance.
    _IMAGE_CACHE_SIZE = 64

    def __init__(self, hf_dataset: HFDataset, image_key: str, known_keys: set[str]) -> None:
        """
        Parameters
        ----------
        hf_dataset : HFDataset
            The source Hugging Face dataset.
        image_key : str
            Feature name holding the image data.
        known_keys : set[str]
            Feature names consumed by the subclass; excluded from datum metadata.
        """
        self.source = hf_dataset
        self._image_key = image_key

        # Add dataset metadata. Copy the info dict so renaming "id" (which would
        # collide with the MAITE DatasetMetadata "id" key) does not mutate the
        # caller's dataset object in place.
        dataset_info_dict = dict(hf_dataset.info.__dict__)
        if "id" in dataset_info_dict:
            dataset_info_dict["datasetinfo_id"] = dataset_info_dict.pop("id")
        self._metadata_id = dataset_info_dict["dataset_name"]
        self._metadata_dict = dataset_info_dict

        # Pre-validate features and cache metadata keys
        self._validate_features(hf_dataset.features)
        self._scalar_meta_keys = self._extract_scalar_meta_keys(hf_dataset.features, known_keys)

        # Bounded per-instance cache for image conversions (see _get_image).
        self._image_cache: dict[int, np.ndarray] = {}

    def _validate_features(self, features: FeatureDict) -> None:
        """Pre-validate all features during initialization."""
        if self._image_key not in features:
            raise ValueError(f"Image key '{self._image_key}' not found in dataset features.")

        if not isinstance(features[self._image_key], (HFImage, HFArray)):
            raise TypeError(f"Image feature '{self._image_key}' must be HFImage or HFArray.")

    def _extract_scalar_meta_keys(self, features: FeatureDict, known_keys: set[str]) -> list[str]:
        """Extract scalar metadata keys during initialization."""
        return [key for key, feature in features.items() if key not in known_keys and isinstance(feature, HFValue)]

    def __len__(self) -> int:
        return len(self.source)

    def _get_base_metadata(self, index: int) -> DatumMetadata:
        """Extract base metadata for a datum: the index as "id" plus scalar columns."""
        item = self.source[index]
        datum_metadata: DatumMetadata = {"id": index}
        for key in self._scalar_meta_keys:
            datum_metadata[key] = item[key]
        return datum_metadata

    def _get_image(self, index: int) -> np.ndarray:
        """Get and process the image at ``index`` as a CHW array, with caching.

        Raises ValueError for unsupported dimensionality or channel layouts.

        NOTE: a previous revision decorated this method with functools.lru_cache,
        which keys on `self` and keeps every instance alive for the lifetime of
        the cache (flake8-bugbear B019); a per-instance dict avoids that leak.
        """
        cached = self._image_cache.get(index)
        if cached is not None:
            return cached

        # Convert to numpy array only once
        raw_image = self.source[index][self._image_key]
        image = np.asarray(raw_image)

        # Handle different image formats efficiently
        if image.ndim == 2:
            # Grayscale: HW -> CHW
            image = image[np.newaxis, ...]  # More efficient than expand_dims
        elif image.ndim == 3:
            # Check if we need to transpose from HWC to CHW
            if image.shape[-1] < image.shape[-3] and image.shape[-1] <= MAX_VALID_CHANNELS:
                # HWC -> CHW using optimized transpose
                image = np.transpose(image, (2, 0, 1))
            elif image.shape[0] > MAX_VALID_CHANNELS:
                raise ValueError(
                    f"Image at index {index} has invalid channel configuration. "
                    f"Expected channels to be less than {MAX_VALID_CHANNELS}, got shape {image.shape}"
                )
        else:
            raise ValueError(
                f"Image at index {index} has unsupported dimensionality. "
                f"Expected 2D or 3D, got {image.ndim}D with shape {image.shape}"
            )

        if image.ndim != 3:
            raise ValueError(f"Image processing failed for index {index}. Final shape: {image.shape}")

        # Evict the oldest entry to keep the cache bounded (dicts preserve
        # insertion order, so the first key is the oldest).
        if len(self._image_cache) >= self._IMAGE_CACHE_SIZE:
            self._image_cache.pop(next(iter(self._image_cache)))
        self._image_cache[index] = image
        return image
115
+
116
+
117
class HFImageClassificationDataset(HFBaseDataset[NumpyArray], ic.Dataset):
    """Adapts a Hugging Face dataset to the MAITE ImageClassificationDataset protocol."""

    def __init__(self, hf_dataset: HFDataset, image_key: str, label_key: str) -> None:
        """
        Parameters
        ----------
        hf_dataset : HFDataset
            The source Hugging Face dataset.
        image_key : str
            Feature name holding the image data.
        label_key : str
            Feature name holding the class label; must be a ClassLabel feature.
        """
        super().__init__(hf_dataset, image_key, known_keys={image_key, label_key})
        self._label_key = label_key

        # The label column must be a ClassLabel so class names/count are known.
        label_feature = hf_dataset.features[self._label_key]
        if not isinstance(label_feature, HFClassLabel):
            raise TypeError(
                f"Label feature '{self._label_key}' must be a datasets.ClassLabel, got {type(label_feature).__name__}."
            )

        self._num_classes: int = label_feature.num_classes

        # Rows of the identity matrix double as one-hot encodings.
        self._one_hot_matrix = np.eye(self._num_classes, dtype=np.float32)

        self.metadata: DatasetMetadata = DatasetMetadata(
            id=self._metadata_id, index2label=dict(enumerate(label_feature.names)), **self._metadata_dict
        )

    def __getitem__(self, index: int) -> tuple[NumpyArray, NumpyArray, DatumMetadata]:
        """Return ``(CHW image, one-hot label, datum metadata)`` for ``index``."""
        size = len(self.source)
        if index < 0 or index >= size:
            raise IndexError(f"Index {index} out of range for dataset of size {size}")

        image = self._get_image(index)
        raw_label = self.source[index][self._label_key]

        # Reject labels outside the declared class range before indexing.
        if raw_label < 0 or raw_label >= self._num_classes:
            raise ValueError(f"Label {raw_label} at index {index} is out of range [0, {self._num_classes})")
        encoded = self._one_hot_matrix[raw_label]

        return image, encoded, self._get_base_metadata(index)
158
+
159
+
160
class HFObjectDetectionDataset(HFBaseDataset[ObjectDetectionTargetTuple], od.Dataset):
    """Adapts a Hugging Face dataset to the MAITE ObjectDetectionDataset protocol."""

    def __init__(self, hf_dataset: HFDataset, image_key: str, objects_key: str, bbox_key: str, label_key: str) -> None:
        """
        Parameters
        ----------
        hf_dataset : HFDataset
            The source Hugging Face dataset.
        image_key : str
            Feature name holding the image data.
        objects_key : str
            Feature name holding per-image object annotations.
        bbox_key : str
            Key within the objects feature holding bounding boxes.
        label_key : str
            Key within the objects feature holding class labels.
        """
        super().__init__(hf_dataset, image_key, known_keys={image_key, objects_key})
        self._objects_key = objects_key
        self._bbox_key = bbox_key
        self._label_key = label_key

        # Validate the objects feature shape and collect per-object metadata keys.
        self._object_meta_keys = self._validate_and_extract_object_features(hf_dataset.features)

        # Resolve the ClassLabel so index2label can be built.
        label_feature = self._extract_label_feature(hf_dataset.features)
        self.metadata: DatasetMetadata = DatasetMetadata(
            id=self._metadata_id, index2label=dict(enumerate(label_feature.names)), **self._metadata_dict
        )

    def _validate_and_extract_object_features(self, features: FeatureDict) -> list[str]:
        """Validate the objects feature and return its extra metadata keys."""
        objects_feature = features[self._objects_key]

        # The annotations may be row-oriented (list of dicts) or column-oriented
        # (dict of lists); normalize to the inner key->feature mapping.
        if isinstance(objects_feature, HFList):
            if not isinstance(objects_feature.feature, dict):
                raise TypeError(f"Objects feature '{self._objects_key}' with list type must contain dict features.")
            inner = objects_feature.feature
        elif isinstance(objects_feature, dict):
            inner = objects_feature
        else:
            raise TypeError(
                f"Objects feature '{self._objects_key}' must be a list or dict, got {type(objects_feature).__name__}."
            )

        required_keys = {self._bbox_key, self._label_key}
        missing_keys = required_keys - set(inner.keys())
        if missing_keys:
            raise ValueError(f"Objects feature '{self._objects_key}' missing required keys: {missing_keys}")

        # Everything besides bbox/label that looks scalar or list-like is metadata.
        return [
            name
            for name, feat in inner.items()
            if name not in required_keys and isinstance(feat, (HFValue, HFList))
        ]

    def _extract_label_feature(self, features: FeatureDict) -> HFClassLabel:
        """Extract and validate the label feature as a ClassLabel."""
        objects_feature = features[self._objects_key]

        inner = objects_feature.feature if isinstance(objects_feature, HFList) else objects_feature
        container = inner[self._label_key]
        # The ClassLabel may be wrapped in a list feature; unwrap one level if so.
        if isinstance(container.feature, HFClassLabel):
            label_feature = container.feature
        else:
            label_feature = container

        if not isinstance(label_feature, HFClassLabel):
            raise TypeError(
                f"Label '{self._label_key}' in '{self._objects_key}' must be a ClassLabel, "
                f"got {type(label_feature).__name__}."
            )

        return label_feature

    def __getitem__(self, index: int) -> tuple[NumpyArray, ObjectDetectionTargetTuple, DatumMetadata]:
        """Return ``(CHW image, detection target, datum metadata)`` for ``index``."""
        size = len(self.source)
        if index < 0 or index >= size:
            raise IndexError(f"Index {index} out of range for dataset of size {size}")

        image = self._get_image(index)
        objects = self.source[index][self._objects_key]

        # Ground-truth annotations carry no confidences; use zero scores.
        labels = objects[self._label_key]
        target = ObjectDetectionTargetTuple(
            objects[self._bbox_key], labels, np.zeros_like(labels, dtype=np.float32)
        )

        datum_metadata = self._get_base_metadata(index)
        self._add_object_metadata(objects, datum_metadata)

        return image, target, datum_metadata

    def _add_object_metadata(self, objects: dict[str, Any], datum_metadata: DatumMetadata) -> None:
        """Add per-object metadata to the datum metadata, one entry per object."""
        bboxes = objects[self._bbox_key]
        if not bboxes:  # No objects in this image
            return

        num_objects = len(bboxes)

        for key in self._object_meta_keys:
            value = objects[key]
            if not isinstance(value, list):
                # Scalar metadata: broadcast to one entry per object.
                datum_metadata[key] = [value] * num_objects
            elif len(value) == num_objects:
                datum_metadata[key] = value
            else:
                raise ValueError(
                    f"Object metadata '{key}' length {len(value)} doesn't match number of objects {num_objects}"
                )
266
+
267
+
268
def is_bbox(feature: Any) -> bool:
    """Return True when ``feature`` describes 4-element numeric bounding boxes.

    Accepts both a bare list-of-4 feature and one nested inside another list
    feature (one box per object).
    """
    if not isinstance(feature, HFList):
        return False

    # Unwrap one level of nesting if present.
    candidate = feature.feature if isinstance(feature.feature, HFList) else feature

    if not isinstance(candidate, HFList) or candidate.length != 4:
        return False
    element = candidate.feature
    if not isinstance(element, HFValue):
        return False
    # Boxes must be numeric (float or int dtypes).
    return "float" in element.dtype or "int" in element.dtype
282
+
283
+
284
def is_label(feature: Any) -> bool:
    """Return True when ``feature`` is a ClassLabel, possibly wrapped in a list."""
    if isinstance(feature, HFList):
        return isinstance(feature.feature, HFClassLabel)
    return isinstance(feature, HFClassLabel)
288
+
289
+
290
def find_od_keys(feature: Any) -> tuple[str | None, str | None]:
    """Locate bbox and label keys inside a candidate objects feature.

    Returns ``(bbox_key, label_key)``; either may be None when not found.
    """
    # Only list-of-dict or dict features can hold object annotations.
    if isinstance(feature, HFList) and isinstance(feature.feature, dict):
        inner_features: FeatureDict = feature.feature
    elif isinstance(feature, dict):
        inner_features = feature
    else:
        return None, None

    bbox_key: str | None = None
    label_key: str | None = None

    for name, candidate in inner_features.items():
        if bbox_key is None and is_bbox(candidate):
            bbox_key = name
        if label_key is None and is_label(candidate):
            label_key = name
        # Stop scanning once both roles are filled.
        if bbox_key and label_key:
            break

    return bbox_key, label_key
310
+
311
+
312
def get_dataset_info(dataset: HFDataset) -> HFDatasetInfo:
    """Detect the task-relevant feature keys of a Hugging Face dataset.

    Returns the most specific info the features support: object detection,
    image classification, or a plain image-only dataset. Raises ValueError
    when no image feature can be found.
    """
    features = dataset.features
    image_key: str | None = None
    label_key: str | None = None
    objects_key: str | None = None
    bbox_key: str | None = None

    # Single pass over features; the first match wins for each role.
    for feature_name, feature in features.items():
        if image_key is None and isinstance(feature, (HFImage, HFArray)):
            image_key = feature_name
        elif label_key is None and isinstance(feature, HFClassLabel):
            label_key = feature_name
        elif objects_key is None:
            candidate_bbox, candidate_label = find_od_keys(feature)
            if candidate_bbox and candidate_label:
                objects_key, bbox_key, label_key = feature_name, candidate_bbox, candidate_label

    if not image_key:
        available_features = list(features.keys())
        raise ValueError(
            f"No image key found in dataset. Available features: {available_features}. "
            f"Expected HFImage or HFArray type."
        )

    # Prefer the most specific info type supported by the detected keys.
    if objects_key and bbox_key and label_key:
        return HFObjectDetectionDatasetInfo(image_key, objects_key, bbox_key, label_key)
    if label_key:
        return HFImageClassificationDatasetInfo(image_key, label_key)
    return HFDatasetInfo(image_key)
341
+
342
+
343
@overload
def from_huggingface(dataset: HFDataset, task: Literal["image_classification"]) -> HFImageClassificationDataset: ...


@overload
def from_huggingface(dataset: HFDataset, task: Literal["object_detection"]) -> HFObjectDetectionDataset: ...


@overload
def from_huggingface(
    dataset: HFDataset, task: Literal["auto"] = "auto"
) -> HFObjectDetectionDataset | HFImageClassificationDataset: ...


def from_huggingface(
    dataset: HFDataset, task: Literal["image_classification", "object_detection", "auto"] = "auto"
) -> HFObjectDetectionDataset | HFImageClassificationDataset:
    """Wrap a Hugging Face dataset in the matching MAITE dataset adapter.

    Inspects the dataset's features to decide between image classification and
    object detection, honoring an explicit ``task`` when given. Raises
    ValueError on a task mismatch or when no task can be determined.
    """
    detected = get_dataset_info(dataset)

    if isinstance(detected, HFImageClassificationDatasetInfo):
        if task in ("image_classification", "auto"):
            return HFImageClassificationDataset(dataset, detected.image_key, detected.label_key)
        if task == "object_detection":
            raise ValueError(
                f"Task mismatch: requested 'object_detection' but dataset appears to be "
                f"image classification. Detected features: image='{detected.image_key}', "
                f"label='{detected.label_key}'"
            )

    elif isinstance(detected, HFObjectDetectionDatasetInfo):
        if task in ("object_detection", "auto"):
            return HFObjectDetectionDataset(
                dataset, detected.image_key, detected.objects_key, detected.bbox_key, detected.label_key
            )
        if task == "image_classification":
            raise ValueError(
                f"Task mismatch: requested 'image_classification' but dataset appears to be "
                f"object detection. Detected features: image='{detected.image_key}', "
                f"objects='{detected.objects_key}'"
            )

    # Neither detection succeeded nor matched the request; explain what was seen.
    available_features = list(dataset.features.keys())
    feature_types = {k: type(v).__name__ for k, v in dataset.features.items()}

    raise ValueError(
        f"Could not automatically determine task for requested type '{task}'. "
        f"Detected info: {detected}. Available features: {available_features}. "
        f"Feature types: {feature_types}. Ensure dataset has proper image and label/objects features."
    )
@@ -13,11 +13,12 @@ from maite_datasets._base import (
13
13
  BaseODDataset,
14
14
  DataLocation,
15
15
  NumpyArray,
16
+ NumpyObjectDetectionTarget,
16
17
  NumpyObjectDetectionTransform,
17
18
  )
18
19
 
19
20
 
20
- class AntiUAVDetection(BaseODDataset[NumpyArray, list[str], str], BaseDatasetNumpyMixin):
21
+ class AntiUAVDetection(BaseODDataset[NumpyArray, NumpyObjectDetectionTarget, list[str], str], BaseDatasetNumpyMixin):
21
22
  """
22
23
  A UAV detection dataset focused on detecting UAVs in natural images against large variation in backgrounds.
23
24
 
@@ -11,7 +11,7 @@ import numpy as np
11
11
  from maite.protocols import DatasetMetadata, DatumMetadata
12
12
  from PIL import Image
13
13
 
14
- from maite_datasets._base import BaseDataset, ObjectDetectionTarget
14
+ from maite_datasets._base import BaseDataset, ObjectDetectionTargetTuple
15
15
  from maite_datasets._reader import BaseDatasetReader
16
16
 
17
17
 
@@ -269,7 +269,7 @@ class COCODataset(BaseDataset):
269
269
  scores = np.empty(0, dtype=np.float32)
270
270
  annotation_metadata = []
271
271
 
272
- target = ObjectDetectionTarget(boxes, labels, scores)
272
+ target = ObjectDetectionTargetTuple(boxes, labels, scores)
273
273
 
274
274
  # Create comprehensive datum metadata
275
275
  datum_metadata = DatumMetadata(
@@ -11,11 +11,12 @@ from maite_datasets._base import (
11
11
  BaseODDataset,
12
12
  DataLocation,
13
13
  NumpyArray,
14
+ NumpyObjectDetectionTarget,
14
15
  NumpyObjectDetectionTransform,
15
16
  )
16
17
 
17
18
 
18
- class MILCO(BaseODDataset[NumpyArray, list[str], str], BaseDatasetNumpyMixin):
19
+ class MILCO(BaseODDataset[NumpyArray, NumpyObjectDetectionTarget, list[str], str], BaseDatasetNumpyMixin):
19
20
  """
20
21
  A side-scan sonar dataset focused on mine-like object detection.
21
22
 
@@ -12,6 +12,7 @@ from maite_datasets._base import (
12
12
  BaseODDataset,
13
13
  DataLocation,
14
14
  NumpyArray,
15
+ NumpyObjectDetectionTarget,
15
16
  NumpyObjectDetectionTransform,
16
17
  )
17
18
  from maite_datasets._fileio import _ensure_exists
@@ -20,6 +21,7 @@ from maite_datasets._fileio import _ensure_exists
20
21
  class SeaDrone(
21
22
  BaseODDataset[
22
23
  NumpyArray,
24
+ NumpyObjectDetectionTarget,
23
25
  list[tuple[list[int], list[list[float]]]],
24
26
  tuple[list[int], list[list[float]]],
25
27
  ],
@@ -16,8 +16,8 @@ from maite_datasets._base import (
16
16
  BaseODDataset,
17
17
  DataLocation,
18
18
  NumpyArray,
19
+ NumpyObjectDetectionTarget,
19
20
  NumpyObjectDetectionTransform,
20
- ObjectDetectionTarget,
21
21
  _ensure_exists,
22
22
  )
23
23
 
@@ -46,7 +46,7 @@ VOCClassStringMap = Literal[
46
46
  TVOCClassMap = TypeVar("TVOCClassMap", VOCClassStringMap, int, list[VOCClassStringMap], list[int])
47
47
 
48
48
 
49
- class BaseVOCDataset(BaseDownloadedDataset[NumpyArray, ObjectDetectionTarget, list[str], str]):
49
+ class BaseVOCDataset(BaseDownloadedDataset[NumpyArray, NumpyObjectDetectionTarget, list[str], str]):
50
50
  _resources = [
51
51
  DataLocation(
52
52
  url="https://data.brainchip.com/dataset-mirror/voc/VOCtrainval_11-May-2012.tar",
@@ -431,7 +431,7 @@ class BaseVOCDataset(BaseDownloadedDataset[NumpyArray, ObjectDetectionTarget, li
431
431
 
432
432
  class VOCDetection(
433
433
  BaseVOCDataset,
434
- BaseODDataset[NumpyArray, list[str], str],
434
+ BaseODDataset[NumpyArray, NumpyObjectDetectionTarget, list[str], str],
435
435
  BaseDatasetNumpyMixin,
436
436
  ):
437
437
  """
@@ -12,7 +12,7 @@ import numpy as np
12
12
  from maite.protocols import DatasetMetadata, DatumMetadata
13
13
  from PIL import Image
14
14
 
15
- from maite_datasets._base import BaseDataset, ObjectDetectionTarget
15
+ from maite_datasets._base import BaseDataset, ObjectDetectionTargetTuple
16
16
  from maite_datasets._reader import BaseDatasetReader
17
17
 
18
18
 
@@ -297,7 +297,7 @@ class YOLODataset(BaseDataset):
297
297
  labels = np.empty(0, dtype=np.int64)
298
298
  scores = np.empty(0, dtype=np.float32)
299
299
 
300
- target = ObjectDetectionTarget(boxes, labels, scores)
300
+ target = ObjectDetectionTargetTuple(boxes, labels, scores)
301
301
 
302
302
  # Create comprehensive datum metadata
303
303
  datum_metadata = DatumMetadata(
@@ -2,8 +2,8 @@
2
2
  Common type protocols used for interoperability.
3
3
  """
4
4
 
5
- from collections.abc import Iterator
6
- from typing import Any, Protocol, runtime_checkable
5
+ from collections.abc import Iterable, Iterator, Mapping, Sequence
6
+ from typing import Any, Generic, Protocol, TypeVar, overload, runtime_checkable
7
7
 
8
8
 
9
9
  @runtime_checkable
@@ -21,3 +21,85 @@ class Array(Protocol):
21
21
  def __getitem__(self, key: Any, /) -> Any: ...
22
22
  def __iter__(self) -> Iterator[Any]: ...
23
23
  def __len__(self) -> int: ...
24
+
25
+
26
TBoxes = TypeVar("TBoxes", Array, Sequence)
TLabels = TypeVar("TLabels", Array, Sequence)
TScores = TypeVar("TScores", Array, Sequence)


class GenericObjectDetectionTarget(Generic[TBoxes, TLabels, TScores], Protocol):
    """Object-detection target parameterized over the container types of its fields."""

    boxes: TBoxes
    labels: TLabels
    scores: TScores


@runtime_checkable
class HFDatasetInfo(Protocol):
    """Minimal structural view of a Hugging Face dataset-info object."""

    @property
    def dataset_name(self) -> str: ...


@runtime_checkable
class HFDataset(Protocol):
    """Minimal structural view of a Hugging Face dataset.

    Integer keys index rows (returning a feature dict); string keys select a
    whole column.
    """

    @property
    def features(self) -> Mapping[str, Any]: ...

    @property
    def builder_name(self) -> str | None: ...

    @property
    def info(self) -> HFDatasetInfo: ...

    @overload
    def __getitem__(self, key: int | slice | Iterable[int]) -> dict[str, Any]: ...
    @overload
    def __getitem__(self, key: str) -> Sequence[int]: ...
    def __getitem__(self, key: str | int | slice | Iterable[int]) -> dict[str, Any] | Sequence[int]: ...

    def __len__(self) -> int: ...


@runtime_checkable
class HFFeature(Protocol):
    """Base structural type for Hugging Face feature descriptors."""

    @property
    def _type(self) -> str: ...


@runtime_checkable
class HFClassLabel(HFFeature, Protocol):
    """Structural stand-in for a ClassLabel feature."""

    @property
    def names(self) -> list[str]: ...

    @property
    def num_classes(self) -> int: ...


@runtime_checkable
class HFImage(HFFeature, Protocol):
    """Structural stand-in for an Image feature."""

    @property
    def decode(self) -> bool: ...


@runtime_checkable
class HFArray(HFFeature, Protocol):
    """Structural stand-in for an array-valued feature."""

    @property
    def shape(self) -> tuple[int, ...]: ...
    @property
    def dtype(self) -> str: ...


@runtime_checkable
class HFList(HFFeature, Protocol):
    """Structural stand-in for a list/sequence feature."""

    @property
    def feature(self) -> Any: ...
    @property
    def length(self) -> int: ...


@runtime_checkable
class HFValue(HFFeature, Protocol):
    """Structural stand-in for a scalar Value feature."""

    @property
    def pa_type(self) -> Any: ...  # pyarrow type ... not documented
    @property
    def dtype(self) -> str: ...
@@ -1,22 +1,31 @@
1
1
  from __future__ import annotations
2
2
 
3
- from typing import Any, Callable, Generic, TypeAlias, TypeVar, cast, overload
3
+ from typing import Any, Callable, Generic, Protocol, TypeAlias, TypeVar, cast, overload
4
4
 
5
- import numpy as np
6
5
  import torch
7
6
  from maite.protocols import DatasetMetadata, DatumMetadata
8
- from maite.protocols.object_detection import ObjectDetectionTarget as _ObjectDetectionTarget
7
+ from maite.protocols.object_detection import ObjectDetectionTarget
9
8
  from torch import Tensor
10
9
  from torchvision.tv_tensors import BoundingBoxes, Image
11
10
 
12
- from maite_datasets._base import BaseDataset, ObjectDetectionTarget
11
+ from maite_datasets._base import BaseDataset, ObjectDetectionTargetTuple
13
12
  from maite_datasets.protocols import Array
14
13
 
15
14
  TArray = TypeVar("TArray", bound=Array)
16
15
  TTarget = TypeVar("TTarget")
17
16
 
17
+
18
class TorchvisionObjectDetectionTarget(Protocol):
    """Structural type for detection targets backed by torchvision/torch tensors."""

    @property
    def boxes(self) -> BoundingBoxes: ...
    @property
    def labels(self) -> Tensor: ...
    @property
    def scores(self) -> Tensor: ...
25
+
26
+
18
27
  TorchvisionImageClassificationDatum: TypeAlias = tuple[Image, Tensor, DatumMetadata]
19
- TorchvisionObjectDetectionDatum: TypeAlias = tuple[Image, ObjectDetectionTarget, DatumMetadata]
28
+ TorchvisionObjectDetectionDatum: TypeAlias = tuple[Image, ObjectDetectionTargetTuple, DatumMetadata]
20
29
 
21
30
 
22
31
  class TorchvisionWrapper(Generic[TArray, TTarget]):
@@ -63,31 +72,30 @@ class TorchvisionWrapper(Generic[TArray, TTarget]):
63
72
  @overload
64
73
  def __getitem__(
65
74
  self: TorchvisionWrapper[TArray, TTarget], index: int
66
- ) -> tuple[Image, ObjectDetectionTarget, DatumMetadata]: ...
75
+ ) -> tuple[Image, TorchvisionObjectDetectionTarget, DatumMetadata]: ...
67
76
 
68
- def __getitem__(self, index: int) -> tuple[Image, Tensor | ObjectDetectionTarget, DatumMetadata]:
77
+ def __getitem__(self, index: int) -> tuple[Image, Tensor | TorchvisionObjectDetectionTarget, DatumMetadata]:
69
78
  """Get item with torch tensor conversion."""
70
79
  image, target, metadata = self._dataset[index]
71
80
 
72
81
  # Convert image to torch tensor
73
- torch_image = torch.from_numpy(image) if isinstance(image, np.ndarray) else torch.as_tensor(image)
74
- torch_image = Image(torch_image)
82
+ torch_image = Image(torch.tensor(image))
75
83
 
76
84
  # Handle different target types
77
85
  if isinstance(target, Array):
78
86
  # Image classification case
79
- torch_target = torch.as_tensor(target, dtype=torch.float32)
87
+ torch_target = torch.tensor(target, dtype=torch.float32)
80
88
  torch_datum = self._transform((torch_image, torch_target, metadata))
81
89
  return cast(TorchvisionImageClassificationDatum, torch_datum)
82
90
 
83
- if isinstance(target, _ObjectDetectionTarget):
91
+ if isinstance(target, ObjectDetectionTarget):
84
92
  # Object detection case
85
93
  torch_boxes = BoundingBoxes(
86
- torch.as_tensor(target.boxes), format="XYXY", canvas_size=(torch_image.shape[-2], torch_image.shape[-1])
94
+ torch.tensor(target.boxes), format="XYXY", canvas_size=(torch_image.shape[-2], torch_image.shape[-1])
87
95
  ) # type: ignore
88
- torch_labels = torch.as_tensor(target.labels, dtype=torch.int64)
89
- torch_scores = torch.as_tensor(target.scores, dtype=torch.float32)
90
- torch_target = ObjectDetectionTarget(torch_boxes, torch_labels, torch_scores)
96
+ torch_labels = torch.tensor(target.labels, dtype=torch.int64)
97
+ torch_scores = torch.tensor(target.scores, dtype=torch.float32)
98
+ torch_target = ObjectDetectionTargetTuple(torch_boxes, torch_labels, torch_scores)
91
99
  torch_datum = self._transform((torch_image, torch_target, metadata))
92
100
  return cast(TorchvisionObjectDetectionDatum, torch_datum)
93
101
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: maite-datasets
3
- Version: 0.0.6
3
+ Version: 0.0.8
4
4
  Summary: A collection of Image Classification and Object Detection task datasets conforming to the MAITE protocol.
5
5
  Author-email: Andrew Weng <andrew.weng@ariacoustics.com>, Ryan Wood <ryan.wood@ariacoustics.com>, Shaun Jullens <shaun.jullens@ariacoustics.com>
6
6
  License-Expression: MIT
@@ -10,11 +10,11 @@ Classifier: Framework :: Pytest
10
10
  Classifier: License :: OSI Approved :: MIT License
11
11
  Classifier: Operating System :: OS Independent
12
12
  Classifier: Programming Language :: Python :: 3 :: Only
13
- Classifier: Programming Language :: Python :: 3.9
14
13
  Classifier: Programming Language :: Python :: 3.10
15
14
  Classifier: Programming Language :: Python :: 3.11
16
15
  Classifier: Programming Language :: Python :: 3.12
17
- Requires-Python: >=3.9
16
+ Classifier: Programming Language :: Python :: 3.13
17
+ Requires-Python: >=3.10
18
18
  Requires-Dist: defusedxml>=0.7.1
19
19
  Requires-Dist: maite<0.9,>=0.7
20
20
  Requires-Dist: numpy>=1.24.2
@@ -81,6 +81,8 @@ tuple(<class 'numpy.ndarray'>, <class 'numpy.ndarray'>, <class 'dict'>)
81
81
 
82
82
  Wrappers provide a way to convert datasets to allow usage of tools within specific backend frameworks.
83
83
 
84
+ ### Torchvision
85
+
84
86
  `TorchvisionWrapper` is a convenience class that wraps any of the datasets and provides the capability to apply
85
87
  `torchvision` transforms to the dataset.
86
88
 
@@ -129,6 +131,41 @@ type=Image, shape=torch.Size([3, 224, 224])
129
131
  tensor([16.4062, 47.4688, 28.4375, 54.0312], dtype=torch.float64)
130
132
  ```
131
133
 
134
+ ## Dataset Adapters
135
+
136
+ Adapters provide a way to read in datasets from other popular formats.
137
+
138
+ ### Huggingface
139
+
140
+ Hugging face datasets can be adapted into MAITE compliant format using the `from_huggingface` adapter.
141
+
142
+ ```python
143
+ >>> from datasets import load_dataset
144
+ >>> from maite_datasets.adapters import from_huggingface
145
+
146
+ >>> cppe5 = load_dataset("cppe-5")
147
+ >>> m_cppe5 = from_huggingface(cppe5["train"])
148
+ >>> print(m_cppe5)
149
+ HFObjectDetection Dataset
150
+ -------------------------
151
+ Source: Dataset({
152
+ features: ['image_id', 'image', 'width', 'height', 'objects'],
153
+ num_rows: 1000
154
+ })
155
+ Metadata: {'id': 'cppe-5', 'index2label': {0: 'Coverall', 1: 'Face_Shield', 2: 'Gloves', 3: 'Goggles', 4: 'Mask'}, 'description': '', 'citation': '', 'homepage': '', 'license': '', 'features': {'image_id': Value('int64'), 'image': Image(mode=None, decode=True), 'width': Value('int32'), 'height': Value('int32'), 'objects': {'id': List(Value('int64')), 'area': List(Value('int64')), 'bbox': List(List(Value('float32'), length=4)), 'category': List(ClassLabel(names=['Coverall', 'Face_Shield', 'Gloves', 'Goggles', 'Mask']))}}, 'post_processed': None, 'supervised_keys': None, 'builder_name': 'parquet', 'dataset_name': 'cppe-5', 'config_name': 'default', 'version': 0.0.0, 'splits': {'train': SplitInfo(name='train', num_bytes=240478590, num_examples=1000, shard_lengths=None, dataset_name='cppe-5'), 'test': SplitInfo(name='test', num_bytes=4172706, num_examples=29, shard_lengths=None, dataset_name='cppe-5')}, 'download_checksums': {'hf://datasets/cppe-5@66f6a5efd474e35bd7cb94bf15dea27d4c6ad3f8/data/train-00000-of-00001.parquet': {'num_bytes': 237015519, 'checksum': None}, 'hf://datasets/cppe-5@66f6a5efd474e35bd7cb94bf15dea27d4c6ad3f8/data/test-00000-of-00001.parquet': {'num_bytes': 4137134, 'checksum': None}}, 'download_size': 241152653, 'post_processing_size': None, 'dataset_size': 244651296, 'size_in_bytes': 485803949}
156
+
157
+ >>> image = m_cppe5[0][0]
158
+ >>> print(f"type={image.__class__.__name__}, shape={image.shape}")
159
+ type=ndarray, shape=(3, 663, 943)
160
+
161
+ >>> target = m_cppe5[0][1]
162
+ >>> print(f"box={target.boxes[0]}, label={target.labels[0]}")
163
+ box=[302.0, 109.0, 73.0, 52.0], label=4
164
+
165
+ >>> print(m_cppe5[0][2])
166
+ {'id': [114, 115, 116, 117], 'image_id': 15, 'width': 943, 'height': 663, 'area': [3796, 1596, 152768, 81002]}
167
+ ```
168
+
132
169
  ## Additional Information
133
170
 
134
171
  For more information on the MAITE protocol, check out their [documentation](https://mit-ll-ai-technology.github.io/maite/).
@@ -0,0 +1,28 @@
1
+ maite_datasets/__init__.py,sha256=Z_HyAe08HaHMjzZS2afFumBXYFRFj0ny5ZAIp0hcj4w,569
2
+ maite_datasets/_base.py,sha256=6-RG3VJfpNEC7luNGCPpqyIQ5FlWeYRSm_bfo1PXdUQ,12924
3
+ maite_datasets/_builder.py,sha256=MnCh6z5hSINlzBnK_pdbgI5zSg5d1uq4UvXt3cjn9hs,9820
4
+ maite_datasets/_collate.py,sha256=pwUnmrbJH5olFjSwF-ZkGdfopTWUUlwmq0d5KzERcy8,4052
5
+ maite_datasets/_fileio.py,sha256=7S-hF3xU60AdcsPsfYR7rjbeGZUlv3JjGEZhGJOxGYU,5622
6
+ maite_datasets/_reader.py,sha256=tJqsjfXaK-mrs0Ed4BktombFMmNwCur35W7tuYCflKM,5569
7
+ maite_datasets/_validate.py,sha256=Uokbolmv1uSv98sph44HON0HEieeK3s2mqbPMP1d5xs,6948
8
+ maite_datasets/protocols.py,sha256=PcIVJzUc5com_pmhzh0CPb0IkrmFof6WtUQXm7_6Vko,2450
9
+ maite_datasets/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
10
+ maite_datasets/adapters/__init__.py,sha256=PDMdvRqLVS7x6ghJ_xUBD5Ebq5HrV5k3p4TqK2_6Gt8,191
11
+ maite_datasets/adapters/_huggingface.py,sha256=dZa2guKeto-oKMM9J_TaI4DSPK18u7PvK3AsHQp3eQc,16199
12
+ maite_datasets/image_classification/__init__.py,sha256=pcZojkdsiMoLgY4mKjoQY6WyEwiGYHxNrAGpnvn3zsY,308
13
+ maite_datasets/image_classification/_cifar10.py,sha256=rrkJZ70NBYOSGxligXyakVxNOyGAglN6PaGQazuWNO4,8453
14
+ maite_datasets/image_classification/_mnist.py,sha256=9isgdi-YXgs6nXoh1j8uOgh4_sIhBIky72Vyl866rTE,8192
15
+ maite_datasets/image_classification/_ships.py,sha256=nWhte8592lpybhQCCdgT36LnuMQ0PRJWlDxT5-IPUtk,5137
16
+ maite_datasets/object_detection/__init__.py,sha256=171KT_X6I4YGy18G240N_-ZsKvXJ6YqcBDzkhTiBj2E,587
17
+ maite_datasets/object_detection/_antiuav.py,sha256=8b_AlX5xb5Fu02pG-dldFrHtVZLZzb6ZyGGYj-b7Lt0,8325
18
+ maite_datasets/object_detection/_coco.py,sha256=vTPQmLSgIM7lNb2PEaCrs1fK7_82j3tXFQQ3A5e0dlM,10319
19
+ maite_datasets/object_detection/_milco.py,sha256=aX10PbWjIFyajS5ibGA219fZd7tZTkAlvpThoTZpxtM,7988
20
+ maite_datasets/object_detection/_seadrone.py,sha256=wKcyKQhcvNj5NrLMOD-9KdGaTaBnE6F9uif_J-PKNug,271281
21
+ maite_datasets/object_detection/_voc.py,sha256=CDFgSpbQZgpSq51CKd9twIUHr6l_GAViVGpmFUNX0Dg,19627
22
+ maite_datasets/object_detection/_yolo.py,sha256=GIa43Ec_542N7PRo0XngV-Npw3n4yoIND3tI7vas3u0,11877
23
+ maite_datasets/wrappers/__init__.py,sha256=6uI0ztOB2IlMWln9JkVke4OhU2HQ8i6YCaCNq_q5qb0,225
24
+ maite_datasets/wrappers/_torch.py,sha256=gYcAdrarcX_DiV51MD1cpOa9YAZGEmWblJEyyLS7rRs,4687
25
+ maite_datasets-0.0.8.dist-info/METADATA,sha256=1srwva41Dtqwda2AGc0GwnjQs9f-sFMVSLaMrVQJv7Q,7845
26
+ maite_datasets-0.0.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
27
+ maite_datasets-0.0.8.dist-info/licenses/LICENSE,sha256=6h3J3R-ajGHh_isDSftzS5_jJjB9HH4TaI0vU-VscaY,1082
28
+ maite_datasets-0.0.8.dist-info/RECORD,,
@@ -1,26 +0,0 @@
1
- maite_datasets/__init__.py,sha256=Z_HyAe08HaHMjzZS2afFumBXYFRFj0ny5ZAIp0hcj4w,569
2
- maite_datasets/_base.py,sha256=VEd4ipHPAOCbz4Zm8zdI2yQwQ_x9O4Wq01xoZ2QvNYo,12366
3
- maite_datasets/_builder.py,sha256=MnCh6z5hSINlzBnK_pdbgI5zSg5d1uq4UvXt3cjn9hs,9820
4
- maite_datasets/_collate.py,sha256=pwUnmrbJH5olFjSwF-ZkGdfopTWUUlwmq0d5KzERcy8,4052
5
- maite_datasets/_fileio.py,sha256=7S-hF3xU60AdcsPsfYR7rjbeGZUlv3JjGEZhGJOxGYU,5622
6
- maite_datasets/_reader.py,sha256=tJqsjfXaK-mrs0Ed4BktombFMmNwCur35W7tuYCflKM,5569
7
- maite_datasets/_validate.py,sha256=Uokbolmv1uSv98sph44HON0HEieeK3s2mqbPMP1d5xs,6948
8
- maite_datasets/protocols.py,sha256=YGXb-WxlneXdIQBfBy5OdbylHSVfM-RBXeGvpiWwfLU,607
9
- maite_datasets/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
10
- maite_datasets/image_classification/__init__.py,sha256=pcZojkdsiMoLgY4mKjoQY6WyEwiGYHxNrAGpnvn3zsY,308
11
- maite_datasets/image_classification/_cifar10.py,sha256=rrkJZ70NBYOSGxligXyakVxNOyGAglN6PaGQazuWNO4,8453
12
- maite_datasets/image_classification/_mnist.py,sha256=9isgdi-YXgs6nXoh1j8uOgh4_sIhBIky72Vyl866rTE,8192
13
- maite_datasets/image_classification/_ships.py,sha256=nWhte8592lpybhQCCdgT36LnuMQ0PRJWlDxT5-IPUtk,5137
14
- maite_datasets/object_detection/__init__.py,sha256=171KT_X6I4YGy18G240N_-ZsKvXJ6YqcBDzkhTiBj2E,587
15
- maite_datasets/object_detection/_antiuav.py,sha256=B20JrbouDM1o5f1ct9Zfbkks8NaVqYrxu5x-rBZvGx8,8265
16
- maite_datasets/object_detection/_coco.py,sha256=3abRQJ9ATcZOeqK-4pnMfr-pv7aGcRum88SRlLLXTzk,10309
17
- maite_datasets/object_detection/_milco.py,sha256=brxxYs5ak0vEpOSd2IW5AMMVkuadVmXCJBFPvXTmNlo,7928
18
- maite_datasets/object_detection/_seadrone.py,sha256=JdHL0eRZoe7pXVInOq5Xpnz3-vgeBxbO25oTYgGZ44o,271213
19
- maite_datasets/object_detection/_voc.py,sha256=vgRn-sa_r2-hxwpM3veRZQMcWyqJz9OGalABOccZeow,19589
20
- maite_datasets/object_detection/_yolo.py,sha256=Luojzhanh6AK949910jN0yTpy8zwF5_At6nThj3Zw9Q,11867
21
- maite_datasets/wrappers/__init__.py,sha256=6uI0ztOB2IlMWln9JkVke4OhU2HQ8i6YCaCNq_q5qb0,225
22
- maite_datasets/wrappers/_torch.py,sha256=dmY6nSyLyVPOzpOE4BDTyOomWdFpN0x5dmH3XUzNetc,4588
23
- maite_datasets-0.0.6.dist-info/METADATA,sha256=cmDnRwPTu1xWbFIpnNQ0jmhEI7XTu6CQ3cm18y0dMDk,5505
24
- maite_datasets-0.0.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
25
- maite_datasets-0.0.6.dist-info/licenses/LICENSE,sha256=6h3J3R-ajGHh_isDSftzS5_jJjB9HH4TaI0vU-VscaY,1082
26
- maite_datasets-0.0.6.dist-info/RECORD,,