maite-datasets 0.0.7__py3-none-any.whl → 0.0.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- maite_datasets/_base.py +24 -10
- maite_datasets/adapters/_huggingface.py +5 -5
- maite_datasets/object_detection/_antiuav.py +2 -1
- maite_datasets/object_detection/_coco.py +2 -2
- maite_datasets/object_detection/_milco.py +2 -1
- maite_datasets/object_detection/_seadrone.py +2 -0
- maite_datasets/object_detection/_voc.py +3 -3
- maite_datasets/object_detection/_yolo.py +2 -2
- maite_datasets/protocols.py +12 -1
- maite_datasets/wrappers/_torch.py +18 -8
- {maite_datasets-0.0.7.dist-info → maite_datasets-0.0.8.dist-info}/METADATA +1 -1
- {maite_datasets-0.0.7.dist-info → maite_datasets-0.0.8.dist-info}/RECORD +14 -14
- {maite_datasets-0.0.7.dist-info → maite_datasets-0.0.8.dist-info}/WHEEL +0 -0
- {maite_datasets-0.0.7.dist-info → maite_datasets-0.0.8.dist-info}/licenses/LICENSE +0 -0
maite_datasets/_base.py
CHANGED
@@ -8,20 +8,22 @@ from abc import abstractmethod
 from collections import namedtuple
 from collections.abc import Iterator, Sequence
 from pathlib import Path
-from typing import Any, Callable, Generic, Literal, NamedTuple, TypeVar, cast
+from typing import Any, Callable, Generic, Literal, NamedTuple, Protocol, TypeVar, cast
 
 import numpy as np
 from maite.protocols import DatasetMetadata, DatumMetadata
+from maite.protocols import image_classification as ic
+from maite.protocols import object_detection as od
 from numpy.typing import NDArray
 from PIL import Image
 
 from maite_datasets._fileio import _ensure_exists
 from maite_datasets.protocols import Array
 
-_T = TypeVar("_T")
 _T_co = TypeVar("_T_co", covariant=True)
 _TArray = TypeVar("_TArray", bound=Array)
 _TTarget = TypeVar("_TTarget")
+_TODTarget = TypeVar("_TODTarget", bound=od.ObjectDetectionTarget)
 _TRawTarget = TypeVar(
     "_TRawTarget",
     Sequence[int],
@@ -30,8 +32,7 @@ _TRawTarget = TypeVar(
 )
 _TAnnotation = TypeVar("_TAnnotation", int, str, tuple[list[int], list[list[float]]])
 
-
-ObjectDetectionTarget = namedtuple("ObjectDetectionTarget", ["boxes", "labels", "scores"])
+ObjectDetectionTargetTuple = namedtuple("ObjectDetectionTargetTuple", ["boxes", "labels", "scores"])
 
 
 class BaseDatasetMixin(Generic[_TArray]):
@@ -249,6 +250,7 @@ class BaseICDataset(
     BaseDownloadedDataset[_TArray, _TArray, list[int], int],
     BaseDatasetMixin[_TArray],
     BaseDataset[_TArray, _TArray],
+    ic.Dataset,
 ):
     """
     Base class for image classification datasets.
@@ -278,9 +280,10 @@ class BaseICDataset(
 
 
 class BaseODDataset(
-    BaseDownloadedDataset[_TArray,
+    BaseDownloadedDataset[_TArray, _TODTarget, _TRawTarget, _TAnnotation],
     BaseDatasetMixin[_TArray],
-    BaseDataset[_TArray,
+    BaseDataset[_TArray, _TODTarget],
+    od.Dataset,
 ):
     """
     Base class for object detection datasets.
@@ -288,7 +291,7 @@ class BaseODDataset(
 
     _bboxes_per_size: bool = False
 
-    def __getitem__(self, index: int) -> tuple[_TArray,
+    def __getitem__(self, index: int) -> tuple[_TArray, _TODTarget, DatumMetadata]:
        """
        Args
        ----
@@ -310,7 +313,9 @@ class BaseODDataset(
         if self._bboxes_per_size and boxes:
             boxes = boxes * np.asarray([[img_size[1], img_size[2], img_size[1], img_size[2]]])
         # Create the Object Detection Target
-        target =
+        target = ObjectDetectionTargetTuple(self._as_array(boxes), self._as_array(labels), self._one_hot_encode(labels))
+        # Cast target explicitly to ODTarget as namedtuple does not provide any typing metadata
+        target = cast(_TODTarget, target)
 
         img_metadata = {key: val[index] for key, val in self._datum_metadata.items()}
         img_metadata = img_metadata | additional_metadata
@@ -324,6 +329,15 @@ class BaseODDataset(
 NumpyArray = NDArray[np.floating[Any]] | NDArray[np.integer[Any]]
 
 
+class NumpyObjectDetectionTarget(od.ObjectDetectionTarget, Protocol):
+    @property
+    def boxes(self) -> NumpyArray: ...
+    @property
+    def labels(self) -> NumpyArray: ...
+    @property
+    def scores(self) -> NumpyArray: ...
+
+
 class BaseDatasetNumpyMixin(BaseDatasetMixin[NumpyArray]):
     def _as_array(self, raw: list[Any]) -> NumpyArray:
         return np.asarray(raw)
@@ -347,8 +361,8 @@ NumpyImageClassificationDatumTransform = Callable[
     tuple[NumpyArray, NumpyArray, DatumMetadata],
 ]
 NumpyObjectDetectionDatumTransform = Callable[
-    [tuple[NumpyArray,
-    tuple[NumpyArray,
+    [tuple[NumpyArray, NumpyObjectDetectionTarget, DatumMetadata]],
+    tuple[NumpyArray, NumpyObjectDetectionTarget, DatumMetadata],
]
 NumpyImageClassificationTransform = NumpyImageTransform | NumpyImageClassificationDatumTransform
 NumpyObjectDetectionTransform = NumpyImageTransform | NumpyObjectDetectionDatumTransform
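
The core change in _base.py is that the ObjectDetectionTarget namedtuple is renamed to ObjectDetectionTargetTuple and explicitly cast to the od.ObjectDetectionTarget-bound type variable, since a namedtuple carries no protocol typing metadata on its own. Below is a minimal standalone sketch of that pattern, not the package's actual class; make_target and its num_classes parameter are illustrative, and only ObjectDetectionTargetTuple and the MAITE protocol come from the diff above.

# Sketch: a namedtuple satisfies MAITE's od.ObjectDetectionTarget protocol
# structurally (boxes/labels/scores), but static checkers only see a tuple
# subclass, hence the explicit cast mirrored from BaseODDataset.__getitem__.
from collections import namedtuple
from typing import cast

import numpy as np
from maite.protocols import object_detection as od

ObjectDetectionTargetTuple = namedtuple("ObjectDetectionTargetTuple", ["boxes", "labels", "scores"])


def make_target(boxes: list[list[float]], labels: list[int], num_classes: int) -> od.ObjectDetectionTarget:
    # One-hot scores stand in for what BaseODDataset._one_hot_encode produces.
    scores = np.eye(num_classes)[np.asarray(labels)]
    target = ObjectDetectionTargetTuple(np.asarray(boxes), np.asarray(labels), scores)
    # The namedtuple provides no typing metadata, so cast it to the protocol.
    return cast(od.ObjectDetectionTarget, target)


target = make_target([[0.0, 0.0, 10.0, 10.0]], [2], num_classes=3)
print(target.boxes.shape, target.labels, target.scores.shape)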
maite_datasets/adapters/_huggingface.py
CHANGED
@@ -10,7 +10,7 @@ import maite.protocols.object_detection as od
 import numpy as np
 from maite.protocols import DatasetMetadata, DatumMetadata
 
-from maite_datasets._base import BaseDataset, NumpyArray,
+from maite_datasets._base import BaseDataset, NumpyArray, ObjectDetectionTargetTuple
 from maite_datasets.protocols import HFArray, HFClassLabel, HFDataset, HFImage, HFList, HFValue
 from maite_datasets.wrappers._torch import TTarget
 
@@ -135,7 +135,7 @@ class HFImageClassificationDataset(HFBaseDataset[NumpyArray], ic.Dataset):
 
         # Enhanced metadata with validation
         self.metadata: DatasetMetadata = DatasetMetadata(
-            id=self._metadata_id, index2label=dict(enumerate(label_feature.names), **self._metadata_dict
+            id=self._metadata_id, index2label=dict(enumerate(label_feature.names)), **self._metadata_dict
         )
 
     def __getitem__(self, index: int) -> tuple[NumpyArray, NumpyArray, DatumMetadata]:
@@ -157,7 +157,7 @@ class HFImageClassificationDataset(HFBaseDataset[NumpyArray], ic.Dataset):
         return image, one_hot_label, datum_metadata
 
 
-class HFObjectDetectionDataset(HFBaseDataset[ObjectDetectionTarget], od.Dataset):
+class HFObjectDetectionDataset(HFBaseDataset[ObjectDetectionTargetTuple], od.Dataset):
     """Wraps a Hugging Face dataset to comply with the ObjectDetectionDataset protocol."""
 
     def __init__(self, hf_dataset: HFDataset, image_key: str, objects_key: str, bbox_key: str, label_key: str) -> None:
@@ -225,7 +225,7 @@ class HFObjectDetectionDataset(HFBaseDataset[ObjectDetectionTarget], od.Dataset)
 
         return label_feature
 
-    def __getitem__(self, index: int) -> tuple[NumpyArray,
+    def __getitem__(self, index: int) -> tuple[NumpyArray, ObjectDetectionTargetTuple, DatumMetadata]:
         if not 0 <= index < len(self.source):
             raise IndexError(f"Index {index} out of range for dataset of size {len(self.source)}")
 
@@ -237,7 +237,7 @@ class HFObjectDetectionDataset(HFBaseDataset[ObjectDetectionTarget], od.Dataset)
         boxes = objects[self._bbox_key]
         labels = objects[self._label_key]
         scores = np.zeros_like(labels, dtype=np.float32)
-        target =
+        target = ObjectDetectionTargetTuple(boxes, labels, scores)
 
         # Process metadata
         datum_metadata = self._get_base_metadata(index)
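
Besides the parenthesis fix in the index2label construction, the adapter hunks switch HFObjectDetectionDataset to the renamed ObjectDetectionTargetTuple. A hedged usage sketch against the __init__ signature visible above; the dataset id and the column/key names are placeholders and depend entirely on the Hugging Face dataset's schema.

# Sketch: wrapping a Hugging Face detection dataset with the adapter whose
# constructor appears in the hunk above. "some/detection-dataset" and the
# image/objects/bbox/label keys are illustrative placeholders.
from datasets import load_dataset

from maite_datasets.adapters._huggingface import HFObjectDetectionDataset

hf_ds = load_dataset("some/detection-dataset", split="train")
wrapped = HFObjectDetectionDataset(
    hf_ds,
    image_key="image",      # column holding the image
    objects_key="objects",  # column holding per-image annotations
    bbox_key="bbox",        # key inside objects with box coordinates
    label_key="category",   # key inside objects with class labels
)

image, target, metadata = wrapped[0]
print(type(target).__name__, len(target.labels))  # ObjectDetectionTargetTuple, detection count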
maite_datasets/object_detection/_antiuav.py
CHANGED
@@ -13,11 +13,12 @@ from maite_datasets._base import (
     BaseODDataset,
     DataLocation,
     NumpyArray,
+    NumpyObjectDetectionTarget,
     NumpyObjectDetectionTransform,
 )
 
 
-class AntiUAVDetection(BaseODDataset[NumpyArray, list[str], str], BaseDatasetNumpyMixin):
+class AntiUAVDetection(BaseODDataset[NumpyArray, NumpyObjectDetectionTarget, list[str], str], BaseDatasetNumpyMixin):
     """
     A UAV detection dataset focused on detecting UAVs in natural images against large variation in backgrounds.
 
maite_datasets/object_detection/_coco.py
CHANGED
@@ -11,7 +11,7 @@ import numpy as np
 from maite.protocols import DatasetMetadata, DatumMetadata
 from PIL import Image
 
-from maite_datasets._base import BaseDataset,
+from maite_datasets._base import BaseDataset, ObjectDetectionTargetTuple
 from maite_datasets._reader import BaseDatasetReader
 
 
@@ -269,7 +269,7 @@ class COCODataset(BaseDataset):
             scores = np.empty(0, dtype=np.float32)
             annotation_metadata = []
 
-        target =
+        target = ObjectDetectionTargetTuple(boxes, labels, scores)
 
         # Create comprehensive datum metadata
         datum_metadata = DatumMetadata(
maite_datasets/object_detection/_milco.py
CHANGED
@@ -11,11 +11,12 @@ from maite_datasets._base import (
     BaseODDataset,
     DataLocation,
     NumpyArray,
+    NumpyObjectDetectionTarget,
     NumpyObjectDetectionTransform,
 )
 
 
-class MILCO(BaseODDataset[NumpyArray, list[str], str], BaseDatasetNumpyMixin):
+class MILCO(BaseODDataset[NumpyArray, NumpyObjectDetectionTarget, list[str], str], BaseDatasetNumpyMixin):
     """
     A side-scan sonar dataset focused on mine-like object detection.
 
maite_datasets/object_detection/_seadrone.py
CHANGED
@@ -12,6 +12,7 @@ from maite_datasets._base import (
     BaseODDataset,
     DataLocation,
     NumpyArray,
+    NumpyObjectDetectionTarget,
     NumpyObjectDetectionTransform,
 )
 from maite_datasets._fileio import _ensure_exists
@@ -20,6 +21,7 @@ from maite_datasets._fileio import _ensure_exists
 class SeaDrone(
     BaseODDataset[
         NumpyArray,
+        NumpyObjectDetectionTarget,
         list[tuple[list[int], list[list[float]]]],
         tuple[list[int], list[list[float]]],
     ],
maite_datasets/object_detection/_voc.py
CHANGED
@@ -16,8 +16,8 @@ from maite_datasets._base import (
     BaseODDataset,
     DataLocation,
     NumpyArray,
+    NumpyObjectDetectionTarget,
     NumpyObjectDetectionTransform,
-    ObjectDetectionTarget,
     _ensure_exists,
 )
 
@@ -46,7 +46,7 @@ VOCClassStringMap = Literal[
 TVOCClassMap = TypeVar("TVOCClassMap", VOCClassStringMap, int, list[VOCClassStringMap], list[int])
 
 
-class BaseVOCDataset(BaseDownloadedDataset[NumpyArray,
+class BaseVOCDataset(BaseDownloadedDataset[NumpyArray, NumpyObjectDetectionTarget, list[str], str]):
     _resources = [
         DataLocation(
             url="https://data.brainchip.com/dataset-mirror/voc/VOCtrainval_11-May-2012.tar",
@@ -431,7 +431,7 @@ class BaseVOCDataset(BaseDownloadedDataset[NumpyArray, ObjectDetectionTarget, li
 
 class VOCDetection(
     BaseVOCDataset,
-    BaseODDataset[NumpyArray, list[str], str],
+    BaseODDataset[NumpyArray, NumpyObjectDetectionTarget, list[str], str],
     BaseDatasetNumpyMixin,
 ):
     """
maite_datasets/object_detection/_yolo.py
CHANGED
@@ -12,7 +12,7 @@ import numpy as np
 from maite.protocols import DatasetMetadata, DatumMetadata
 from PIL import Image
 
-from maite_datasets._base import BaseDataset,
+from maite_datasets._base import BaseDataset, ObjectDetectionTargetTuple
 from maite_datasets._reader import BaseDatasetReader
 
 
@@ -297,7 +297,7 @@ class YOLODataset(BaseDataset):
             labels = np.empty(0, dtype=np.int64)
             scores = np.empty(0, dtype=np.float32)
 
-        target =
+        target = ObjectDetectionTargetTuple(boxes, labels, scores)
 
         # Create comprehensive datum metadata
         datum_metadata = DatumMetadata(
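
With these changes every bundled object-detection dataset (AntiUAVDetection, MILCO, SeaDrone, VOCDetection, COCODataset, YOLODataset) is either parameterized over NumpyObjectDetectionTarget or returns ObjectDetectionTargetTuple, so they all share the MAITE od.Dataset datum contract. A small sketch of code written against that contract only; constructor arguments are omitted because they differ per dataset and are not part of this diff.

# Sketch: consuming any of the object-detection datasets above purely through
# the MAITE protocol. `dataset` stands for an already-constructed instance.
from maite.protocols import object_detection as od


def summarize(dataset: od.Dataset, index: int = 0) -> None:
    image, target, metadata = dataset[index]  # (image, ObjectDetectionTarget, DatumMetadata)
    print(metadata["id"], len(target.labels), "detections")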
maite_datasets/protocols.py
CHANGED
@@ -3,7 +3,7 @@ Common type protocols used for interoperability.
 """
 
 from collections.abc import Iterable, Iterator, Mapping, Sequence
-from typing import Any, Protocol, overload, runtime_checkable
+from typing import Any, Generic, Protocol, TypeVar, overload, runtime_checkable
 
 
 @runtime_checkable
@@ -23,6 +23,17 @@ class Array(Protocol):
     def __len__(self) -> int: ...
 
 
+TBoxes = TypeVar("TBoxes", Array, Sequence)
+TLabels = TypeVar("TLabels", Array, Sequence)
+TScores = TypeVar("TScores", Array, Sequence)
+
+
+class GenericObjectDetectionTarget(Generic[TBoxes, TLabels, TScores], Protocol):
+    boxes: TBoxes
+    labels: TLabels
+    scores: TScores
+
+
 @runtime_checkable
 class HFDatasetInfo(Protocol):
     @property
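
protocols.py now exposes GenericObjectDetectionTarget, a protocol parameterized over the box, label, and score container types. A minimal sketch, assuming the definition above, of a plain dataclass satisfying it structurally; SimpleTarget and total_detections are illustrative names, not part of the package.

# Sketch: any object exposing boxes/labels/scores attributes matches the
# GenericObjectDetectionTarget protocol added above.
from dataclasses import dataclass

import numpy as np

from maite_datasets.protocols import Array, GenericObjectDetectionTarget


@dataclass
class SimpleTarget:
    boxes: np.ndarray
    labels: np.ndarray
    scores: np.ndarray


def total_detections(target: GenericObjectDetectionTarget[Array, Array, Array]) -> int:
    return len(target.labels)


t = SimpleTarget(
    boxes=np.array([[0.0, 0.0, 5.0, 5.0]]),
    labels=np.array([1]),
    scores=np.array([1.0]),
)
print(total_detections(t))  # 1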
maite_datasets/wrappers/_torch.py
CHANGED
@@ -1,21 +1,31 @@
 from __future__ import annotations
 
-from typing import Any, Callable, Generic, TypeAlias, TypeVar, cast, overload
+from typing import Any, Callable, Generic, Protocol, TypeAlias, TypeVar, cast, overload
 
 import torch
 from maite.protocols import DatasetMetadata, DatumMetadata
-from maite.protocols.object_detection import ObjectDetectionTarget
+from maite.protocols.object_detection import ObjectDetectionTarget
 from torch import Tensor
 from torchvision.tv_tensors import BoundingBoxes, Image
 
-from maite_datasets._base import BaseDataset,
+from maite_datasets._base import BaseDataset, ObjectDetectionTargetTuple
 from maite_datasets.protocols import Array
 
 TArray = TypeVar("TArray", bound=Array)
 TTarget = TypeVar("TTarget")
 
+
+class TorchvisionObjectDetectionTarget(Protocol):
+    @property
+    def boxes(self) -> BoundingBoxes: ...
+    @property
+    def labels(self) -> Tensor: ...
+    @property
+    def scores(self) -> Tensor: ...
+
+
 TorchvisionImageClassificationDatum: TypeAlias = tuple[Image, Tensor, DatumMetadata]
-TorchvisionObjectDetectionDatum: TypeAlias = tuple[Image,
+TorchvisionObjectDetectionDatum: TypeAlias = tuple[Image, ObjectDetectionTargetTuple, DatumMetadata]
 
 
 class TorchvisionWrapper(Generic[TArray, TTarget]):
@@ -62,9 +72,9 @@ class TorchvisionWrapper(Generic[TArray, TTarget]):
     @overload
     def __getitem__(
         self: TorchvisionWrapper[TArray, TTarget], index: int
-    ) -> tuple[Image,
+    ) -> tuple[Image, TorchvisionObjectDetectionTarget, DatumMetadata]: ...
 
-    def __getitem__(self, index: int) -> tuple[Image, Tensor |
+    def __getitem__(self, index: int) -> tuple[Image, Tensor | TorchvisionObjectDetectionTarget, DatumMetadata]:
         """Get item with torch tensor conversion."""
         image, target, metadata = self._dataset[index]
 
@@ -78,14 +88,14 @@ class TorchvisionWrapper(Generic[TArray, TTarget]):
             torch_datum = self._transform((torch_image, torch_target, metadata))
             return cast(TorchvisionImageClassificationDatum, torch_datum)
 
-        if isinstance(target,
+        if isinstance(target, ObjectDetectionTarget):
             # Object detection case
             torch_boxes = BoundingBoxes(
                 torch.tensor(target.boxes), format="XYXY", canvas_size=(torch_image.shape[-2], torch_image.shape[-1])
            )  # type: ignore
             torch_labels = torch.tensor(target.labels, dtype=torch.int64)
             torch_scores = torch.tensor(target.scores, dtype=torch.float32)
-            torch_target =
+            torch_target = ObjectDetectionTargetTuple(torch_boxes, torch_labels, torch_scores)
             torch_datum = self._transform((torch_image, torch_target, metadata))
             return cast(TorchvisionObjectDetectionDatum, torch_datum)
 
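
The torchvision wrapper now declares a TorchvisionObjectDetectionTarget protocol and dispatches on isinstance(target, ObjectDetectionTarget), which relies on MAITE's ObjectDetectionTarget protocol being runtime-checkable. The sketch below reproduces the detection-branch conversion in isolation; to_torch_target is an illustrative helper that mirrors the BoundingBoxes and tensor conversions in the hunk, not a function exported by the package.

# Sketch: converting a numpy-backed target into torchvision tensor types the
# same way TorchvisionWrapper.__getitem__ does in the hunk above.
import numpy as np
import torch
from torchvision.tv_tensors import BoundingBoxes

from maite_datasets._base import ObjectDetectionTargetTuple


def to_torch_target(target, image_hw: tuple[int, int]) -> ObjectDetectionTargetTuple:
    boxes = BoundingBoxes(
        torch.tensor(np.asarray(target.boxes)), format="XYXY", canvas_size=image_hw
    )
    labels = torch.tensor(np.asarray(target.labels), dtype=torch.int64)
    scores = torch.tensor(np.asarray(target.scores), dtype=torch.float32)
    return ObjectDetectionTargetTuple(boxes, labels, scores)


numpy_target = ObjectDetectionTargetTuple(
    boxes=np.array([[1.0, 2.0, 3.0, 4.0]]),
    labels=np.array([0]),
    scores=np.array([[1.0]]),
)
torch_target = to_torch_target(numpy_target, image_hw=(32, 32))
print(torch_target.boxes.canvas_size, torch_target.labels.dtype)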
{maite_datasets-0.0.7.dist-info → maite_datasets-0.0.8.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: maite-datasets
-Version: 0.0.7
+Version: 0.0.8
 Summary: A collection of Image Classification and Object Detection task datasets conforming to the MAITE protocol.
 Author-email: Andrew Weng <andrew.weng@ariacoustics.com>, Ryan Wood <ryan.wood@ariacoustics.com>, Shaun Jullens <shaun.jullens@ariacoustics.com>
 License-Expression: MIT
{maite_datasets-0.0.7.dist-info → maite_datasets-0.0.8.dist-info}/RECORD
CHANGED
@@ -1,28 +1,28 @@
 maite_datasets/__init__.py,sha256=Z_HyAe08HaHMjzZS2afFumBXYFRFj0ny5ZAIp0hcj4w,569
-maite_datasets/_base.py,sha256=
+maite_datasets/_base.py,sha256=6-RG3VJfpNEC7luNGCPpqyIQ5FlWeYRSm_bfo1PXdUQ,12924
 maite_datasets/_builder.py,sha256=MnCh6z5hSINlzBnK_pdbgI5zSg5d1uq4UvXt3cjn9hs,9820
 maite_datasets/_collate.py,sha256=pwUnmrbJH5olFjSwF-ZkGdfopTWUUlwmq0d5KzERcy8,4052
 maite_datasets/_fileio.py,sha256=7S-hF3xU60AdcsPsfYR7rjbeGZUlv3JjGEZhGJOxGYU,5622
 maite_datasets/_reader.py,sha256=tJqsjfXaK-mrs0Ed4BktombFMmNwCur35W7tuYCflKM,5569
 maite_datasets/_validate.py,sha256=Uokbolmv1uSv98sph44HON0HEieeK3s2mqbPMP1d5xs,6948
-maite_datasets/protocols.py,sha256=
+maite_datasets/protocols.py,sha256=PcIVJzUc5com_pmhzh0CPb0IkrmFof6WtUQXm7_6Vko,2450
 maite_datasets/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 maite_datasets/adapters/__init__.py,sha256=PDMdvRqLVS7x6ghJ_xUBD5Ebq5HrV5k3p4TqK2_6Gt8,191
-maite_datasets/adapters/_huggingface.py,sha256=
+maite_datasets/adapters/_huggingface.py,sha256=dZa2guKeto-oKMM9J_TaI4DSPK18u7PvK3AsHQp3eQc,16199
 maite_datasets/image_classification/__init__.py,sha256=pcZojkdsiMoLgY4mKjoQY6WyEwiGYHxNrAGpnvn3zsY,308
 maite_datasets/image_classification/_cifar10.py,sha256=rrkJZ70NBYOSGxligXyakVxNOyGAglN6PaGQazuWNO4,8453
 maite_datasets/image_classification/_mnist.py,sha256=9isgdi-YXgs6nXoh1j8uOgh4_sIhBIky72Vyl866rTE,8192
 maite_datasets/image_classification/_ships.py,sha256=nWhte8592lpybhQCCdgT36LnuMQ0PRJWlDxT5-IPUtk,5137
 maite_datasets/object_detection/__init__.py,sha256=171KT_X6I4YGy18G240N_-ZsKvXJ6YqcBDzkhTiBj2E,587
-maite_datasets/object_detection/_antiuav.py,sha256=
-maite_datasets/object_detection/_coco.py,sha256=
-maite_datasets/object_detection/_milco.py,sha256=
-maite_datasets/object_detection/_seadrone.py,sha256=
-maite_datasets/object_detection/_voc.py,sha256=
-maite_datasets/object_detection/_yolo.py,sha256=
+maite_datasets/object_detection/_antiuav.py,sha256=8b_AlX5xb5Fu02pG-dldFrHtVZLZzb6ZyGGYj-b7Lt0,8325
+maite_datasets/object_detection/_coco.py,sha256=vTPQmLSgIM7lNb2PEaCrs1fK7_82j3tXFQQ3A5e0dlM,10319
+maite_datasets/object_detection/_milco.py,sha256=aX10PbWjIFyajS5ibGA219fZd7tZTkAlvpThoTZpxtM,7988
+maite_datasets/object_detection/_seadrone.py,sha256=wKcyKQhcvNj5NrLMOD-9KdGaTaBnE6F9uif_J-PKNug,271281
+maite_datasets/object_detection/_voc.py,sha256=CDFgSpbQZgpSq51CKd9twIUHr6l_GAViVGpmFUNX0Dg,19627
+maite_datasets/object_detection/_yolo.py,sha256=GIa43Ec_542N7PRo0XngV-Npw3n4yoIND3tI7vas3u0,11877
 maite_datasets/wrappers/__init__.py,sha256=6uI0ztOB2IlMWln9JkVke4OhU2HQ8i6YCaCNq_q5qb0,225
-maite_datasets/wrappers/_torch.py,sha256=
-maite_datasets-0.0.
-maite_datasets-0.0.
-maite_datasets-0.0.
-maite_datasets-0.0.
+maite_datasets/wrappers/_torch.py,sha256=gYcAdrarcX_DiV51MD1cpOa9YAZGEmWblJEyyLS7rRs,4687
+maite_datasets-0.0.8.dist-info/METADATA,sha256=1srwva41Dtqwda2AGc0GwnjQs9f-sFMVSLaMrVQJv7Q,7845
+maite_datasets-0.0.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+maite_datasets-0.0.8.dist-info/licenses/LICENSE,sha256=6h3J3R-ajGHh_isDSftzS5_jJjB9HH4TaI0vU-VscaY,1082
+maite_datasets-0.0.8.dist-info/RECORD,,

{maite_datasets-0.0.7.dist-info → maite_datasets-0.0.8.dist-info}/WHEEL
File without changes

{maite_datasets-0.0.7.dist-info → maite_datasets-0.0.8.dist-info}/licenses/LICENSE
File without changes