kaiko-eva 0.0.1-py3-none-any.whl → 0.0.2-py3-none-any.whl

This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.

Potentially problematic release: this version of kaiko-eva might be problematic.

Files changed (41)
  1. eva/.DS_Store +0 -0
  2. eva/core/callbacks/__init__.py +2 -1
  3. eva/core/callbacks/config.py +143 -0
  4. eva/core/data/datasets/__init__.py +10 -2
  5. eva/core/data/datasets/embeddings/__init__.py +13 -0
  6. eva/core/data/datasets/{classification/embeddings.py → embeddings/base.py} +41 -43
  7. eva/core/data/datasets/embeddings/classification/__init__.py +10 -0
  8. eva/core/data/datasets/embeddings/classification/embeddings.py +66 -0
  9. eva/core/data/datasets/embeddings/classification/multi_embeddings.py +106 -0
  10. eva/core/data/transforms/__init__.py +3 -1
  11. eva/core/data/transforms/padding/__init__.py +5 -0
  12. eva/core/data/transforms/padding/pad_2d_tensor.py +38 -0
  13. eva/core/data/transforms/sampling/__init__.py +5 -0
  14. eva/core/data/transforms/sampling/sample_from_axis.py +40 -0
  15. eva/core/loggers/__init__.py +7 -0
  16. eva/core/loggers/dummy.py +38 -0
  17. eva/core/loggers/experimental_loggers.py +8 -0
  18. eva/core/loggers/log/__init__.py +5 -0
  19. eva/core/loggers/log/parameters.py +64 -0
  20. eva/core/loggers/log/utils.py +13 -0
  21. eva/core/models/modules/head.py +6 -11
  22. eva/core/models/modules/module.py +25 -1
  23. eva/core/trainers/_recorder.py +69 -7
  24. eva/core/trainers/functional.py +22 -5
  25. eva/core/trainers/trainer.py +20 -6
  26. eva/vision/data/datasets/__init__.py +1 -8
  27. eva/vision/data/datasets/_utils.py +3 -3
  28. eva/vision/data/datasets/classification/__init__.py +1 -8
  29. eva/vision/data/datasets/segmentation/base.py +20 -35
  30. eva/vision/data/datasets/segmentation/total_segmentator.py +88 -69
  31. eva/vision/models/.DS_Store +0 -0
  32. eva/vision/models/networks/.DS_Store +0 -0
  33. eva/vision/utils/convert.py +24 -0
  34. eva/vision/utils/io/nifti.py +10 -6
  35. {kaiko_eva-0.0.1.dist-info → kaiko_eva-0.0.2.dist-info}/METADATA +51 -25
  36. {kaiko_eva-0.0.1.dist-info → kaiko_eva-0.0.2.dist-info}/RECORD +39 -22
  37. {kaiko_eva-0.0.1.dist-info → kaiko_eva-0.0.2.dist-info}/WHEEL +1 -1
  38. eva/core/data/datasets/classification/__init__.py +0 -5
  39. eva/vision/data/datasets/classification/total_segmentator.py +0 -213
  40. {kaiko_eva-0.0.1.dist-info → kaiko_eva-0.0.2.dist-info}/entry_points.txt +0 -0
  41. {kaiko_eva-0.0.1.dist-info → kaiko_eva-0.0.2.dist-info}/licenses/LICENSE +0 -0
eva/vision/data/datasets/classification/total_segmentator.py

@@ -1,213 +0,0 @@
-"""TotalSegmentator 2D segmentation dataset class."""
-
-import functools
-import os
-from glob import glob
-from typing import Callable, Dict, List, Literal, Tuple
-
-import numpy as np
-from torchvision.datasets import utils
-from typing_extensions import override
-
-from eva.vision.data.datasets import _utils, _validators, structs
-from eva.vision.data.datasets.classification import base
-from eva.vision.utils import io
-
-
-class TotalSegmentatorClassification(base.ImageClassification):
-    """TotalSegmentator multi-label classification dataset."""
-
-    _train_index_ranges: List[Tuple[int, int]] = [(0, 83)]
-    """Train range indices."""
-
-    _val_index_ranges: List[Tuple[int, int]] = [(83, 103)]
-    """Validation range indices."""
-
-    _n_slices_per_image: int = 20
-    """The amount of slices to sample per 3D CT scan image."""
-
-    _resources_full: List[structs.DownloadResource] = [
-        structs.DownloadResource(
-            filename="Totalsegmentator_dataset_v201.zip",
-            url="https://zenodo.org/records/10047292/files/Totalsegmentator_dataset_v201.zip",
-            md5="fe250e5718e0a3b5df4c4ea9d58a62fe",
-        ),
-    ]
-    """Resources for the full dataset version."""
-
-    _resources_small: List[structs.DownloadResource] = [
-        structs.DownloadResource(
-            filename="Totalsegmentator_dataset_small_v201.zip",
-            url="https://zenodo.org/records/10047263/files/Totalsegmentator_dataset_small_v201.zip",
-            md5="6b5524af4b15e6ba06ef2d700c0c73e0",
-        ),
-    ]
-    """Resources for the small dataset version."""
-
-    def __init__(
-        self,
-        root: str,
-        split: Literal["train", "val"] | None,
-        version: Literal["small", "full"] = "small",
-        download: bool = False,
-        image_transforms: Callable | None = None,
-        target_transforms: Callable | None = None,
-    ) -> None:
-        """Initialize dataset.
-
-        Args:
-            root: Path to the root directory of the dataset. The dataset will
-                be downloaded and extracted here, if it does not already exist.
-            split: Dataset split to use. If None, the entire dataset is used.
-            version: The version of the dataset to initialize.
-            download: Whether to download the data for the specified split.
-                Note that the download will be executed only by additionally
-                calling the :meth:`prepare_data` method and if the data does not
-                exist yet on disk.
-            image_transforms: A function/transform that takes in an image
-                and returns a transformed version.
-            target_transforms: A function/transform that takes in the target
-                and transforms it.
-        """
-        super().__init__(
-            image_transforms=image_transforms,
-            target_transforms=target_transforms,
-        )
-
-        self._root = root
-        self._split = split
-        self._version = version
-        self._download = download
-
-        self._samples_dirs: List[str] = []
-        self._indices: List[int] = []
-
-    @functools.cached_property
-    @override
-    def classes(self) -> List[str]:
-        def get_filename(path: str) -> str:
-            """Returns the filename from the full path."""
-            return os.path.basename(path).split(".")[0]
-
-        first_sample_labels = os.path.join(
-            self._root, self._samples_dirs[0], "segmentations", "*.nii.gz"
-        )
-        return sorted(map(get_filename, glob(first_sample_labels)))
-
-    @property
-    @override
-    def class_to_idx(self) -> Dict[str, int]:
-        return {label: index for index, label in enumerate(self.classes)}
-
-    @override
-    def filename(self, index: int) -> str:
-        sample_dir = self._samples_dirs[self._indices[index]]
-        return os.path.join(sample_dir, "ct.nii.gz")
-
-    @override
-    def prepare_data(self) -> None:
-        if self._download:
-            self._download_dataset()
-        _validators.check_dataset_exists(self._root, True)
-
-    @override
-    def configure(self) -> None:
-        self._samples_dirs = self._fetch_samples_dirs()
-        self._indices = self._create_indices()
-
-    @override
-    def validate(self) -> None:
-        _validators.check_dataset_integrity(
-            self,
-            length=1660 if self._split == "train" else 400,
-            n_classes=117,
-            first_and_last_labels=("adrenal_gland_left", "vertebrae_T9"),
-        )
-
-    @override
-    def __len__(self) -> int:
-        return len(self._indices) * self._n_slices_per_image
-
-    @override
-    def load_image(self, index: int) -> np.ndarray:
-        image_path = self._get_image_path(index)
-        slice_index = self._get_sample_slice_index(index)
-        image_array = io.read_nifti_slice(image_path, slice_index)
-        return image_array.repeat(3, axis=2)
-
-    @override
-    def load_target(self, index: int) -> np.ndarray:
-        masks = self._load_masks(index)
-        targets = [1 in masks[..., mask_index] for mask_index in range(masks.shape[-1])]
-        return np.asarray(targets, dtype=np.int64)
-
-    def _load_masks(self, index: int) -> np.ndarray:
-        """Returns the `index`'th target mask sample."""
-        masks_dir = self._get_masks_dir(index)
-        slice_index = self._get_sample_slice_index(index)
-        mask_paths = (os.path.join(masks_dir, label + ".nii.gz") for label in self.classes)
-        masks = [io.read_nifti_slice(path, slice_index) for path in mask_paths]
-        return np.concatenate(masks, axis=-1)
-
-    def _get_masks_dir(self, index: int) -> str:
-        """Returns the directory of the corresponding masks."""
-        sample_dir = self._get_sample_dir(index)
-        return os.path.join(self._root, sample_dir, "segmentations")
-
-    def _get_image_path(self, index: int) -> str:
-        """Returns the corresponding image path."""
-        sample_dir = self._get_sample_dir(index)
-        return os.path.join(self._root, sample_dir, "ct.nii.gz")
-
-    def _get_sample_dir(self, index: int) -> str:
-        """Returns the corresponding sample directory."""
-        sample_index = self._indices[index // self._n_slices_per_image]
-        return self._samples_dirs[sample_index]
-
-    def _get_sample_slice_index(self, index: int) -> int:
-        """Returns the corresponding slice index."""
-        image_path = self._get_image_path(index)
-        total_slices = io.fetch_total_nifti_slices(image_path)
-        slice_indices = np.linspace(0, total_slices - 1, num=self._n_slices_per_image, dtype=int)
-        return slice_indices[index % self._n_slices_per_image]
-
-    def _fetch_samples_dirs(self) -> List[str]:
-        """Returns the name of all the samples of all the splits of the dataset."""
-        sample_filenames = [
-            filename
-            for filename in os.listdir(self._root)
-            if os.path.isdir(os.path.join(self._root, filename))
-        ]
-        return sorted(sample_filenames)
-
-    def _create_indices(self) -> List[int]:
-        """Builds the dataset indices for the specified split."""
-        split_index_ranges = {
-            "train": self._train_index_ranges,
-            "val": self._val_index_ranges,
-            None: [(0, 103)],
-        }
-        index_ranges = split_index_ranges.get(self._split)
-        if index_ranges is None:
-            raise ValueError("Invalid data split. Use 'train', 'val' or `None`.")
-
-        return _utils.ranges_to_indices(index_ranges)
-
-    def _download_dataset(self) -> None:
-        """Downloads the dataset."""
-        dataset_resources = {
-            "small": self._resources_small,
-            "full": self._resources_full,
-            None: (0, 103),
-        }
-        resources = dataset_resources.get(self._version)
-        if resources is None:
-            raise ValueError("Invalid data version. Use 'small' or 'full'.")
-
-        for resource in resources:
-            utils.download_and_extract_archive(
-                resource.url,
-                download_root=self._root,
-                filename=resource.filename,
-                remove_finished=True,
-            )
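
For reference, a minimal usage sketch of the class removed above, based only on the constructor and hooks shown in the deleted 0.0.1 module. The root path is a placeholder, the direct module import reflects the file's former location in 0.0.1, and the lifecycle hooks are called manually here purely for illustration:

from eva.vision.data.datasets.classification.total_segmentator import (
    TotalSegmentatorClassification,
)

# Placeholder root directory; the Zenodo archive is extracted here by prepare_data().
dataset = TotalSegmentatorClassification(
    root="/data/total_segmentator",
    split="train",    # "train", "val", or None for all samples
    version="small",  # "small" or "full" dataset archive
    download=True,    # data is fetched only when prepare_data() runs
)
dataset.prepare_data()  # downloads and extracts the archive if missing
dataset.configure()     # collects sample directories and builds split indices
dataset.validate()      # checks expected length and first/last class names

# Each 3D CT scan contributes 20 evenly spaced slices, so dataset index i maps
# to sample i // 20 and slice position i % 20 within that sample's scan.
image = dataset.load_image(0)    # H x W x 3 slice (single channel repeated)
target = dataset.load_target(0)  # multi-hot vector, one entry per segmentation class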