kaiko-eva 0.0.1__py3-none-any.whl → 0.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kaiko-eva might be problematic.

Files changed (41)
  1. eva/.DS_Store +0 -0
  2. eva/core/callbacks/__init__.py +2 -1
  3. eva/core/callbacks/config.py +143 -0
  4. eva/core/data/datasets/__init__.py +10 -2
  5. eva/core/data/datasets/embeddings/__init__.py +13 -0
  6. eva/core/data/datasets/{classification/embeddings.py → embeddings/base.py} +41 -43
  7. eva/core/data/datasets/embeddings/classification/__init__.py +10 -0
  8. eva/core/data/datasets/embeddings/classification/embeddings.py +66 -0
  9. eva/core/data/datasets/embeddings/classification/multi_embeddings.py +106 -0
  10. eva/core/data/transforms/__init__.py +3 -1
  11. eva/core/data/transforms/padding/__init__.py +5 -0
  12. eva/core/data/transforms/padding/pad_2d_tensor.py +38 -0
  13. eva/core/data/transforms/sampling/__init__.py +5 -0
  14. eva/core/data/transforms/sampling/sample_from_axis.py +40 -0
  15. eva/core/loggers/__init__.py +7 -0
  16. eva/core/loggers/dummy.py +38 -0
  17. eva/core/loggers/experimental_loggers.py +8 -0
  18. eva/core/loggers/log/__init__.py +5 -0
  19. eva/core/loggers/log/parameters.py +64 -0
  20. eva/core/loggers/log/utils.py +13 -0
  21. eva/core/models/modules/head.py +6 -11
  22. eva/core/models/modules/module.py +25 -1
  23. eva/core/trainers/_recorder.py +69 -7
  24. eva/core/trainers/functional.py +22 -5
  25. eva/core/trainers/trainer.py +20 -6
  26. eva/vision/data/datasets/__init__.py +1 -8
  27. eva/vision/data/datasets/_utils.py +3 -3
  28. eva/vision/data/datasets/classification/__init__.py +1 -8
  29. eva/vision/data/datasets/segmentation/base.py +20 -35
  30. eva/vision/data/datasets/segmentation/total_segmentator.py +88 -69
  31. eva/vision/models/.DS_Store +0 -0
  32. eva/vision/models/networks/.DS_Store +0 -0
  33. eva/vision/utils/convert.py +24 -0
  34. eva/vision/utils/io/nifti.py +10 -6
  35. {kaiko_eva-0.0.1.dist-info → kaiko_eva-0.0.2.dist-info}/METADATA +51 -25
  36. {kaiko_eva-0.0.1.dist-info → kaiko_eva-0.0.2.dist-info}/RECORD +39 -22
  37. {kaiko_eva-0.0.1.dist-info → kaiko_eva-0.0.2.dist-info}/WHEEL +1 -1
  38. eva/core/data/datasets/classification/__init__.py +0 -5
  39. eva/vision/data/datasets/classification/total_segmentator.py +0 -213
  40. {kaiko_eva-0.0.1.dist-info → kaiko_eva-0.0.2.dist-info}/entry_points.txt +0 -0
  41. {kaiko_eva-0.0.1.dist-info → kaiko_eva-0.0.2.dist-info}/licenses/LICENSE +0 -0
@@ -6,25 +6,26 @@ from glob import glob
 from typing import Callable, Dict, List, Literal, Tuple
 
 import numpy as np
+from torchvision import tv_tensors
 from torchvision.datasets import utils
 from typing_extensions import override
 
 from eva.vision.data.datasets import _utils, _validators, structs
 from eva.vision.data.datasets.segmentation import base
-from eva.vision.utils import io
+from eva.vision.utils import convert, io
 
 
 class TotalSegmentator2D(base.ImageSegmentation):
     """TotalSegmentator 2D segmentation dataset."""
 
-    _train_index_ranges: List[Tuple[int, int]] = [(0, 83)]
-    """Train range indices."""
+    _expected_dataset_lengths: Dict[str, int] = {
+        "train_small": 29892,
+        "val_small": 6480,
+    }
+    """Dataset version and split to the expected size."""
 
-    _val_index_ranges: List[Tuple[int, int]] = [(83, 103)]
-    """Validation range indices."""
-
-    _n_slices_per_image: int = 20
-    """The amount of slices to sample per 3D CT scan image."""
+    _sample_every_n_slices: int | None = None
+    """The amount of slices to sub-sample per 3D CT scan image."""
 
     _resources_full: List[structs.DownloadResource] = [
         structs.DownloadResource(
@@ -48,11 +49,10 @@ class TotalSegmentator2D(base.ImageSegmentation):
         self,
         root: str,
         split: Literal["train", "val"] | None,
-        version: Literal["small", "full"] = "small",
+        version: Literal["small", "full"] | None = "small",
         download: bool = False,
-        image_transforms: Callable | None = None,
-        target_transforms: Callable | None = None,
-        image_target_transforms: Callable | None = None,
+        as_uint8: bool = True,
+        transforms: Callable | None = None,
     ) -> None:
         """Initialize dataset.
 
@@ -60,33 +60,26 @@ class TotalSegmentator2D(base.ImageSegmentation):
             root: Path to the root directory of the dataset. The dataset will
                 be downloaded and extracted here, if it does not already exist.
             split: Dataset split to use. If `None`, the entire dataset is used.
-            version: The version of the dataset to initialize.
+            version: The version of the dataset to initialize. If `None`, it will
+                use the files located at root as is and wont perform any checks.
             download: Whether to download the data for the specified split.
                 Note that the download will be executed only by additionally
                 calling the :meth:`prepare_data` method and if the data does not
                 exist yet on disk.
-            image_transforms: A function/transform that takes in an image
-                and returns a transformed version.
-            target_transforms: A function/transform that takes in the target
-                and transforms it.
-            image_target_transforms: A function/transforms that takes in an
-                image and a label and returns the transformed versions of both.
-                This transform happens after the `image_transforms` and
-                `target_transforms`.
+            as_uint8: Whether to convert and return the images as a 8-bit.
+            transforms: A function/transforms that takes in an image and a target
+                mask and returns the transformed versions of both.
         """
-        super().__init__(
-            image_transforms=image_transforms,
-            target_transforms=target_transforms,
-            image_target_transforms=image_target_transforms,
-        )
+        super().__init__(transforms=transforms)
 
         self._root = root
         self._split = split
         self._version = version
         self._download = download
+        self._as_uint8 = as_uint8
 
         self._samples_dirs: List[str] = []
-        self._indices: List[int] = []
+        self._indices: List[Tuple[int, int]] = []
 
     @functools.cached_property
     @override
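For context, a minimal sketch of how the reworked constructor is called after this change. The module path comes from this wheel's RECORD; the root path and argument values here are placeholders:

```python
from eva.vision.data.datasets.segmentation import total_segmentator

# Hypothetical usage of the 0.0.2 signature: the three separate
# image/target transform arguments are replaced by one joint `transforms`
# callable, and `as_uint8` controls the 8-bit conversion of CT slices.
dataset = total_segmentator.TotalSegmentator2D(
    root="data/total_segmentator",  # placeholder path
    split="train",
    version="small",
    download=False,
    as_uint8=True,
    transforms=None,
)
```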
@@ -107,7 +100,8 @@ class TotalSegmentator2D(base.ImageSegmentation):
 
     @override
     def filename(self, index: int) -> str:
-        sample_dir = self._samples_dirs[self._indices[index]]
+        sample_idx, _ = self._indices[index]
+        sample_dir = self._samples_dirs[sample_idx]
         return os.path.join(sample_dir, "ct.nii.gz")
 
     @override
@@ -122,53 +116,58 @@ class TotalSegmentator2D(base.ImageSegmentation):
 
     @override
     def validate(self) -> None:
+        if self._version is None:
+            return
+
         _validators.check_dataset_integrity(
             self,
-            length=1660 if self._split == "train" else 400,
+            length=self._expected_dataset_lengths.get(f"{self._split}_{self._version}", 0),
             n_classes=117,
             first_and_last_labels=("adrenal_gland_left", "vertebrae_T9"),
         )
 
     @override
     def __len__(self) -> int:
-        return len(self._indices) * self._n_slices_per_image
+        return len(self._indices)
 
     @override
-    def load_image(self, index: int) -> np.ndarray:
-        image_path = self._get_image_path(index)
-        slice_index = self._get_sample_slice_index(index)
+    def load_image(self, index: int) -> tv_tensors.Image:
+        sample_index, slice_index = self._indices[index]
+        image_path = self._get_image_path(sample_index)
         image_array = io.read_nifti_slice(image_path, slice_index)
-        return image_array.repeat(3, axis=2)
+        if self._as_uint8:
+            image_array = convert.to_8bit(image_array)
+        image_rgb_array = image_array.repeat(3, axis=2)
+        return tv_tensors.Image(image_rgb_array.transpose(2, 0, 1))
 
     @override
-    def load_mask(self, index: int) -> np.ndarray:
-        masks_dir = self._get_masks_dir(index)
-        slice_index = self._get_sample_slice_index(index)
+    def load_mask(self, index: int) -> tv_tensors.Mask:
+        sample_index, slice_index = self._indices[index]
+        masks_dir = self._get_masks_dir(sample_index)
         mask_paths = (os.path.join(masks_dir, label + ".nii.gz") for label in self.classes)
-        masks = [io.read_nifti_slice(path, slice_index) for path in mask_paths]
-        return np.concatenate(masks, axis=-1)
-
-    def _get_masks_dir(self, index: int) -> str:
-        """Returns the directory of the corresponding masks."""
-        sample_dir = self._get_sample_dir(index)
-        return os.path.join(self._root, sample_dir, "segmentations")
+        one_hot_encoded = np.concatenate(
+            [io.read_nifti_slice(path, slice_index) for path in mask_paths],
+            axis=2,
+        )
+        background_mask = one_hot_encoded.sum(axis=2, keepdims=True) == 0
+        one_hot_encoded_with_bg = np.concatenate([background_mask, one_hot_encoded], axis=2)
+        segmentation_label = np.argmax(one_hot_encoded_with_bg, axis=2)
+        return tv_tensors.Mask(segmentation_label)
 
-    def _get_image_path(self, index: int) -> str:
+    def _get_image_path(self, sample_index: int) -> str:
         """Returns the corresponding image path."""
-        sample_dir = self._get_sample_dir(index)
+        sample_dir = self._samples_dirs[sample_index]
         return os.path.join(self._root, sample_dir, "ct.nii.gz")
 
-    def _get_sample_dir(self, index: int) -> str:
-        """Returns the corresponding sample directory."""
-        sample_index = self._indices[index // self._n_slices_per_image]
-        return self._samples_dirs[sample_index]
+    def _get_masks_dir(self, sample_index: int) -> str:
+        """Returns the directory of the corresponding masks."""
+        sample_dir = self._samples_dirs[sample_index]
+        return os.path.join(self._root, sample_dir, "segmentations")
 
-    def _get_sample_slice_index(self, index: int) -> int:
-        """Returns the corresponding slice index."""
-        image_path = self._get_image_path(index)
-        total_slices = io.fetch_total_nifti_slices(image_path)
-        slice_indices = np.linspace(0, total_slices - 1, num=self._n_slices_per_image, dtype=int)
-        return slice_indices[index % self._n_slices_per_image]
+    def _get_number_of_slices_per_sample(self, sample_index: int) -> int:
+        """Returns the total amount of slices of a sample."""
+        image_path = self._get_image_path(sample_index)
+        return io.fetch_total_nifti_slices(image_path)
 
     def _fetch_samples_dirs(self) -> List[str]:
         """Returns the name of all the samples of all the splits of the dataset."""
@@ -179,31 +178,51 @@ class TotalSegmentator2D(base.ImageSegmentation):
         ]
         return sorted(sample_filenames)
 
-    def _create_indices(self) -> List[int]:
-        """Builds the dataset indices for the specified split."""
-        split_index_ranges = {
-            "train": self._train_index_ranges,
-            "val": self._val_index_ranges,
-            None: [(0, 103)],
-        }
-        index_ranges = split_index_ranges.get(self._split)
-        if index_ranges is None:
-            raise ValueError("Invalid data split. Use 'train', 'val' or `None`.")
+    def _get_split_indices(self) -> List[int]:
+        """Returns the samples indices that corresponding the dataset split and version."""
+        key = f"{self._split}_{self._version}"
+        match key:
+            case "train_small":
+                index_ranges = [(0, 83)]
+            case "val_small":
+                index_ranges = [(83, 102)]
+            case _:
+                index_ranges = [(0, len(self._samples_dirs))]
 
         return _utils.ranges_to_indices(index_ranges)
 
+    def _create_indices(self) -> List[Tuple[int, int]]:
+        """Builds the dataset indices for the specified split.
+
+        Returns:
+            A list of tuples, where the first value indicates the
+            sample index which the second its corresponding slice
+            index.
+        """
+        indices = [
+            (sample_idx, slide_idx)
+            for sample_idx in self._get_split_indices()
+            for slide_idx in range(self._get_number_of_slices_per_sample(sample_idx))
+            if slide_idx % (self._sample_every_n_slices or 1) == 0
+        ]
+        return indices
+
     def _download_dataset(self) -> None:
         """Downloads the dataset."""
         dataset_resources = {
             "small": self._resources_small,
             "full": self._resources_full,
-            None: (0, 103),
         }
-        resources = dataset_resources.get(self._version)
+        resources = dataset_resources.get(self._version or "")
         if resources is None:
-            raise ValueError("Invalid data version. Use 'small' or 'full'.")
+            raise ValueError(
+                f"Can't download data version '{self._version}'. Use 'small' or 'full'."
+            )
 
         for resource in resources:
+            if os.path.isdir(self._root):
+                continue
+
             utils.download_and_extract_archive(
                 resource.url,
                 download_root=self._root,
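The indexing change is the core of this file's rewrite: instead of sampling a fixed 20 slices per scan, the dataset now enumerates every (sample, slice) pair, optionally thinned via `_sample_every_n_slices`. A minimal standalone sketch of the same comprehension (the `build_indices` helper and slice counts are hypothetical):

```python
from typing import List, Tuple

def build_indices(
    slices_per_sample: List[int],
    sample_every_n_slices: int | None = None,
) -> List[Tuple[int, int]]:
    """Mirrors the new _create_indices: one entry per kept (sample, slice)."""
    return [
        (sample_idx, slice_idx)
        for sample_idx, n_slices in enumerate(slices_per_sample)
        for slice_idx in range(n_slices)
        if slice_idx % (sample_every_n_slices or 1) == 0
    ]

# Two scans with 4 and 3 slices, keeping every 2nd slice:
print(build_indices([4, 3], sample_every_n_slices=2))
# -> [(0, 0), (0, 2), (1, 0), (1, 2)]
```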
Binary file eva/vision/models/.DS_Store
Binary file eva/vision/models/networks/.DS_Store
@@ -0,0 +1,24 @@
+"""Image conversion related functionalities."""
+
+from typing import Any
+
+import numpy as np
+import numpy.typing as npt
+
+
+def to_8bit(image_array: npt.NDArray[Any]) -> npt.NDArray[np.uint8]:
+    """Casts an image of higher bit image (i.e. 16bit) to 8bit.
+
+    Args:
+        image_array: The image array to convert.
+
+    Returns:
+        The image as normalized as a 8-bit format.
+    """
+    if np.issubdtype(image_array.dtype, np.integer):
+        image_array = image_array.astype(np.float64)
+
+    image_scaled_array = image_array - image_array.min()
+    image_scaled_array /= image_scaled_array.max()
+    image_scaled_array *= 255
+    return image_scaled_array.astype(np.uint8)
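A quick usage sketch of the new helper; only `convert.to_8bit` itself comes from the package, the synthetic slice is made up:

```python
import numpy as np

from eva.vision.utils import convert

# Synthetic int16 slice spanning a typical CT intensity range.
ct_slice = np.random.randint(-1024, 3072, size=(512, 512, 1), dtype=np.int16)

slice_8bit = convert.to_8bit(ct_slice)  # min-max scaled to [0, 255]
assert slice_8bit.dtype == np.uint8
```

Note that the helper normalizes per image (subtract min, divide by max), so absolute intensity values are not preserved across slices.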
@@ -8,16 +8,19 @@ import numpy.typing as npt
 from eva.vision.utils.io import _utils
 
 
-def read_nifti_slice(path: str, slice_index: int) -> npt.NDArray[Any]:
+def read_nifti_slice(
+    path: str, slice_index: int, *, use_storage_dtype: bool = True
+) -> npt.NDArray[Any]:
     """Reads and loads a NIfTI image from a file path as `uint8`.
 
     Args:
         path: The path to the NIfTI file.
-        slice_index: The image slice index to return. If `None`, it will
-            return the full 3D image.
+        slice_index: The image slice index to return.
+        use_storage_dtype: Whether to cast the raw image
+            array to the inferred type.
 
     Returns:
-        The image as a numpy array.
+        The image as a numpy array (height, width, channels).
 
     Raises:
         FileExistsError: If the path does not exist or it is unreachable.
@@ -25,10 +28,11 @@ def read_nifti_slice(path: str, slice_index: int) -> npt.NDArray[Any]:
     """
     _utils.check_file(path)
    image_data = nib.load(path)  # type: ignore
-    dtype = image_data.get_data_dtype()  # type: ignore
     image_slice = image_data.slicer[:, :, slice_index : slice_index + 1]  # type: ignore
     image_array = image_slice.get_fdata()
-    return image_array.astype(dtype)
+    if use_storage_dtype:
+        image_array = image_array.astype(image_data.get_data_dtype())  # type: ignore
+    return image_array
 
 
 def fetch_total_nifti_slices(path: str) -> int:
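A usage sketch of the extended reader, via the same `io` module the dataset code imports; the file path is a placeholder:

```python
from eva.vision.utils import io

# Default: cast back to the on-disk storage dtype (e.g. int16 for CT).
slice_stored = io.read_nifti_slice("ct.nii.gz", slice_index=0)

# Keep nibabel's float64 output from get_fdata() instead:
slice_float = io.read_nifti_slice("ct.nii.gz", slice_index=0, use_storage_dtype=False)
```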
@@ -1,10 +1,10 @@
 Metadata-Version: 2.1
 Name: kaiko-eva
-Version: 0.0.1
+Version: 0.0.2
 Summary: Evaluation Framework for oncology foundation models.
-Keywords: machine-learning evaluation-framework oncology foundation-models
-Author-Email: Ioannis Gatopoulos <ioannis@kaiko.ai>, Nicolas Känzig <nicolas@kaiko.ai>, Roman Moser <roman@kaiko.ai>
-Maintainer-Email: Ioannis Gatopoulos <ioannis@kaiko.ai>, Nicolas Känzig <nicolas@kaiko.ai>, Roman Moser <roman@kaiko.ai>
+Keywords: machine-learning,evaluation-framework,oncology,foundation-models
+Author-Email: Ioannis Gatopoulos <ioannis@kaiko.ai>, =?utf-8?q?Nicolas_K=C3=A4nzig?= <nicolas@kaiko.ai>, Roman Moser <roman@kaiko.ai>
+Maintainer-Email: Ioannis Gatopoulos <ioannis@kaiko.ai>, =?utf-8?q?Nicolas_K=C3=A4nzig?= <nicolas@kaiko.ai>, Roman Moser <roman@kaiko.ai>
 License: Apache License
         Version 2.0, January 2004
        http://www.apache.org/licenses/
@@ -215,15 +215,17 @@ Project-URL: Homepage, https://kaiko-ai.github.io/eva/dev/
 Project-URL: Repository, https://github.com/kaiko-ai/eva
 Project-URL: Documentation, https://kaiko-ai.github.io/eva/dev/
 Requires-Python: >=3.10
-Requires-Dist: lightning>=2.2.1
-Requires-Dist: jsonargparse[omegaconf]>=4.27.4
+Requires-Dist: torch==2.3.0
+Requires-Dist: lightning>=2.2.2
+Requires-Dist: jsonargparse[omegaconf]==4.28
 Requires-Dist: tensorboard>=2.16.2
 Requires-Dist: loguru>=0.7.2
 Requires-Dist: pandas>=2.2.0
 Requires-Dist: transformers>=4.38.2
 Requires-Dist: onnxruntime>=1.17.1
-Requires-Dist: onnx>=1.15.0
+Requires-Dist: onnx>=1.16.0
 Requires-Dist: toolz>=0.12.1
+Requires-Dist: rich>=13.7.1
 Requires-Dist: h5py>=3.10.0; extra == "vision"
 Requires-Dist: nibabel>=5.2.0; extra == "vision"
 Requires-Dist: opencv-python-headless>=4.9.0.80; extra == "vision"
@@ -240,15 +242,19 @@ Description-Content-Type: text/markdown
 
 <div align="center">
 
-<img src="https://github.com/kaiko-ai/eva/blob/main/docs/images/eva-logo.png?raw=true" width="400">
+<br />
+
+<img src="https://github.com/kaiko-ai/eva/blob/main/docs/images/eva-logo.png?raw=true" width="340">
 
+<br />
 <br />
 
 _Oncology FM Evaluation Framework by kaiko.ai_
 
 [![PyPI](https://img.shields.io/pypi/v/kaiko-eva.svg?logo=python)](https://pypi.python.org/pypi/kaiko-eva)
-[![CI](https://github.com/kaiko-ai/eva/workflows/CI/badge.svg)](https://github.com/kaiko-ai/eva/actions?query=workflow%3ACI)
-[![license](https://img.shields.io/badge/License-Apache%202.0-blue.svg?labelColor=gray)](https://github.com/kaiko-ai/eva#license)
+[![docs](https://img.shields.io/badge/📚_docs-latest-green)](https://kaiko-ai.github.io/eva/latest)
+[![license](https://img.shields.io/badge/⚖️_License-Apache%202.0-blue.svg?labelColor=gray)](https://github.com/kaiko-ai/eva#license)<br>
+[![paper](http://img.shields.io/badge/OpenReview-MIDL_2024-B31B1B.svg)](https://openreview.net/forum?id=FNBQOPj18N&noteId=FNBQOPj18N)
 
 <p align="center">
 <a href="https://github.com/kaiko-ai/eva#installation">Installation</a> •
@@ -299,18 +305,18 @@ eva --version
 
 ## How To Use
 
-_eva_ can be used directly from the terminal as a CLI tool as follows:
+_`eva`_ can be used directly from the terminal as a CLI tool as follows:
 ```sh
 eva {fit,predict,predict_fit} --config url/or/path/to/the/config.yaml
 ```
 
-When used as a CLI tool, `_eva_` supports configuration files (`.yaml`) as an argument to define its functionality.
+When used as a CLI tool, _`eva`_ supports configuration files (`.yaml`) as an argument to define its functionality.
 Native supported configs can be found at the [configs](https://github.com/kaiko-ai/eva/tree/main/configs) directory
 of the repo. Apart from cloning the repo, you can download the latest config folder as `.zip` from your browser from
 [here](https://download-directory.github.io/?url=https://github.com/kaiko-ai/eva/tree/main/configs). Alternatively,
 from a specific release the configs can be downloaded from the terminal as follows:
 ```sh
-curl -LO https://github.com/kaiko-ai/eva/releases/download/0.0.1/configs.zip | unzip configs.zip
+curl -LO https://github.com/kaiko-ai/eva/releases/download/0.0.1/configs.zip | unzip configs
 ```
 
 For example, to perform a downstream evaluation of DINO ViT-S/16 on the BACH dataset with
@@ -338,7 +344,7 @@ and [tutorials](https://kaiko-ai.github.io/eva/dev/user-guide/advanced/replicate
 
 ## Benchmarks
 
-In this section you will find model benchmarks which were generated with _eva_.
+In this section you will find model benchmarks which were generated with _`eva`_.
 
 ### Table I: WSI patch-level benchmark
 
@@ -351,13 +357,15 @@ In this section you will find model benchmarks which were generated with _eva_.
 | ViT-S/16 _(random)_ <sup>[1]</sup> | 0.410 | 0.617 | 0.501 | 0.753 | 0.728 |
 | ViT-S/16 _(ImageNet)_ <sup>[1]</sup> | 0.695 | 0.935 | 0.831 | 0.864 | 0.849 |
 | ViT-B/8 _(ImageNet)_ <sup>[1]</sup> | 0.710 | 0.939 | 0.814 | 0.870 | 0.856 |
+| ViT-L/14 _(ImageNet)_ <sup>[1]</sup> | 0.707 | 0.916 | 0.832 | 0.873 | 0.888 |
 | DINO<sub>(p=16)</sub> <sup>[2]</sup> | 0.801 | 0.934 | 0.768 | 0.889 | 0.895 |
 | Phikon <sup>[3]</sup> | 0.725 | 0.935 | 0.777 | 0.912 | 0.915 |
-| ViT-S/16 _(kaiko.ai)_ <sup>[4]</sup> | 0.797 | 0.943 | 0.828 | 0.903 | 0.893 |
-| ViT-S/8 _(kaiko.ai)_ <sup>[4]</sup> | 0.834 | 0.946 | 0.832 | 0.897 | 0.887 |
-| ViT-B/16 _(kaiko.ai)_ <sup>[4]</sup> | 0.810 | 0.960 | 0.826 | 0.900 | 0.898 |
-| ViT-B/8 _(kaiko.ai)_ <sup>[4]</sup> | 0.865 | 0.956 | 0.809 | 0.913 | 0.921 |
-| ViT-L/14 _(kaiko.ai)_ <sup>[4]</sup> | 0.870 | 0.930 | 0.809 | 0.908 | 0.898 |
+| UNI <sup>[4]</sup> | 0.814 | 0.950 | 0.837 | 0.936 | 0.938 |
+| ViT-S/16 _(kaiko.ai)_ <sup>[5]</sup> | 0.797 | 0.943 | 0.828 | 0.903 | 0.893 |
+| ViT-S/8 _(kaiko.ai)_ <sup>[5]</sup> | 0.834 | 0.946 | 0.832 | 0.897 | 0.887 |
+| ViT-B/16 _(kaiko.ai)_ <sup>[5]</sup> | 0.810 | 0.960 | 0.826 | 0.900 | 0.898 |
+| ViT-B/8 _(kaiko.ai)_ <sup>[5]</sup> | 0.865 | 0.956 | 0.809 | 0.913 | 0.921 |
+| ViT-L/14 _(kaiko.ai)_ <sup>[5]</sup> | 0.870 | 0.930 | 0.809 | 0.908 | 0.898 |
 
 _Table I: Linear probing evaluation of FMs on patch-level downstream datasets.<br> We report averaged balanced accuracy
 over 5 runs, with an average standard deviation of ±0.003._
@@ -367,14 +375,15 @@ over 5 runs, with an average standard deviation of ±0.003._
 <br />
 
 _References_:
-1. _"Emerging properties in self-supervised vision transformers”_
-2. _"Benchmarking self-supervised learning on diverse pathology datasets”_
-3. _"Scaling self-supervised learning for histopathology with masked image modeling”_
-4. _"Towards Training Large-Scale Pathology Foundation Models: from TCGA to Hospital Scale”_
+1. _"Emerging properties in self-supervised vision transformers”_, [arXiv](https://arxiv.org/abs/2104.14294)
+2. _"Benchmarking self-supervised learning on diverse pathology datasets”_, [arXiv](https://arxiv.org/abs/2212.04690)
+3. _"Scaling self-supervised learning for histopathology with masked image modeling”_, [medRxiv](https://www.medrxiv.org/content/10.1101/2023.07.21.23292757v1)
+4. _"A General-Purpose Self-Supervised Model for Computational Pathology”_, [arXiv](https://arxiv.org/abs/2308.15474)
+5. _"Towards Training Large-Scale Pathology Foundation Models: from TCGA to Hospital Scale”_, [arXiv](https://arxiv.org/pdf/2404.15217)
 
 ## Contributing
 
-_eva_ is an open source project and welcomes contributions of all kinds. Please checkout the [developer](./docs/DEVELOPER_GUIDE.md)
+_`eva`_ is an open source project and welcomes contributions of all kinds. Please checkout the [developer](./docs/DEVELOPER_GUIDE.md)
 and [contributing guide](./docs/CONTRIBUTING.md) for help on how to do so.
 
 All contributors must follow the [code of conduct](./docs/CODE_OF_CONDUCT.md).
@@ -399,7 +408,24 @@ Our codebase is built using multiple opensource contributions
 
 </div>
 
----
+
+## Citation
+
+If you find this repository useful, please consider giving a star ⭐ and adding the following citation:
+
+```
+@inproceedings{
+kaiko.ai2024eva,
+title={eva: Evaluation framework for pathology foundation models},
+author={kaiko.ai and Ioannis Gatopoulos and Nicolas K{\"a}nzig and Roman Moser and Sebastian Ot{\'a}lora},
+booktitle={Medical Imaging with Deep Learning},
+year={2024},
+url={https://openreview.net/forum?id=FNBQOPj18N}
+}
+```
+
+<br />
+
 <div align="center">
 <img src="https://github.com/kaiko-ai/eva/blob/main/docs/images/kaiko-logo.png?raw=true" width="200">
 </div>
@@ -1,8 +1,10 @@
+eva/.DS_Store,sha256=nHFfks7iwDvTdxEQbx3nJ4A8L95xoUm4jl5emHwGusQ,6148
 eva/__init__.py,sha256=bYBwklT7diG8NBIBDbpwjN4RUsvGv0ShWBXPxWgz404,518
 eva/__main__.py,sha256=kM5tQ0egTuBWixNLLx9QU-PpS2Bbs3zE3nYE6b2vWa0,282
 eva/__version__.py,sha256=YFR4oOlvPg0sS4Ni7GJ_vU42VTs5WiWp6odK7yH4TBY,611
 eva/core/__init__.py,sha256=AYlMZcH76B7I1lOa-E67u2o9DxsCwI4JMLCYXLk9oDQ,451
-eva/core/callbacks/__init__.py,sha256=tglEHfVB0NSaXenNLQAlyb2ufXiamxfR0o15W9a9t0k,110
+eva/core/callbacks/__init__.py,sha256=tHC6HJkLMKI5ThuSOuk3XLMvym4v2EtlDqrT91aAdmg,191
+eva/core/callbacks/config.py,sha256=-DRt20a2aF9Z9-7nZvbGBcOZ30qNf3ESf25EPRgRL1w,4267
 eva/core/callbacks/writers/__init__.py,sha256=GG8UXkbgNmpN1u_YIw-QysBGTSFm6C-P2XRtfnvVBrY,121
 eva/core/callbacks/writers/embeddings.py,sha256=q8Gd2_aDhhD7-QT5xAJbm8ikNjmHq_2DJosJ6WS5BOo,6752
 eva/core/callbacks/writers/typings.py,sha256=5AVIRAftqPTlLQ8s4ArEcMgLCyPnCZ9FFNk2yFppA1g,616
@@ -17,18 +19,31 @@ eva/core/data/datamodules/__init__.py,sha256=qZchYbgxo9lxYnGoqdk0C6MfS2IbF0WItO0
 eva/core/data/datamodules/call.py,sha256=jjj9w3UXYuQB-qyCcw1EZpRJW10OC1I3dvgvsuQWLck,940
 eva/core/data/datamodules/datamodule.py,sha256=dclC2YJAXUGEUpV9ZRWQS43-ksFIPgVeFudsyrj9kdc,3878
 eva/core/data/datamodules/schemas.py,sha256=EXnUPNd9Pj3RjnxJIzAcC2qp6TtBSvPDx28fV_ovWAA,1869
-eva/core/data/datasets/__init__.py,sha256=pMU-w6aQoRFgSwPB9GLXNilnsbwd6HqLlgw49e4rlj0,281
+eva/core/data/datasets/__init__.py,sha256=Y_18JE9sDtwEFdgPiKHf_ZqMj_1SUQtRZeiI2n7t1uA,387
 eva/core/data/datasets/base.py,sha256=NLZlxznB4SCYNf070OhfNJztaOpqwQWemwpGkFv_CA0,2005
-eva/core/data/datasets/classification/__init__.py,sha256=nhgeoK0chIvX9kzrp9yz40xha_g78o-R4AV7HbyfQOU,176
-eva/core/data/datasets/classification/embeddings.py,sha256=E2vYX5q0wDueIZwfB252aM4l9i8XNuf2BQ1lOJR32P4,5215
 eva/core/data/datasets/dataset.py,sha256=tA6Wd_7vqOE9GsukSWrgN9zaZKtKCHaE58SqIfWxWdg,124
+eva/core/data/datasets/embeddings/__init__.py,sha256=kxJtohGI6oP2lJiDVb61w7NMXuVD3np2B5wbuMIu6K4,357
+eva/core/data/datasets/embeddings/base.py,sha256=EXGa4qsXMA1LkdcZEdjnQq-ooSdw2VitjvpZLcTB8ug,4979
+eva/core/data/datasets/embeddings/classification/__init__.py,sha256=E2zyht5De7RnTVF-MRbteCzdUt4_Of2zJx3jolPpCdM,371
+eva/core/data/datasets/embeddings/classification/embeddings.py,sha256=d5s9f58oQKuRyzWGAg2RXFG41NjLWqu56O3_CI77S_8,2467
+eva/core/data/datasets/embeddings/classification/multi_embeddings.py,sha256=737KakBBui3zvMWKqoXWIG_qa5yaxhgtGP_iBCKnnPY,4343
 eva/core/data/samplers/__init__.py,sha256=WikBo1DemCx6o2vFfNwSwODlmCT2zWUXtCNwiWCVAFE,100
 eva/core/data/samplers/sampler.py,sha256=vrrXERWC67fjmTk_uwD7s9-8-rdhvnx7OlSipHE6sdY,119
-eva/core/data/transforms/__init__.py,sha256=ZdqOER1VMLu5nI1As0IuBPTS8ISw0EO4zeMZoio3G-c,157
+eva/core/data/transforms/__init__.py,sha256=n0TczmJSc9EjR6JezAZqlZIN4Gz_X3UBePbyDSC7JkE,308
 eva/core/data/transforms/dtype/__init__.py,sha256=r_LM_hdh_gTsrgh3shDTdMpu-lgQNHJ1yD6wY3omPyg,174
 eva/core/data/transforms/dtype/array.py,sha256=RDSkXlnSHSYyU_gv7vw33OZ7vhEy62PQGoE3htGGaqc,725
+eva/core/data/transforms/padding/__init__.py,sha256=AKSXa2dOhj45dTw81piPoCfDmIL0FPJUIxZ3HlG7KVM,138
+eva/core/data/transforms/padding/pad_2d_tensor.py,sha256=J4maGFmeQf9IHRxt5kU-6eI-Bvk12F_HVk8kR_omrnY,1185
+eva/core/data/transforms/sampling/__init__.py,sha256=BFKbvRjlZrwS0GcNrM54ZSWt6PrQARfFlXM1jJ-wpvo,149
+eva/core/data/transforms/sampling/sample_from_axis.py,sha256=Zbhp94lVa70WQKmSOKMTsOMe2c7wLqNZto7JqWhSdtI,1229
 eva/core/interface/__init__.py,sha256=chdpKXipxe1NP-Fgr_d9r6X1gMna0XiEa38waJ6FzTM,98
 eva/core/interface/interface.py,sha256=GzjneNHhTIEuLbydUG9cSmpHjJ4_IENGM-glN8RaRxY,2741
+eva/core/loggers/__init__.py,sha256=4YMLNlN9LnuKqhBI1R1keh69dmMD-2lcH3HKwwyn380,266
+eva/core/loggers/dummy.py,sha256=Y7ypH0ecSAIkkZ5LzTmNNEzlKkqeaHfUNMCDKVOg6D4,1204
+eva/core/loggers/experimental_loggers.py,sha256=JjW_SAABnNZyhJEy6kcPkPKDPyvuAj4sQTdn-tewm8U,204
+eva/core/loggers/log/__init__.py,sha256=cPqLKqIUhiTeDNG5ZUUOExDr76y4LBe4yXUvCxIQh0o,124
+eva/core/loggers/log/parameters.py,sha256=7qxa-ndtYb2_UkPZ4AlcHaGeUvEw2_9phCdco_OZ9Oo,1413
+eva/core/loggers/log/utils.py,sha256=k4Q7uKpAQctfDv0EEYPnPv6wt9LnckEeqGvbYSLfKO0,415
 eva/core/metrics/__init__.py,sha256=sTpNUbvgpKTd1IifzPVWmXVZ17PSJjcfEAlY_3fZP5U,558
 eva/core/metrics/average_loss.py,sha256=AyFOnCXBD5T62eSYf6eGAAJsqt8x-KaHgc8OLkCHjzE,1267
 eva/core/metrics/binary_balanced_accuracy.py,sha256=MabsXAtVfLqSaSIIpE0HIM6bo8uRszl6obueHI6vJi0,806
@@ -44,9 +59,9 @@ eva/core/metrics/structs/schemas.py,sha256=S6dTbz6YjxkNUIqWVd52KgpVx5JqNFqM4Xs7z
 eva/core/metrics/structs/typings.py,sha256=qJd-FiD2IhJgBeo8FyP0vpVUIH4RKb1k6zYvHtjUA04,388
 eva/core/models/__init__.py,sha256=yRLRKYuShhgQBWzV6sjzjThOqqNb9HRS48bMmAxEy-8,305
 eva/core/models/modules/__init__.py,sha256=QJWJ42BceXZBzDGgk5FHBcCaRrB9egTFKVF6gDsBYfM,255
-eva/core/models/modules/head.py,sha256=Rn9bJmSRWqNPnRl-YpvPZgtjbr6UNU83oHXQ1gEpAJE,4138
+eva/core/models/modules/head.py,sha256=sJ2RTNmOsqpCczajwgp6nHZ5qELhjEs8BhO36KyDG-g,3992
 eva/core/models/modules/inference.py,sha256=ih-0Rr2oNf2N6maiXPOW7XH5KVwUT1_MOxnJKOhJ1uQ,978
-eva/core/models/modules/module.py,sha256=0KquNWeblYg49S8AoV-Dpc65_ZIWl56ht5R7bKKXj7o,5918
+eva/core/models/modules/module.py,sha256=7mCzyvBNOWhvN8sNa91yB79iSBlJlYh9sypL37Nwdes,6836
 eva/core/models/modules/typings.py,sha256=fNoGsC_q1d9c2KauUC-f1psKrCFmfoeC8JJ_US_pOW0,521
 eva/core/models/modules/utils/__init__.py,sha256=pnbxlEhT87JimWNr-NSNCv7VNR-IyDi_A9qRWmvlzwQ,227
 eva/core/models/modules/utils/batch_postprocess.py,sha256=q1kC3pwSS7RyI76qunuvFP7RpUhEpUsR6xzjahJkQKQ,1915
@@ -63,10 +78,10 @@ eva/core/models/networks/wrappers/huggingface.py,sha256=81j0pcEx3DW6gR-81Fz6tZkJ
 eva/core/models/networks/wrappers/onnx.py,sha256=LZEGOpg1VYrB3wXMAA5IMfiKNTkOXQ50agHjTvYnnsU,1718
 eva/core/trainers/__init__.py,sha256=jhsKJF7HAae7EOiG3gKIAHH_h3dZlTE2JRcCHJmOzJc,208
 eva/core/trainers/_logging.py,sha256=gi4FqPy2GuVmh0WZY6mYwF7zMPvnoFA050B0XdCP6PU,2571
-eva/core/trainers/_recorder.py,sha256=_Vfp7Njh_9qP-SWbBGYp8solnfFgIUi2Z9pGLXt52WY,5652
+eva/core/trainers/_recorder.py,sha256=y6i5hfXftWjeV3eQHmMjUOkWumnZ2QNv_u275LLmvPA,7702
 eva/core/trainers/_utils.py,sha256=M3h8lVhUmkeSiEXpX9hRdMvThGFCnTP15gv-hd1CZkc,321
-eva/core/trainers/functional.py,sha256=pIeGXoO63Wh6n1mOYlBo5ACCteGuNV9pZhqxfN4RLSs,3775
-eva/core/trainers/trainer.py,sha256=j4rYWiG9COxBbZ6WIlpRPBE153XqlYK7eAWlRsZgljU,3261
+eva/core/trainers/functional.py,sha256=Ju0Eh98oxsj_7QBwxu3GddCD7FROEdQgpp9fdGS9sp4,4360
+eva/core/trainers/trainer.py,sha256=Vw_KhTyh-3YV5qo_XHxz9oy-v2PxrgoOWMeYi8-41R0,3949
 eva/core/utils/__init__.py,sha256=F1C69M9y7W8qh1J2k-X4frRHa7r1mPXewscC94fFYtk,58
 eva/core/utils/io/__init__.py,sha256=SAME0kuSvDE1DKFJwMBmnCkpDAy4ujXuRTSJsHNhwUI,112
 eva/core/utils/io/dataframe.py,sha256=CIHFowljH17waDkJ9YJVEVXAIcxMwoLjUgoBttiNk8w,509
@@ -74,37 +89,39 @@ eva/core/utils/multiprocessing.py,sha256=PxUxMyvI62lghyWF46O5RNL-J7DUR2IrXSwdkbh
 eva/core/utils/workers.py,sha256=hfx63M82qNg0Dwhre2tl53MnhtRsV7APaDONM9nhVB8,634
 eva/vision/__init__.py,sha256=Z9AuPmTO-i73pUtq3IkZzwRlY1E5xCE8IZiyl5S71TM,438
 eva/vision/data/__init__.py,sha256=aoKPmX8P2Q2k2W3nlq8vFU41FV6Sze-0SDuWtU-ETh4,111
-eva/vision/data/datasets/__init__.py,sha256=aV4qPqtlt0PnaGoxUW_xEwAr8b8ddkl_YE4_fAdavds,497
-eva/vision/data/datasets/_utils.py,sha256=5GAZEHn-VezxTXaW1jVZO5zvdVl1Vz8_5gV2qkoMu4s,1414
+eva/vision/data/datasets/__init__.py,sha256=0pGqI6m4zUEwHPFHcE_7z2MsDS7hG9xL8mnXvSPklOo,402
+eva/vision/data/datasets/_utils.py,sha256=epPcaYE4w2_LtUKLLQJh6qQxUNVBe22JA06k4WUerYQ,1430
 eva/vision/data/datasets/_validators.py,sha256=uPbbUNnftb8mYzsKVrF-ZX_xinB2zQkuQLFYMprVjhY,2099
-eva/vision/data/datasets/classification/__init__.py,sha256=I9vTkETzGnTNNvyRB96ut1YHx9ARmZVO0-0l3ZLWEAs,520
+eva/vision/data/datasets/classification/__init__.py,sha256=DZq9__B4D2x1fbQsG-SwcOR_KF8ZFgCtwTzhC16p63c,362
 eva/vision/data/datasets/classification/bach.py,sha256=_xuA4evV9jCI76bUKbzom4ECLKShCsd95S8PtvhRAH4,5637
 eva/vision/data/datasets/classification/base.py,sha256=zBqn8rQP59j1DEChf3rDXgyMtB_sbug8kPvgFCqZyl4,3060
 eva/vision/data/datasets/classification/crc.py,sha256=7RR0PJWnhLMa3AUB_F2XMYawF5gnCNbGMv25ejOEeNA,5875
 eva/vision/data/datasets/classification/mhist.py,sha256=yoDHZ2vqa26YKVvJ9t6aidOVGazGIwUD6F3o0zNsxjM,3257
 eva/vision/data/datasets/classification/patch_camelyon.py,sha256=CH9sveoMppNWPQHm4qPTONRSGqX3O8P3OYwMB6mO678,7253
-eva/vision/data/datasets/classification/total_segmentator.py,sha256=OkbqS41ykdUX0wGf6jSja5WzeeRmevUnH5alfcEQhwg,8069
 eva/vision/data/datasets/segmentation/__init__.py,sha256=byQCBHicM6mQkljHPllUqRvoFaJxHtPMKcyjPmK6dUM,249
-eva/vision/data/datasets/segmentation/base.py,sha256=JogXJ3KiOaUybAcyvoqjR4yjlBfVTt2Rt8OOAz32Jrc,3630
-eva/vision/data/datasets/segmentation/total_segmentator.py,sha256=NUh-NlrsTcUsbe3qLd_d481mok970bNF7zIdpAS7eks,8075
+eva/vision/data/datasets/segmentation/base.py,sha256=XSD-hl7UtqshzF9ulImlL7v11lT2rWhhro7K3dF0Ucs,2937
+eva/vision/data/datasets/segmentation/total_segmentator.py,sha256=xKGQN03ybOXQE1EtO9WRnAZ_8B40uXtaz2WliNFJ4Uc,8705
 eva/vision/data/datasets/structs.py,sha256=RaTDW-B36PumcR5gymhCiX-r8GiKqIFcjqoEEjjFyUE,389
 eva/vision/data/datasets/vision.py,sha256=hKKFMb65UJQzOyYm8FTGkOGBOinMRu7R8sOFMbCmQX4,1100
 eva/vision/data/transforms/__init__.py,sha256=cHnLwyx6biAjqstD4IDspVtM-_dv7GBrQG6x_0SM8MM,120
 eva/vision/data/transforms/common/__init__.py,sha256=ZHzpdr-THc9CgFFbAVMWUiZrUNUiHnCDM8GYhM7tMfU,138
 eva/vision/data/transforms/common/resize_and_crop.py,sha256=IkAeTOe5TxK_cHzFvS7yW8YUh27C-KjXqekL7pfcT9A,1485
+eva/vision/models/.DS_Store,sha256=CKEtWIsQcwe9BwZ8pyCnXleWpY_4ilCps_OuRG9ZGaM,8196
 eva/vision/models/__init__.py,sha256=v9JhyLdy38XUkA0JmNMzSxNYmKi7nWBrp_XYgM7dmTU,89
+eva/vision/models/networks/.DS_Store,sha256=1lFlJ5EFymdzGAUAaI30vcaaLHt3F1LwpG7xILf9jsM,6148
 eva/vision/models/networks/__init__.py,sha256=IDpFsocWtyfe28vR9yMmXPYzV9X2NSDrTx7ewH8u-XU,170
 eva/vision/models/networks/abmil.py,sha256=N1eH4fn1nXmgXurSQyQIxxonv7nsqeeuPWaQSHeltfs,6796
 eva/vision/models/networks/postprocesses/__init__.py,sha256=nWBuROKE77W9xfyAxmS6L9IgOaXjcB5Qpaw1ihHG64E,148
 eva/vision/models/networks/postprocesses/cls.py,sha256=AFRrCjSmKy0n14toKa-G-QVx3dh-H4zSx5myi_P2OFA,822
 eva/vision/utils/__init__.py,sha256=vaUovprE743SmyFH8l6uk4pYSWpI4zxn7lN0EwePTJI,96
+eva/vision/utils/convert.py,sha256=tP0ps_IK2BXkuUsmI0UXVrSbmDAbjfLtQt3AiFtT2j4,679
 eva/vision/utils/io/__init__.py,sha256=Aw2UxGO3nbUidroMlS-MMJUALjQVvfsOZ1ZhcENDwRo,310
 eva/vision/utils/io/_utils.py,sha256=JzOt7Frj6ScF_aNjFtfHBn4ROnl6NhUZucmQhLc4Cww,768
 eva/vision/utils/io/image.py,sha256=2jzeVFMvIRhuTkIrQeLyu0y8GttLp6rWRjO9I2uw-I8,1489
-eva/vision/utils/io/nifti.py,sha256=ph9w8dNNSsJG2wI3NJNPTLyWdz2S0i9jD068nHXVVJs,1510
+eva/vision/utils/io/nifti.py,sha256=Rfbz49IakkAMimN8VnHJxDgxqTVwxGfxWFrTMXLrbJc,1659
 eva/vision/utils/io/text.py,sha256=uECChKjeKi4KQ-NqdO7ywAFS_TOEp2DQ5QQcuG8cb-4,472
-kaiko_eva-0.0.1.dist-info/METADATA,sha256=ftjXJlWbHwUT3YNSBxtaMClK_iUHh95oHl0POcZVNCA,22362
-kaiko_eva-0.0.1.dist-info/WHEEL,sha256=N2J68yzZqJh3mI_Wg92rwhw0rtJDFpZj9bwQIMJgaVg,90
-kaiko_eva-0.0.1.dist-info/entry_points.txt,sha256=oqtS2Yt5EBY4saLyCBC3Zev3huCORKTKWyPovX7QR8g,73
-kaiko_eva-0.0.1.dist-info/licenses/LICENSE,sha256=e6AEzr7j_R-PYr2qLO-JwLn8y70jbVD3U2mxbRmwcI4,11338
-kaiko_eva-0.0.1.dist-info/RECORD,,
+kaiko_eva-0.0.2.dist-info/METADATA,sha256=R4SakGonLZsdyj2yEgkkOa1Z96INt2AfP6Gvak8eN68,23562
+kaiko_eva-0.0.2.dist-info/WHEEL,sha256=vnE8JVcI2Wz7GRKorsPArnBdnW2SWKWGow5gu5tHlRU,90
+kaiko_eva-0.0.2.dist-info/entry_points.txt,sha256=oqtS2Yt5EBY4saLyCBC3Zev3huCORKTKWyPovX7QR8g,73
+kaiko_eva-0.0.2.dist-info/licenses/LICENSE,sha256=e6AEzr7j_R-PYr2qLO-JwLn8y70jbVD3U2mxbRmwcI4,11338
+kaiko_eva-0.0.2.dist-info/RECORD,,
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: pdm-backend (2.1.8)
+Generator: pdm-backend (2.3.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
@@ -1,5 +0,0 @@
-"""Classification datasets API."""
-
-from eva.core.data.datasets.classification.embeddings import EmbeddingsClassificationDataset
-
-__all__ = ["EmbeddingsClassificationDataset"]