kaiko-eva 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

eva/core/utils/io/__init__.py

@@ -1,5 +1,6 @@
  """Core I/O utilities."""

  from eva.core.utils.io.dataframe import read_dataframe
+ from eva.core.utils.io.gz import gunzip_file

- __all__ = ["read_dataframe"]
+ __all__ = ["read_dataframe", "gunzip_file"]

eva/core/utils/io/gz.py (added)

@@ -0,0 +1,28 @@
+ """Utils for .gz files."""
+
+ import gzip
+ import os
+
+
+ def gunzip_file(path: str, unpack_dir: str | None = None, keep: bool = True) -> str:
+     """Unpacks a .gz file to the provided directory.
+
+     Args:
+         path: Path to the .gz file to extract.
+         unpack_dir: Directory to extract the file to. If `None`, it will use the
+             same directory as the compressed file.
+         keep: Whether to keep the compressed .gz file.
+
+     Returns:
+         The path to the extracted file.
+     """
+     unpack_dir = unpack_dir or os.path.dirname(path)
+     os.makedirs(unpack_dir, exist_ok=True)
+     save_path = os.path.join(unpack_dir, os.path.basename(path).replace(".gz", ""))
+     if not os.path.isfile(save_path):
+         with gzip.open(path, "rb") as f_in:
+             with open(save_path, "wb") as f_out:
+                 f_out.write(f_in.read())
+     if not keep:
+         os.remove(path)
+     return save_path
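
For reference, a minimal usage sketch of the new helper as exported from `eva.core.utils.io` (the paths below are hypothetical):

    from eva.core.utils.io import gunzip_file

    # Extracts /data/sample/ct.nii.gz next to the archive and keeps the .gz file.
    extracted = gunzip_file("/data/sample/ct.nii.gz")

    # Extract into a different directory and remove the archive afterwards.
    extracted = gunzip_file("/data/sample/ct.nii.gz", unpack_dir="/tmp/ct", keep=False)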

eva/core/utils/multiprocessing.py

@@ -3,7 +3,10 @@
  import multiprocessing
  import sys
  import traceback
- from typing import Any
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, TypeVar
+
+ from eva.core.utils.progress_bar import tqdm


  class Process(multiprocessing.Process):
@@ -42,3 +45,45 @@ class Process(multiprocessing.Process):
              error, traceback = self.exception
              sys.stderr.write(traceback + "\n")
              raise error
+
+
+ R = TypeVar("R")
+
+
+ def run_with_threads(
+     func: Callable[..., R],
+     items: Iterable[Tuple[Any, ...]],
+     kwargs: Dict[str, Any] | None = None,
+     num_workers: int = 8,
+     progress_desc: Optional[str] = None,
+     show_progress: bool = True,
+     return_results: bool = True,
+ ) -> List[R] | None:
+     """Process items with multiple threads using ThreadPoolExecutor.
+
+     Args:
+         func: Function to execute for each item
+         items: Iterable of items to process. Each item should be a tuple of
+             arguments to pass to func.
+         kwargs: Additional keyword arguments to pass to func.
+         num_workers: Number of worker threads
+         progress_desc: Description for progress bar
+         show_progress: Whether to show progress bar
+         return_results: Whether to return the results. If False, the function
+             will return None.
+
+     Returns:
+         List of results if return_results is True, otherwise None
+     """
+     results: List[Any] = []
+
+     with ThreadPoolExecutor(max_workers=num_workers) as executor:
+         futures = [executor.submit(func, *args, **(kwargs or {})) for args in items]
+         pbar = tqdm(total=len(futures), desc=progress_desc, disable=not show_progress, leave=False)
+         for future in as_completed(futures):
+             if return_results:
+                 results.append(future.result())
+             pbar.update(1)
+         pbar.close()
+
+     return results if return_results else None
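
A hedged usage sketch of `run_with_threads` (the `square` worker below is a hypothetical stand-in for any callable):

    from eva.core.utils.multiprocessing import run_with_threads

    def square(x: int, offset: int = 0) -> int:
        return x * x + offset

    results = run_with_threads(
        square,
        [(i,) for i in range(10)],   # each item is a tuple of positional arguments
        kwargs={"offset": 1},        # keyword arguments shared by every call
        num_workers=4,
        show_progress=False,
    )
    # Results are collected in completion order, not submission order.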

eva/vision/callbacks/loggers/batch/segmentation.py

@@ -128,7 +128,7 @@ def _draw_semantic_mask(tensor: torch.Tensor) -> torch.Tensor:
      integer values which represent the pixel class id.

      Args:
-         tensor: An image tensor of range [0., 1.].
+         tensor: An image tensor of range [0., N_CLASSES].

      Returns:
          The image as a tensor of range [0., 255.].
@@ -136,9 +136,11 @@ def _draw_semantic_mask(tensor: torch.Tensor) -> torch.Tensor:
      tensor = torch.squeeze(tensor)
      height, width = tensor.shape[-2], tensor.shape[-1]
      red, green, blue = torch.zeros((3, height, width), dtype=torch.uint8)
-     for class_id, color in colormap.COLORMAP.items():
+     class_ids = torch.unique(tensor)
+     colors = colormap.get_colors(max(class_ids))
+     for class_id in class_ids:
          indices = tensor == class_id
-         red[indices], green[indices], blue[indices] = color
+         red[indices], green[indices], blue[indices] = colors[int(class_id)]
      return torch.stack([red, green, blue])

@@ -157,8 +159,9 @@ def _overlay_mask(image: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
          from the predefined colormap.
      """
      binary_masks = functional.one_hot(mask).permute(2, 0, 1).to(dtype=torch.bool)
+     colors = colormap.get_colors(binary_masks.shape[0] + 1)
      return torchvision.utils.draw_segmentation_masks(
-         image, binary_masks[1:], alpha=0.65, colors=colormap.COLORS[1:] # type: ignore
+         image, binary_masks[1:], alpha=0.65, colors=colors[1:] # type: ignore
      )


eva/vision/data/datasets/segmentation/total_segmentator_2d.py

@@ -3,6 +3,7 @@
  import functools
  import os
  from glob import glob
+ from pathlib import Path
  from typing import Any, Callable, Dict, List, Literal, Tuple

  import numpy as np
@@ -12,7 +13,8 @@ from torchvision import tv_tensors
  from torchvision.datasets import utils
  from typing_extensions import override

- from eva.core.utils.progress_bar import tqdm
+ from eva.core.utils import io as core_io
+ from eva.core.utils import multiprocessing
  from eva.vision.data.datasets import _validators, structs
  from eva.vision.data.datasets.segmentation import base
  from eva.vision.utils import io
@@ -65,6 +67,8 @@ class TotalSegmentator2D(base.ImageSegmentation):
          download: bool = False,
          classes: List[str] | None = None,
          optimize_mask_loading: bool = True,
+         decompress: bool = True,
+         num_workers: int = 10,
          transforms: Callable | None = None,
      ) -> None:
          """Initialize dataset.
@@ -85,8 +89,15 @@ class TotalSegmentator2D(base.ImageSegmentation):
                  in order to optimize the loading time. In the `setup` method, it
                  will reformat the binary one-hot masks to a semantic mask and store
                  it on disk.
+             decompress: Whether to decompress the ct.nii.gz files when preparing the data.
+                 The label masks won't be decompressed, but when enabling optimize_mask_loading
+                 it will export the semantic label masks to a single file in uncompressed .nii
+                 format.
+             num_workers: The number of workers to use for optimizing the masks &
+                 decompressing the .gz files.
              transforms: A function/transforms that takes in an image and a target
                  mask and returns the transformed versions of both.
+
          """
          super().__init__(transforms=transforms)

@@ -96,6 +107,8 @@ class TotalSegmentator2D(base.ImageSegmentation):
          self._download = download
          self._classes = classes
          self._optimize_mask_loading = optimize_mask_loading
+         self._decompress = decompress
+         self._num_workers = num_workers

          if self._optimize_mask_loading and self._classes is not None:
              raise ValueError(
@@ -128,23 +141,29 @@ class TotalSegmentator2D(base.ImageSegmentation):
      def class_to_idx(self) -> Dict[str, int]:
          return {label: index for index, label in enumerate(self.classes)}

+     @property
+     def _file_suffix(self) -> str:
+         return "nii" if self._decompress else "nii.gz"
+
      @override
-     def filename(self, index: int, segmented: bool = True) -> str:
+     def filename(self, index: int) -> str:
          sample_idx, _ = self._indices[index]
          sample_dir = self._samples_dirs[sample_idx]
-         return os.path.join(sample_dir, "ct.nii.gz")
+         return os.path.join(sample_dir, f"ct.{self._file_suffix}")

      @override
      def prepare_data(self) -> None:
          if self._download:
              self._download_dataset()
+         if self._decompress:
+             self._decompress_files()
+         self._samples_dirs = self._fetch_samples_dirs()
+         if self._optimize_mask_loading:
+             self._export_semantic_label_masks()

      @override
      def configure(self) -> None:
-         self._samples_dirs = self._fetch_samples_dirs()
          self._indices = self._create_indices()
-         if self._optimize_mask_loading:
-             self._export_semantic_label_masks()

      @override
      def validate(self) -> None:
@@ -186,16 +205,15 @@ class TotalSegmentator2D(base.ImageSegmentation):
          return {"slice_index": slice_index}

      def _load_mask(self, index: int) -> tv_tensors.Mask:
-         """Loads and builds the segmentation mask from NifTi files."""
          sample_index, slice_index = self._indices[index]
          semantic_labels = self._load_masks_as_semantic_label(sample_index, slice_index)
-         return tv_tensors.Mask(semantic_labels, dtype=torch.int64) # type: ignore[reportCallIssue]
+         return tv_tensors.Mask(semantic_labels.squeeze(), dtype=torch.int64) # type: ignore[reportCallIssue]

      def _load_semantic_label_mask(self, index: int) -> tv_tensors.Mask:
          """Loads the segmentation mask from a semantic label NifTi file."""
          sample_index, slice_index = self._indices[index]
          masks_dir = self._get_masks_dir(sample_index)
-         filename = os.path.join(masks_dir, "semantic_labels", "masks.nii.gz")
+         filename = os.path.join(masks_dir, "semantic_labels", "masks.nii")
          semantic_labels = io.read_nifti(filename, slice_index)
          return tv_tensors.Mask(semantic_labels.squeeze(), dtype=torch.int64) # type: ignore[reportCallIssue]

@@ -209,7 +227,7 @@ class TotalSegmentator2D(base.ImageSegmentation):
              slice_index: Whether to return only a specific slice.
          """
          masks_dir = self._get_masks_dir(sample_index)
-         mask_paths = [os.path.join(masks_dir, label + ".nii.gz") for label in self.classes]
+         mask_paths = [os.path.join(masks_dir, f"{label}.nii.gz") for label in self.classes]
          binary_masks = [io.read_nifti(path, slice_index) for path in mask_paths]
          background_mask = np.zeros_like(binary_masks[0])
          return np.argmax([background_mask] + binary_masks, axis=0)
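
For context, a hedged sketch of the argmax composition used above: stacking a background plane with per-class binary masks yields an integer class id per pixel (the organ names are hypothetical):

    import numpy as np

    liver = np.array([[1, 0], [0, 0]])    # hypothetical binary masks
    kidney = np.array([[0, 1], [0, 0]])
    background = np.zeros_like(liver)

    semantic = np.argmax([background, liver, kidney], axis=0)
    # -> [[1, 2],
    #     [0, 0]]   where 0=background, 1=liver, 2=kidney
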
@@ -219,24 +237,28 @@ class TotalSegmentator2D(base.ImageSegmentation):
          total_samples = len(self._samples_dirs)
          masks_dirs = map(self._get_masks_dir, range(total_samples))
          semantic_labels = [
-             (index, os.path.join(directory, "semantic_labels", "masks.nii.gz"))
+             (index, os.path.join(directory, "semantic_labels", "masks.nii"))
              for index, directory in enumerate(masks_dirs)
          ]
          to_export = filter(lambda x: not os.path.isfile(x[1]), semantic_labels)

-         for sample_index, filename in tqdm(
-             list(to_export),
-             desc=">> Exporting optimized semantic masks",
-             leave=False,
-         ):
+         def _process_mask(sample_index: Any, filename: str) -> None:
              semantic_labels = self._load_masks_as_semantic_label(sample_index)
              os.makedirs(os.path.dirname(filename), exist_ok=True)
              io.save_array_as_nifti(semantic_labels, filename)

+         multiprocessing.run_with_threads(
+             _process_mask,
+             list(to_export),
+             num_workers=self._num_workers,
+             progress_desc=">> Exporting optimized semantic mask",
+             return_results=False,
+         )
+
      def _get_image_path(self, sample_index: int) -> str:
          """Returns the corresponding image path."""
          sample_dir = self._samples_dirs[sample_index]
-         return os.path.join(self._root, sample_dir, "ct.nii.gz")
+         return os.path.join(self._root, sample_dir, f"ct.{self._file_suffix}")

      def _get_masks_dir(self, sample_index: int) -> str:
          """Returns the directory of the corresponding masks."""
@@ -246,7 +268,7 @@ class TotalSegmentator2D(base.ImageSegmentation):
      def _get_semantic_labels_filename(self, sample_index: int) -> str:
          """Returns the semantic label filename."""
          masks_dir = self._get_masks_dir(sample_index)
-         return os.path.join(masks_dir, "semantic_labels", "masks.nii.gz")
+         return os.path.join(masks_dir, "semantic_labels", "masks.nii")

      def _get_number_of_slices_per_sample(self, sample_index: int) -> int:
          """Returns the total amount of slices of a sample."""
@@ -320,6 +342,16 @@ class TotalSegmentator2D(base.ImageSegmentation):
                  remove_finished=True,
              )

+     def _decompress_files(self) -> None:
+         compressed_paths = Path(self._root).rglob("*/ct.nii.gz")
+         multiprocessing.run_with_threads(
+             core_io.gunzip_file,
+             [(str(path),) for path in compressed_paths],
+             num_workers=self._num_workers,
+             progress_desc=">> Decompressing .gz files",
+             return_results=False,
+         )
+
      def _print_license(self) -> None:
          """Prints the dataset license."""
          print(f"Dataset license: {self._license}")
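
Putting the pieces together, a hedged sketch of how the updated dataset might be prepared; the import path follows the module location shown above, while `root` and `split` are assumed arguments not visible in this diff and further constructor arguments may be required:

    from eva.vision.data.datasets.segmentation.total_segmentator_2d import TotalSegmentator2D

    # With decompress=True (the default), prepare_data() unzips every ct.nii.gz using
    # `num_workers` threads and, since optimize_mask_loading defaults to True, exports
    # one uncompressed semantic_labels/masks.nii per sample.
    dataset = TotalSegmentator2D(
        root="/data/total_segmentator",  # hypothetical location of the raw data
        split="train",                   # assumed argument, not shown in this diff
        decompress=True,
        num_workers=10,
    )
    dataset.prepare_data()
    dataset.configure()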

eva/vision/models/modules/semantic_segmentation.py

@@ -103,7 +103,7 @@ class SemanticSegmentationModule(module.ModelModule):
                  "decoder should map the embeddings (`inputs`) to."
              )
          features = self.encoder(inputs) if self.encoder else inputs
-         decoder_inputs = DecoderInputs(features, inputs.shape[-2:], inputs) # type: ignore
+         decoder_inputs = DecoderInputs(features, to_size or inputs.shape[-2:], inputs) # type: ignore
          return self.decoder(decoder_inputs)

      @override

eva/vision/utils/colormap.py

@@ -1,5 +1,7 @@
  """Color mapping constants."""

+ from typing import List, Tuple
+
  COLORS = [
      (0, 0, 0),
      (255, 0, 0), # Red
@@ -75,3 +77,21 @@ COLORS = [

  COLORMAP = dict(enumerate(COLORS)) | {255: (255, 255, 255)}
  """Class id to RGB color mapping."""
+
+
+ def get_colors(num_colors: int) -> List[Tuple[int, int, int]]:
+     """Get a list of RGB colors.
+
+     If the number of colors is greater than the predefined colors, it will
+     repeat the colors until it reaches the requested number
+
+     Args:
+         num_colors: The number of colors to return.
+
+     Returns:
+         A list of RGB colors.
+     """
+     colors = COLORS
+     while len(colors) < num_colors:
+         colors = colors + COLORS[1:]
+     return colors
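
For illustration, a hedged sketch of the cycling behavior (assuming the module is importable as `eva.vision.utils.colormap`):

    from eva.vision.utils import colormap

    # Requesting more colors than the predefined palette wraps around,
    # re-using every entry except the black background color at index 0.
    palette = colormap.get_colors(100)
    assert len(palette) >= 100
    assert palette[0] == (0, 0, 0)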

kaiko_eva-0.1.3.dist-info/METADATA → kaiko_eva-0.1.5.dist-info/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: kaiko-eva
- Version: 0.1.3
+ Version: 0.1.5
  Summary: Evaluation Framework for oncology foundation models.
  Keywords: machine-learning,evaluation-framework,oncology,foundation-models
  Author-Email: Ioannis Gatopoulos <ioannis@kaiko.ai>, =?utf-8?q?Nicolas_K=C3=A4nzig?= <nicolas@kaiko.ai>, Roman Moser <roman@kaiko.ai>
@@ -216,14 +216,14 @@ Project-URL: Homepage, https://kaiko-ai.github.io/eva/dev/
  Project-URL: Repository, https://github.com/kaiko-ai/eva
  Project-URL: Documentation, https://kaiko-ai.github.io/eva/dev/
  Requires-Python: >=3.10
- Requires-Dist: torch==2.3.0
- Requires-Dist: lightning>=2.2.2
- Requires-Dist: jsonargparse[omegaconf]==4.31.0
+ Requires-Dist: torch>=2.3.0
+ Requires-Dist: lightning>=2.2.0
+ Requires-Dist: jsonargparse[omegaconf]>=4.30.0
  Requires-Dist: tensorboard>=2.16.2
  Requires-Dist: loguru>=0.7.2
- Requires-Dist: pandas>=2.2.0
+ Requires-Dist: pandas>=2.0.0
  Requires-Dist: transformers>=4.38.2
- Requires-Dist: onnxruntime>=1.17.1
+ Requires-Dist: onnxruntime>=1.15.1
  Requires-Dist: onnx>=1.16.0
  Requires-Dist: toolz>=0.12.1
  Requires-Dist: rich>=13.7.1

kaiko_eva-0.1.3.dist-info/RECORD → kaiko_eva-0.1.5.dist-info/RECORD

@@ -94,10 +94,11 @@ eva/core/trainers/functional.py,sha256=NPxFCtU5KgquVowjeXAf_xj4-Thj7ZxN9F3sHRDrD
  eva/core/trainers/trainer.py,sha256=Vw_KhTyh-3YV5qo_XHxz9oy-v2PxrgoOWMeYi8-41R0,3949
  eva/core/utils/__init__.py,sha256=cndVBvtYxEW7hykH39GCNVI86zkXNn8Lw2A0sUJHS04,237
  eva/core/utils/clone.py,sha256=qcThZOuAs1cs0uV3BL5eKeM2VIBjuRPBe1t-NiUFM5Y,569
- eva/core/utils/io/__init__.py,sha256=SAME0kuSvDE1DKFJwMBmnCkpDAy4ujXuRTSJsHNhwUI,112
+ eva/core/utils/io/__init__.py,sha256=Py03AmoxhmTHkro6CzNps27uXKkXPzdA18mG97xHhWI,172
  eva/core/utils/io/dataframe.py,sha256=CIHFowljH17waDkJ9YJVEVXAIcxMwoLjUgoBttiNk8w,509
+ eva/core/utils/io/gz.py,sha256=xxDkOUV2TFEK8pT7j6S_6iSzUUUmXN-sTum-gRuhij0,919
  eva/core/utils/memory.py,sha256=ZvcbS1eUPXdHIoL8ctFU56_-cyUniObBmIctUbvso48,636
- eva/core/utils/multiprocessing.py,sha256=PxUxMyvI62lghyWF46O5RNL-J7DUR2IrXSwdkbhC0ic,1383
+ eva/core/utils/multiprocessing.py,sha256=BWX8AW_KPLgIIlbsPG1kYdtbHPx6Dklw13bu4u84pF0,3006
  eva/core/utils/operations.py,sha256=eoC_ScuHUMDCuk08j1bosiQZdPrgiIODqqheR9MtJHQ,641
  eva/core/utils/parser.py,sha256=2czmwEGJJ6PtmaD86s9I14P-_sek4DmDCkEatRGT5sI,725
  eva/core/utils/progress_bar.py,sha256=KvvsM_v3_Fhb4JvbEEPHb4PJMokg6mNLj-o6dkfzcMc,499
@@ -107,7 +108,7 @@ eva/vision/callbacks/__init__.py,sha256=su1V73L0dDVYWSyvV_lnWbszDi2KikRraF7Osgea
  eva/vision/callbacks/loggers/__init__.py,sha256=td1JRJbE08nsGIZdO64_yLC3FUuMDp0kma0HjpUdXT4,161
  eva/vision/callbacks/loggers/batch/__init__.py,sha256=DVYP7Aonbi4wg_ERHRj_8kb87Ee_75wRZzdduJ_icQk,173
  eva/vision/callbacks/loggers/batch/base.py,sha256=hcAd5iiHvjZ0DIf4Qt4ENT54D6ky_1OO4rKQZqeo-1k,3628
- eva/vision/callbacks/loggers/batch/segmentation.py,sha256=PbgBVp6TGgko7Um8gN0fHyCs2sE42Uqe3M4grxSBykE,6749
+ eva/vision/callbacks/loggers/batch/segmentation.py,sha256=GYh2kfexW5pUZ0BdApYJI3e8xsuNkjIzkj5jnuKtHR4,6886
  eva/vision/data/__init__.py,sha256=aoKPmX8P2Q2k2W3nlq8vFU41FV6Sze-0SDuWtU-ETh4,111
  eva/vision/data/datasets/__init__.py,sha256=COhMRB9QJcjfbmfpRcYEztDwN9pl7IJNiH29pCZo4CA,908
  eva/vision/data/datasets/_utils.py,sha256=epPcaYE4w2_LtUKLLQJh6qQxUNVBe22JA06k4WUerYQ,1430
@@ -130,7 +131,7 @@ eva/vision/data/datasets/segmentation/embeddings.py,sha256=0KaadzPxN6OrKNnFu3YsG
  eva/vision/data/datasets/segmentation/lits.py,sha256=_R5AGFX8jVPwK3UKaYQfIRLBpM5ZmDg6KRziisUDYps,7175
  eva/vision/data/datasets/segmentation/lits_balanced.py,sha256=s5kPfqB41Vkcm5Jh34mLAO0NweMSIlV2fMXJsRjJsF8,3384
  eva/vision/data/datasets/segmentation/monusac.py,sha256=OTWHAD1b48WeT6phVf466w_nJUOGdBCGKWiWw68PAdw,8423
- eva/vision/data/datasets/segmentation/total_segmentator_2d.py,sha256=pqrNRQu5kbKSd-l5jwaiE67qyF2jLQ3JrO7TjhGGF7w,13098
+ eva/vision/data/datasets/segmentation/total_segmentator_2d.py,sha256=oyb38pAV8GN5Ph1-NSVooFhNP1TfOuXSUjUIAf6rdiY,14376
  eva/vision/data/datasets/structs.py,sha256=RaTDW-B36PumcR5gymhCiX-r8GiKqIFcjqoEEjjFyUE,389
  eva/vision/data/datasets/vision.py,sha256=hKKFMb65UJQzOyYm8FTGkOGBOinMRu7R8sOFMbCmQX4,1100
  eva/vision/data/datasets/wsi.py,sha256=-rypkcd6CPBM_oPuLszUx9q4zSPzeO1H6JKqvOtLlHw,8282
@@ -171,7 +172,7 @@ eva/vision/metrics/segmentation/generalized_dice.py,sha256=FqFzo7YWBwSlihmlgQg-O
  eva/vision/metrics/segmentation/mean_iou.py,sha256=xR3wQOHT77SNKTRRPdDaWpJ88qgk9PIBT5n2lnKTUfM,2161
  eva/vision/models/__init__.py,sha256=a-P6JL73A3miHQnqgqUz07XtVmQB_o4DqPImk5rEATo,275
  eva/vision/models/modules/__init__.py,sha256=vaM_V6OF2s0lYjralP8dzv8mAtv_xIMZItfXgz0NZg8,156
- eva/vision/models/modules/semantic_segmentation.py,sha256=VrzQemVgJbbXVQVzvvDjRaeXNhHsWDXRmTXGZhe-VCo,6389
+ eva/vision/models/modules/semantic_segmentation.py,sha256=i1hYgWnVLf7RTzt_ZrO76bQYNOKmyjt_Hl8AFQqFhAk,6400
  eva/vision/models/networks/__init__.py,sha256=j43IurizNlAyKPH2jwDHaeq49L2QvwbHWqUaptA1mG4,100
  eva/vision/models/networks/abmil.py,sha256=N1eH4fn1nXmgXurSQyQIxxonv7nsqeeuPWaQSHeltfs,6796
  eva/vision/models/networks/backbones/__init__.py,sha256=LsMx92eEoCQ5aNVFp7mHjrD-9ZeNawMiK6zZSYzl_PU,296
@@ -203,7 +204,7 @@ eva/vision/models/wrappers/__init__.py,sha256=8MT8qFM4nUXGpK1_i3rp70ODkOjn2KhhRo
  eva/vision/models/wrappers/from_registry.py,sha256=gdnxyg9drqlxfTNuS3aLbWGbZIwX1VNl0uudfjzVsXM,1614
  eva/vision/models/wrappers/from_timm.py,sha256=Z38Nb1i6OPKkgvFZOvGx-O3AZQuscf1zRVyrEBXQdJg,2320
  eva/vision/utils/__init__.py,sha256=vaUovprE743SmyFH8l6uk4pYSWpI4zxn7lN0EwePTJI,96
- eva/vision/utils/colormap.py,sha256=P904auPzaxGESTjFcbv550fc49DeXklSHkuhXWFXCEo,2384
+ eva/vision/utils/colormap.py,sha256=sP1F0JCX3abZfFgdxEjLJO-LhNYKjXZvXxs03ZgrEvI,2876
  eva/vision/utils/convert.py,sha256=fqGmKrg5-JJLrTkTXB4YDcWTudXPrO1gGjsckVRUesU,1881
  eva/vision/utils/io/__init__.py,sha256=XGJ_W94DVEYXJ_tVpr_20NMpR5JLWEWHGF3v9Low79A,610
  eva/vision/utils/io/_utils.py,sha256=JzOt7Frj6ScF_aNjFtfHBn4ROnl6NhUZucmQhLc4Cww,768
@@ -211,8 +212,8 @@ eva/vision/utils/io/image.py,sha256=IdOkr5MYqhYHz8U9drZ7wULTM3YHwCWSjZlu_Qdl4GQ,
  eva/vision/utils/io/mat.py,sha256=qpGifyjmpE0Xhv567Si7-zxKrgkgE0sywP70cHiLFGU,808
  eva/vision/utils/io/nifti.py,sha256=Q8Cd-ovqGZbevqfhb4waS6xI5xV3DXoWnDd5rhzLRNU,2595
  eva/vision/utils/io/text.py,sha256=qYgfo_ZaDZWfG02NkVVYzo5QFySqdCCz5uLA9d-zXtI,701
- kaiko_eva-0.1.3.dist-info/METADATA,sha256=xgKnK4lR6GSdWW0oB52wY7spKYlq_jq19AAjJREHpBg,24869
- kaiko_eva-0.1.3.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
- kaiko_eva-0.1.3.dist-info/entry_points.txt,sha256=6CSLu9bmQYJSXEg8gbOzRhxH0AGs75BB-vPm3VvfcNE,88
- kaiko_eva-0.1.3.dist-info/licenses/LICENSE,sha256=e6AEzr7j_R-PYr2qLO-JwLn8y70jbVD3U2mxbRmwcI4,11338
- kaiko_eva-0.1.3.dist-info/RECORD,,
+ kaiko_eva-0.1.5.dist-info/METADATA,sha256=ZBbW8rELBL5PJ9ijkVItgchuJTjRCsbyAtBjM-un3e0,24869
+ kaiko_eva-0.1.5.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+ kaiko_eva-0.1.5.dist-info/entry_points.txt,sha256=6CSLu9bmQYJSXEg8gbOzRhxH0AGs75BB-vPm3VvfcNE,88
+ kaiko_eva-0.1.5.dist-info/licenses/LICENSE,sha256=e6AEzr7j_R-PYr2qLO-JwLn8y70jbVD3U2mxbRmwcI4,11338
+ kaiko_eva-0.1.5.dist-info/RECORD,,