kaiko-eva 0.2.1__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


eva/core/trainers/_recorder.py
@@ -129,7 +129,10 @@ class SessionRecorder:
     def _save_config(self) -> None:
         """Saves the config yaml with resolved env placeholders to the output directory."""
         if self.config_path:
-            config = OmegaConf.load(self.config_path)
+            config_fs = cloud_io.get_filesystem(self.config_path)
+            with config_fs.open(self.config_path, "r") as config_file:
+                config = OmegaConf.load(config_file)  # type: ignore
+
             fs = cloud_io.get_filesystem(self._output_dir, anon=False)
             with fs.open(os.path.join(self._output_dir, self._config_file), "w") as file:
                 config_yaml = OmegaConf.to_yaml(config, resolve=True)
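This hunk swaps a direct `OmegaConf.load(path)` call for an fsspec-backed read, so the session config can be resolved from remote storage (e.g. `s3://...`) as well as local disk, matching how `_output_dir` is already handled. A minimal standalone sketch of the same pattern, assuming `cloud_io` here is Lightning's fsspec wrapper (`lightning.fabric.utilities.cloud_io`):

```python
# Sketch only (assumes cloud_io is lightning.fabric.utilities.cloud_io,
# a thin wrapper around fsspec).
from lightning.fabric.utilities import cloud_io
from omegaconf import OmegaConf

def load_config(config_path: str):
    """Load a YAML config from a local path or a remote URL (e.g. s3://...)."""
    fs = cloud_io.get_filesystem(config_path)  # backend chosen from the URL scheme
    with fs.open(config_path, "r") as config_file:
        return OmegaConf.load(config_file)  # OmegaConf accepts file-like objects
```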
eva/vision/models/modules/semantic_segmentation.py
@@ -1,6 +1,7 @@
 """Neural Network Semantic Segmentation Module."""
 
-from typing import Any, Callable, Dict, Iterable, List
+import functools
+from typing import Any, Callable, Dict, Iterable, List, Tuple
 
 import torch
 from lightning.pytorch.cli import LRSchedulerCallable, OptimizerCallable
@@ -35,6 +36,7 @@ class SemanticSegmentationModule(module.ModelModule):
         metrics: metrics_lib.MetricsSchema | None = None,
         postprocess: batch_postprocess.BatchPostProcess | None = None,
         save_decoder_only: bool = True,
+        spatial_dims: int = 2,
     ) -> None:
         """Initializes the neural net head module.
 
@@ -57,6 +59,8 @@ class SemanticSegmentationModule(module.ModelModule):
                 predictions and targets.
             save_decoder_only: Whether to save only the decoder during checkpointing. If False,
                 will also save the encoder (not recommended when frozen).
+            spatial_dims: The number of spatial dimensions, 2 for 2D
+                and 3 for 3D segmentation.
         """
         super().__init__(metrics=metrics, postprocess=postprocess)
 
@@ -68,6 +72,7 @@ class SemanticSegmentationModule(module.ModelModule):
         self.lr_scheduler = lr_scheduler
         self.save_decoder_only = save_decoder_only
         self.inferer = inferer
+        self.spatial_dims = spatial_dims
 
     @override
     def configure_model(self) -> None:
@@ -111,13 +116,14 @@ class SemanticSegmentationModule(module.ModelModule):
     def forward(
         self,
         tensor: torch.Tensor,
+        to_size: Tuple[int, int],
         *args: Any,
         **kwargs: Any,
     ) -> torch.Tensor:
         return (
-            self.inferer(tensor, network=self._forward_networks)
+            self.inferer(tensor, network=functools.partial(self._forward_networks, to_size=to_size))
             if self.inferer is not None and not self.training
-            else self._forward_networks(tensor)
+            else self._forward_networks(tensor, to_size=to_size)
         )
 
     @override
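The `functools.partial` matters because inference wrappers (e.g. MONAI's `SlidingWindowInferer`, a plausible candidate for `self.inferer`) invoke the supplied `network` with only the input tensor, leaving no way to pass the extra `to_size` keyword through. Pre-binding it turns the two-argument `_forward_networks` into the one-argument callable the inferer expects. A self-contained illustration (the `interpolate` resize is a stand-in, not eva's actual decoder logic):

```python
import functools
from typing import Tuple

import torch

def forward_networks(tensor: torch.Tensor, to_size: Tuple[int, int]) -> torch.Tensor:
    # Stand-in for _forward_networks: produce an output at the requested size.
    return torch.nn.functional.interpolate(tensor, size=to_size)

# The inferer only ever calls network(patch), so bind to_size up front:
network = functools.partial(forward_networks, to_size=(64, 64))
patch = torch.rand(1, 3, 32, 32)
assert network(patch).shape == (1, 3, 64, 64)
```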
@@ -168,7 +174,7 @@ class SemanticSegmentationModule(module.ModelModule):
             The batch step output.
         """
         data, targets, metadata = INPUT_TENSOR_BATCH(*batch)
-        predictions = self(data)
+        predictions = self(data, to_size=targets.shape[-self.spatial_dims :])
         loss = self.criterion(predictions, targets)
         return {
             "loss": loss,
@@ -177,12 +183,11 @@ class SemanticSegmentationModule(module.ModelModule):
             "metadata": metadata,
         }
 
-    def _forward_networks(self, tensor: torch.Tensor) -> torch.Tensor:
+    def _forward_networks(self, tensor: torch.Tensor, to_size: Tuple[int, int]) -> torch.Tensor:
         """Passes the input tensor through the encoder and decoder."""
         features = self.encoder(tensor) if self.encoder else tensor
         if isinstance(self.decoder, segmentation.Decoder):
             if not isinstance(features, list):
                 raise ValueError(f"Expected a list of feature map tensors, got {type(features)}.")
-            image_size = (tensor.shape[-2], tensor.shape[-1])
-            return self.decoder(DecoderInputs(features, image_size, tensor))
+            return self.decoder(DecoderInputs(features, to_size, tensor))
         return self.decoder(features)
kaiko_eva-0.2.2.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: kaiko-eva
-Version: 0.2.1
+Version: 0.2.2
 Summary: Evaluation Framework for oncology foundation models.
 Keywords: machine-learning,evaluation-framework,oncology,foundation-models
 Author-Email: Ioannis Gatopoulos <ioannis@kaiko.ai>, =?utf-8?q?Nicolas_K=C3=A4nzig?= <nicolas@kaiko.ai>, Roman Moser <roman@kaiko.ai>
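After upgrading, the installed version can be confirmed from the package metadata using only the standard library:

```python
import importlib.metadata

print(importlib.metadata.version("kaiko-eva"))  # "0.2.2" after this release
```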
@@ -293,7 +293,7 @@ Check out the [documentation](https://kaiko-ai.github.io/eva/) for more informat
 
 ### Highlights:
 - Easy and reliable benchmark of Oncology FMs
-- Supports path-level classification, slide-level classification and semantic segmentation downstream tasks
+- Supports patch-level classification, slide-level classification and semantic segmentation downstream tasks
 - Automatic embedding inference and evaluation of a downstream task
 - Native support of popular medical [datasets](https://kaiko-ai.github.io/eva/dev/datasets/) and models
 - Produce statistics over multiple evaluation fits and multiple metrics
@@ -448,26 +448,28 @@ input, resulting in a faster evaluation.
 Here are some examples to get you started:
 
 - Perform a downstream offline **classification** evaluation of `DINO ViT-S/16`
-  on the `BACH` dataset with linear probing by first inferring the embeddings
-  and then performing 5 sequential fits:
+  on the `BACH` dataset with linear probing by first pre-calculating the embeddings:
   ```sh
-  export DOWNLOAD_DATA=true
-  eva predict_fit --config https://raw.githubusercontent.com/kaiko-ai/eva/main/configs/vision/dino_vit/offline/bach.yaml
+  DOWNLOAD_DATA=true \
+  MODEL_NAME=universal/vit_small_patch16_224_dino \
+  eva predict_fit --config https://raw.githubusercontent.com/kaiko-ai/eva/main/configs/vision/pathology/offline/classification/bach.yaml
   ```
 
-- Perform a downstream online **segmentation** evaluation of `DINO ViT-S/16` on the
-  `MoNuSAC` dataset with the `ConvDecoderMS` decoder:
+- Perform a downstream online **segmentation** evaluation of `DINO ViT-S/16` on the `MoNuSAC` dataset with the `ConvDecoderWithImage` decoder:
   ```sh
-  export DOWNLOAD_DATA=true
-  eva fit --config https://raw.githubusercontent.com/kaiko-ai/eva/main/configs/vision/dino_vit/online/monusac.yaml
+  DOWNLOAD_DATA=true \
+  MODEL_NAME=universal/vit_small_patch16_224_dino \
+  eva fit --config https://raw.githubusercontent.com/kaiko-ai/eva/main/configs/vision/pathology/online/segmentation/monusac.yaml
   ```
 
+By default `eva` will perform 5 evaluation runs using different seeds, however, you can control the number of runs through the `N_RUNS` environment variable or in the configuration file. The results will be saved to `./logs` by default, or to `OUTPUT_ROOT` if specified.
+
 For more examples, take a look at the [configs](https://github.com/kaiko-ai/eva/tree/main/configs)
-and [tutorials](https://kaiko-ai.github.io/eva/dev/user-guide/advanced/replicate_evaluations/).
+and [tutorials](https://kaiko-ai.github.io/eva/main/user-guide/advanced/replicate_evaluations/).
 
 > [!NOTE]
 > All the datasets that support automatic download in the repo have by default the option to automatically download set to false.
-> For automatic download you have to manually set the environmental variable `DOWNLOAD_DATA=true` or in the configuration file `download=true`.
+> For automatic download you have to manually set the environment variable `DOWNLOAD_DATA=true` or in the configuration file `download=true`.
 
 ## Leaderboards
 
kaiko_eva-0.2.2.dist-info/RECORD
@@ -96,7 +96,7 @@ eva/core/models/wrappers/huggingface.py,sha256=5CoNieivdjwvoawo7dZtWfYZkW-Mey1j0
 eva/core/models/wrappers/onnx.py,sha256=-iV-IlczTvTTEQuJycZeSVWdSl2kVJXc1eeRLgQQZ7Q,1834
 eva/core/trainers/__init__.py,sha256=jhsKJF7HAae7EOiG3gKIAHH_h3dZlTE2JRcCHJmOzJc,208
 eva/core/trainers/_logging.py,sha256=gi4FqPy2GuVmh0WZY6mYwF7zMPvnoFA050B0XdCP6PU,2571
-eva/core/trainers/_recorder.py,sha256=y6i5hfXftWjeV3eQHmMjUOkWumnZ2QNv_u275LLmvPA,7702
+eva/core/trainers/_recorder.py,sha256=uD17l_WVveFuWuann59VU9iJ-Jumdh9F6vnAcL3M_FU,7855
 eva/core/trainers/_utils.py,sha256=M3h8lVhUmkeSiEXpX9hRdMvThGFCnTP15gv-hd1CZkc,321
 eva/core/trainers/functional.py,sha256=rLtQZw8TcAa4NYIf901TmoQiJDNm4RGVLN-64nku3Jo,4445
 eva/core/trainers/trainer.py,sha256=a3OwLWOZKDqxayrd0ugUmxJKyQx6XDb4GHtdL8-AEV0,4826
@@ -201,7 +201,7 @@ eva/vision/metrics/wrappers/__init__.py,sha256=V4z3hradMa6CQgTkk1bc2cbZzCgcoIYw7
 eva/vision/metrics/wrappers/monai.py,sha256=FNa1yHN2U3vO6BGqS0BFm8uJAL6DCzSE4XOFCV4aBjg,885
 eva/vision/models/__init__.py,sha256=a-P6JL73A3miHQnqgqUz07XtVmQB_o4DqPImk5rEATo,275
 eva/vision/models/modules/__init__.py,sha256=vaM_V6OF2s0lYjralP8dzv8mAtv_xIMZItfXgz0NZg8,156
-eva/vision/models/modules/semantic_segmentation.py,sha256=eXRx7wXKDLqMYHGj9IH_6WxlQNYaYEU6J70soVFedp0,7629
+eva/vision/models/modules/semantic_segmentation.py,sha256=f04QwxSt8x9oVHf5JMeN5b_PMPmfLcso_icDBma1ToE,7930
 eva/vision/models/networks/__init__.py,sha256=j43IurizNlAyKPH2jwDHaeq49L2QvwbHWqUaptA1mG4,100
 eva/vision/models/networks/abmil.py,sha256=N1eH4fn1nXmgXurSQyQIxxonv7nsqeeuPWaQSHeltfs,6796
 eva/vision/models/networks/backbones/__init__.py,sha256=mvYVtmJOvYLCXDX52hP6dzQxj9cQikwSeBZvEDNyNmU,347
@@ -246,8 +246,8 @@ eva/vision/utils/io/image.py,sha256=IdOkr5MYqhYHz8U9drZ7wULTM3YHwCWSjZlu_Qdl4GQ,
 eva/vision/utils/io/mat.py,sha256=qpGifyjmpE0Xhv567Si7-zxKrgkgE0sywP70cHiLFGU,808
 eva/vision/utils/io/nifti.py,sha256=TFMgNhLqIK3sl3RjIRXEABM7FmSQjqVOwk1vXkuvX2w,4983
 eva/vision/utils/io/text.py,sha256=qYgfo_ZaDZWfG02NkVVYzo5QFySqdCCz5uLA9d-zXtI,701
-kaiko_eva-0.2.1.dist-info/METADATA,sha256=78-RgtBLumKmrWLlv6Q8iJ6JU-InxPCudfJcuy7pVUQ,24992
-kaiko_eva-0.2.1.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
-kaiko_eva-0.2.1.dist-info/entry_points.txt,sha256=6CSLu9bmQYJSXEg8gbOzRhxH0AGs75BB-vPm3VvfcNE,88
-kaiko_eva-0.2.1.dist-info/licenses/LICENSE,sha256=e6AEzr7j_R-PYr2qLO-JwLn8y70jbVD3U2mxbRmwcI4,11338
-kaiko_eva-0.2.1.dist-info/RECORD,,
+kaiko_eva-0.2.2.dist-info/METADATA,sha256=hiFFWrNu2fMZd7VLI08q4EDOc0IU6X4T00RGkHC0QT8,25363
+kaiko_eva-0.2.2.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+kaiko_eva-0.2.2.dist-info/entry_points.txt,sha256=6CSLu9bmQYJSXEg8gbOzRhxH0AGs75BB-vPm3VvfcNE,88
+kaiko_eva-0.2.2.dist-info/licenses/LICENSE,sha256=e6AEzr7j_R-PYr2qLO-JwLn8y70jbVD3U2mxbRmwcI4,11338
+kaiko_eva-0.2.2.dist-info/RECORD,,
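For reference, each RECORD entry has the form `path,sha256=<digest>,<size>`, where the digest is the URL-safe base64 encoding of the file's SHA-256 hash with `=` padding stripped (the wheel convention from PEP 376/427). A sketch of how an entry can be verified:

```python
import base64
import hashlib

def record_hash(path: str) -> str:
    """Compute a RECORD-style hash: urlsafe-b64(sha256), '=' padding stripped."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# e.g. record_hash("eva/core/trainers/_recorder.py") on an unpacked 0.2.2 wheel
# should match "sha256=uD17l_WVveFuWuann59VU9iJ-Jumdh9F6vnAcL3M_FU" above.
```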