kaiko-eva 0.1.0__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (63)
  1. eva/core/callbacks/writers/embeddings/base.py +3 -4
  2. eva/core/data/dataloaders/dataloader.py +2 -2
  3. eva/core/data/splitting/random.py +6 -5
  4. eva/core/data/splitting/stratified.py +12 -6
  5. eva/core/losses/__init__.py +5 -0
  6. eva/core/losses/cross_entropy.py +27 -0
  7. eva/core/metrics/__init__.py +0 -4
  8. eva/core/metrics/defaults/__init__.py +0 -2
  9. eva/core/models/modules/module.py +9 -9
  10. eva/core/models/transforms/extract_cls_features.py +17 -9
  11. eva/core/models/transforms/extract_patch_features.py +23 -11
  12. eva/core/utils/progress_bar.py +15 -0
  13. eva/vision/data/datasets/__init__.py +4 -0
  14. eva/vision/data/datasets/classification/__init__.py +2 -1
  15. eva/vision/data/datasets/classification/camelyon16.py +4 -1
  16. eva/vision/data/datasets/classification/panda.py +17 -1
  17. eva/vision/data/datasets/classification/wsi.py +4 -1
  18. eva/vision/data/datasets/segmentation/__init__.py +2 -0
  19. eva/vision/data/datasets/segmentation/consep.py +2 -2
  20. eva/vision/data/datasets/segmentation/lits.py +49 -29
  21. eva/vision/data/datasets/segmentation/lits_balanced.py +93 -0
  22. eva/vision/data/datasets/segmentation/monusac.py +7 -7
  23. eva/vision/data/datasets/segmentation/total_segmentator_2d.py +2 -2
  24. eva/vision/data/datasets/wsi.py +37 -1
  25. eva/vision/data/wsi/patching/coordinates.py +9 -1
  26. eva/vision/data/wsi/patching/samplers/_utils.py +2 -8
  27. eva/vision/data/wsi/patching/samplers/random.py +4 -2
  28. eva/vision/losses/__init__.py +2 -2
  29. eva/vision/losses/dice.py +75 -8
  30. eva/vision/metrics/__init__.py +11 -0
  31. eva/vision/metrics/defaults/__init__.py +7 -0
  32. eva/{core → vision}/metrics/defaults/segmentation/__init__.py +1 -1
  33. eva/{core → vision}/metrics/defaults/segmentation/multiclass.py +2 -1
  34. eva/vision/metrics/segmentation/BUILD +1 -0
  35. eva/vision/metrics/segmentation/__init__.py +9 -0
  36. eva/vision/metrics/segmentation/_utils.py +69 -0
  37. eva/{core/metrics → vision/metrics/segmentation}/generalized_dice.py +12 -10
  38. eva/vision/metrics/segmentation/mean_iou.py +57 -0
  39. eva/vision/models/modules/semantic_segmentation.py +4 -3
  40. eva/vision/models/networks/backbones/_utils.py +12 -0
  41. eva/vision/models/networks/backbones/pathology/__init__.py +4 -1
  42. eva/vision/models/networks/backbones/pathology/histai.py +8 -2
  43. eva/vision/models/networks/backbones/pathology/mahmood.py +2 -9
  44. eva/vision/models/networks/backbones/pathology/owkin.py +14 -0
  45. eva/vision/models/networks/backbones/pathology/paige.py +51 -0
  46. eva/vision/models/networks/decoders/__init__.py +1 -1
  47. eva/vision/models/networks/decoders/segmentation/__init__.py +12 -4
  48. eva/vision/models/networks/decoders/segmentation/base.py +16 -0
  49. eva/vision/models/networks/decoders/segmentation/{conv2d.py → decoder2d.py} +26 -22
  50. eva/vision/models/networks/decoders/segmentation/linear.py +2 -2
  51. eva/vision/models/networks/decoders/segmentation/semantic/__init__.py +12 -0
  52. eva/vision/models/networks/decoders/segmentation/{common.py → semantic/common.py} +3 -3
  53. eva/vision/models/networks/decoders/segmentation/semantic/with_image.py +94 -0
  54. eva/vision/models/networks/decoders/segmentation/typings.py +18 -0
  55. eva/vision/utils/io/__init__.py +7 -1
  56. eva/vision/utils/io/nifti.py +19 -4
  57. {kaiko_eva-0.1.0.dist-info → kaiko_eva-0.1.3.dist-info}/METADATA +3 -34
  58. {kaiko_eva-0.1.0.dist-info → kaiko_eva-0.1.3.dist-info}/RECORD +61 -48
  59. {kaiko_eva-0.1.0.dist-info → kaiko_eva-0.1.3.dist-info}/WHEEL +1 -1
  60. eva/core/metrics/mean_iou.py +0 -120
  61. eva/vision/models/networks/decoders/decoder.py +0 -7
  62. {kaiko_eva-0.1.0.dist-info → kaiko_eva-0.1.3.dist-info}/entry_points.txt +0 -0
  63. {kaiko_eva-0.1.0.dist-info → kaiko_eva-0.1.3.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,94 @@
+ """Convolutional semantic segmentation decoders that use input image & feature maps as input."""
+
+ from typing import List
+
+ import torch
+ from torch import nn
+ from torchvision.transforms.functional import rgb_to_grayscale
+ from typing_extensions import override
+
+ from eva.vision.models.networks.decoders.segmentation import decoder2d
+ from eva.vision.models.networks.decoders.segmentation.typings import DecoderInputs
+
+
+ class ConvDecoderWithImage(decoder2d.Decoder2D):
+     """A convolutional that in addition to encoded features, also takes the input image as input.
+
+     In a first stage, the input features are upsampled and passed through a convolutional layer,
+     while in the second stage, the input image channels are concatenated with the upsampled features
+     and passed through additional convolutional blocks in order to combine the image prior
+     information with the encoded features. Lastly, a 1x1 conv operation reduces the number of
+     channels to the number of classes.
+     """
+
+     _default_hidden_dims = [64, 32, 32]
+
+     def __init__(
+         self,
+         in_features: int,
+         num_classes: int,
+         greyscale: bool = False,
+         hidden_dims: List[int] | None = None,
+     ) -> None:
+         """Initializes the decoder.
+
+         Args:
+             in_features: The hidden dimension size of the embeddings.
+             num_classes: Number of output classes as channels.
+             greyscale: Whether to convert input images to greyscale.
+             hidden_dims: List of hidden dimensions for the convolutional layers.
+         """
+         hidden_dims = hidden_dims or self._default_hidden_dims
+         if len(hidden_dims) != 3:
+             raise ValueError("Hidden dims must have 3 elements.")
+
+         super().__init__(
+             layers=nn.Sequential(
+                 nn.Upsample(scale_factor=2),
+                 Conv2dBnReLU(in_features, hidden_dims[0]),
+             )
+         )
+         self.greyscale = greyscale
+
+         additional_hidden_dims = 1 if greyscale else 3
+         self.image_block = nn.Sequential(
+             Conv2dBnReLU(hidden_dims[0] + additional_hidden_dims, hidden_dims[1]),
+             Conv2dBnReLU(hidden_dims[1], hidden_dims[2]),
+         )
+
+         self.classifier = nn.Conv2d(hidden_dims[2], num_classes, kernel_size=1)
+
+     @override
+     def forward(self, decoder_inputs: DecoderInputs) -> torch.Tensor:
+         if decoder_inputs.images is None:
+             raise ValueError("Input images are missing.")
+
+         logits = super().forward(decoder_inputs)
+         in_images = (
+             rgb_to_grayscale(decoder_inputs.images) if self.greyscale else decoder_inputs.images
+         )
+         logits = torch.cat([logits, in_images], dim=1)
+         logits = self.image_block(logits)
+
+         return self.classifier(logits)
+
+
+ class Conv2dBnReLU(nn.Sequential):
+     """A single convolutional layer with batch normalization and ReLU activation."""
+
+     def __init__(
+         self, in_channels: int, out_channels: int, kernel_size: int = 3, padding: int = 1
+     ) -> None:
+         """Initializes the layer.
+
+         Args:
+             in_channels: Number of input channels.
+             out_channels: Number of output channels.
+             kernel_size: Size of the convolutional kernel.
+             padding: Padding size for the convolutional layer.
+         """
+         super().__init__(
+             nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding),
+             nn.BatchNorm2d(out_channels),
+             nn.ReLU(inplace=True),
+         )
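To make the wiring of the new decoder concrete, here is a minimal usage sketch based only on the signatures visible in this hunk. The feature/image shapes, and the expectation that the base `Decoder2D` upsamples its output to `image_size` before the image concatenation, are assumptions rather than something this diff shows.

```python
import torch

from eva.vision.models.networks.decoders.segmentation.semantic.with_image import (
    ConvDecoderWithImage,
)
from eva.vision.models.networks.decoders.segmentation.typings import DecoderInputs

# Hypothetical shapes: a batch of 2 RGB images at 224x224 and one 16x16 grid
# of 384-dimensional patch features produced by some upstream encoder.
features = [torch.randn(2, 384, 16, 16)]
images = torch.randn(2, 3, 224, 224)

decoder = ConvDecoderWithImage(in_features=384, num_classes=5, greyscale=True)
logits = decoder(DecoderInputs(features=features, image_size=(224, 224), images=images))
# Assuming Decoder2D upsamples to image_size, logits should be (2, 5, 224, 224).
```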
@@ -0,0 +1,18 @@
+ """Type-hints for segmentation decoders."""
+
+ from typing import List, NamedTuple, Tuple
+
+ import torch
+
+
+ class DecoderInputs(NamedTuple):
+     """Input scheme for segmentation decoders."""
+
+     features: List[torch.Tensor]
+     """List of image features generated by the encoder from the original images."""
+
+     image_size: Tuple[int, int]
+     """Size of the original input images to be used for upsampling."""
+
+     images: torch.Tensor | None = None
+     """The original input images for which the encoder generated the encoded_images."""
@@ -2,7 +2,12 @@

  from eva.vision.utils.io.image import read_image, read_image_as_array, read_image_as_tensor
  from eva.vision.utils.io.mat import read_mat, save_mat
- from eva.vision.utils.io.nifti import fetch_nifti_shape, read_nifti, save_array_as_nifti
+ from eva.vision.utils.io.nifti import (
+     fetch_nifti_axis_direction_code,
+     fetch_nifti_shape,
+     read_nifti,
+     save_array_as_nifti,
+ )
  from eva.vision.utils.io.text import read_csv

  __all__ = [
@@ -10,6 +15,7 @@ __all__ = [
      "read_image_as_array",
      "read_image_as_tensor",
      "fetch_nifti_shape",
+     "fetch_nifti_axis_direction_code",
      "read_nifti",
      "save_array_as_nifti",
      "read_csv",
@@ -5,6 +5,7 @@ from typing import Any, Tuple
  import nibabel as nib
  import numpy as np
  import numpy.typing as npt
+ from nibabel import orientations

  from eva.vision.utils.io import _utils

@@ -28,13 +29,13 @@
          ValueError: If the input channel is invalid for the image.
      """
      _utils.check_file(path)
-     image_data = nib.load(path)  # type: ignore
+     image_data: nib.Nifti1Image = nib.load(path)  # type: ignore
      if slice_index is not None:
-         image_data = image_data.slicer[:, :, slice_index : slice_index + 1]  # type: ignore
+         image_data = image_data.slicer[:, :, slice_index : slice_index + 1]

-     image_array = image_data.get_fdata()  # type: ignore
+     image_array = image_data.get_fdata()
      if use_storage_dtype:
-         image_array = image_array.astype(image_data.get_data_dtype())  # type: ignore
+         image_array = image_array.astype(image_data.get_data_dtype())

      return image_array

@@ -73,3 +74,17 @@ def fetch_nifti_shape(path: str) -> Tuple[int]:
      _utils.check_file(path)
      image = nib.load(path)  # type: ignore
      return image.header.get_data_shape()  # type: ignore
+
+
+ def fetch_nifti_axis_direction_code(path: str) -> str:
+     """Fetches the NIfTI axis direction code from a file.
+
+     Args:
+         path: The path to the NIfTI file.
+
+     Returns:
+         The axis direction codes as string (e.g. "LAS").
+     """
+     _utils.check_file(path)
+     image_data: nib.Nifti1Image = nib.load(path)  # type: ignore
+     return "".join(orientations.aff2axcodes(image_data.affine))
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: kaiko-eva
- Version: 0.1.0
+ Version: 0.1.3
  Summary: Evaluation Framework for oncology foundation models.
  Keywords: machine-learning,evaluation-framework,oncology,foundation-models
  Author-Email: Ioannis Gatopoulos <ioannis@kaiko.ai>, =?utf-8?q?Nicolas_K=C3=A4nzig?= <nicolas@kaiko.ai>, Roman Moser <roman@kaiko.ai>
@@ -468,41 +468,10 @@ and [tutorials](https://kaiko-ai.github.io/eva/dev/user-guide/advanced/replicate

  ## Leaderboards

- In this section you will find model benchmarks which were generated with _`eva`_.
+ The following table shows the FMs we have evaluated with _`eva`_. For more detailed information about the evaluation process, please refer to our [documentation](https://kaiko-ai.github.io/eva/main/leaderboards/).

- ### Table I: WSI and microscopy image tasks
+ ![Pathology Leaderboard](./docs/images/leaderboard.svg)

- <br />
-
- <div align="center">
-
- | Model | BACH | CRC | MHIST | PCam | Camelyon16 | PANDA | CoNSeP | MoNuSAC |
- |---------|-------|-------|-------|--------|------------|-------|------------|-------|
- | ViT-S/16 _(random)_ <sup>[1]</sup> | 0.411|0.613|0.5|0.752|0.551|0.347|0.489|0.394|
- | ViT-S/16 _(ImageNet)_ <sup>[1]</sup> | 0.675|0.936|0.827|0.861|0.751|0.676|0.54|0.512|
- | DINO<sub>(p=16)</sub> <sup>[2]</sup> | 0.77|0.936|0.751|0.905|0.869|0.737|0.625|0.549|
- | Phikon <sup>[3]</sup> | 0.715|0.942|0.766|0.925|0.879|0.784|0.68|0.554|
- | UNI <sup>[4]</sup> | 0.797|0.95|0.835|0.939|0.933|0.774|0.67|0.575|
- | ViT-S/16 _(kaiko.ai)_ <sup>[5]</sup> | 0.8|0.949|0.831|0.902|0.897|0.77|0.622|0.573|
- | ViT-S/8 _(kaiko.ai)_ <sup>[5]</sup> | 0.825|0.948|0.826|0.887|0.879|0.741|0.677|0.617|
- | ViT-B/16 _(kaiko.ai)_ <sup>[5]</sup> | 0.846|0.959|0.839|0.906|0.891|0.753|0.647|0.572|
- | ViT-B/8 _(kaiko.ai)_ <sup>[5]</sup> | 0.867|0.952|0.814|0.921|0.939|0.761|0.706|0.661|
- | ViT-L/14 _(kaiko.ai)_ <sup>[5]</sup> | 0.862|0.935|0.822|0.907|0.941|0.769|0.686|0.599|
-
- _Table I: Linear probing evaluation of FMs on patch-level downstream datasets.<br> We report balanced accuracy
- for classification tasks and generalized Dice score for semgetnation tasks, averaged over 5 runs. Results are
- reported on the "test" split if available and otherwise on the "validation" split._
-
- </div>
-
- <br />
-
- _References_:
- 1. _"Emerging properties in self-supervised vision transformers”_, [arXiv](https://arxiv.org/abs/2104.14294)
- 2. _"Benchmarking self-supervised learning on diverse pathology datasets”_, [arXiv](https://arxiv.org/abs/2212.04690)
- 3. _"Scaling self-supervised learning for histopathology with masked image modeling”_, [medRxiv](https://www.medrxiv.org/content/10.1101/2023.07.21.23292757v1)
- 4. _"A General-Purpose Self-Supervised Model for Computational Pathology”_, [arXiv](https://arxiv.org/abs/2308.15474)
- 5. _"Towards Training Large-Scale Pathology Foundation Models: from TCGA to Hospital Scale”_, [arXiv](https://arxiv.org/pdf/2404.15217)

  ## Contributing

@@ -7,7 +7,7 @@ eva/core/callbacks/config.py,sha256=-DRt20a2aF9Z9-7nZvbGBcOZ30qNf3ESf25EPRgRL1w,
  eva/core/callbacks/writers/__init__.py,sha256=z8cNVJOanj-yYyIiX-mOkhur1NExuCOKzPjp4mmm3AE,232
  eva/core/callbacks/writers/embeddings/__init__.py,sha256=zMxP4POf1yFFUVSjGcsQgyPYNY6JsZO_F66ngKJZaG8,301
  eva/core/callbacks/writers/embeddings/_manifest.py,sha256=pB9nGv9ofVbnI4HHPrHY4p7jeFiFQTRc09zszto_DTk,2424
- eva/core/callbacks/writers/embeddings/base.py,sha256=rdoCtMuegiO6Gb0vM8a-KGI33Eq0hS0Qnlh-1Y6_96s,7593
+ eva/core/callbacks/writers/embeddings/base.py,sha256=YrjdqIFBYla-5jleUUTyJ5OHmvInhzLLHXEGmzwa9xA,7620
  eva/core/callbacks/writers/embeddings/classification.py,sha256=pYU0dD08IXH4_pK-P43LqCMD17X-AP5Ayo9gbovwv6U,4522
  eva/core/callbacks/writers/embeddings/segmentation.py,sha256=6AMGfD9Ur35TyH8ztjPx4ayB1Kbywymmu9LriMGLDoY,3135
  eva/core/callbacks/writers/embeddings/typings.py,sha256=qXZSlasaSKHad6HyJeRTeUv-ZeJVM-R3oIaasD8ZPc8,997
@@ -17,7 +17,7 @@ eva/core/cli/logo.py,sha256=x6-vGWI0s9gza-xxQrBDi2wneb2wFU_mQGHgpAiq2MQ,786
  eva/core/cli/setup.py,sha256=kR-7l4X5Hu8kSLoQZGYGIeLXtn9S_EU52dauDy6fm0w,2663
  eva/core/data/__init__.py,sha256=yG3BeOWhp1EjVYMFqx8M_TBWFDyfIwwksQGQmMdSPaI,340
  eva/core/data/dataloaders/__init__.py,sha256=fbNClVZ8J3QoGi4qiPq635ig1j9GdI7six3RhfwDbjY,110
- eva/core/data/dataloaders/dataloader.py,sha256=-mWFFLtem1Ijbi8XGveFSv5XzUU7SyKwiT5Ahikzghw,2368
+ eva/core/data/dataloaders/dataloader.py,sha256=n2NNlWKurEC1TVis347l6YmPgNOGTaoxwIyoClyPt7s,2383
  eva/core/data/datamodules/__init__.py,sha256=qZchYbgxo9lxYnGoqdk0C6MfS2IbF0WItO0kCdP9Mqc,229
  eva/core/data/datamodules/call.py,sha256=jjj9w3UXYuQB-qyCcw1EZpRJW10OC1I3dvgvsuQWLck,940
  eva/core/data/datamodules/datamodule.py,sha256=dclC2YJAXUGEUpV9ZRWQS43-ksFIPgVeFudsyrj9kdc,3878
@@ -32,8 +32,8 @@ eva/core/data/datasets/embeddings.py,sha256=zNEO8KxqiOopcN_lTjwtEAm9xbnYDSjOE8X2
  eva/core/data/samplers/__init__.py,sha256=WikBo1DemCx6o2vFfNwSwODlmCT2zWUXtCNwiWCVAFE,100
  eva/core/data/samplers/sampler.py,sha256=vrrXERWC67fjmTk_uwD7s9-8-rdhvnx7OlSipHE6sdY,119
  eva/core/data/splitting/__init__.py,sha256=VQJ8lfakbv6y2kAk3VDtITAvh7kcZo3H1JwJBc5jT08,198
- eva/core/data/splitting/random.py,sha256=gmweyGtL4rbWoUaH1q63LjKpT-TCwk2mdB4Vw_jLTQo,1353
- eva/core/data/splitting/stratified.py,sha256=_1Eh6QuupxLexrABpwNXiDxDisoTiK8axsV3nvQXCx0,1915
+ eva/core/data/splitting/random.py,sha256=r6iy7j34seRTlyB79_Xy7m6lsKRi8ZM9X5Ln1b-SBjg,1453
+ eva/core/data/splitting/stratified.py,sha256=dliRHgEyZTOzpJrR8FzaRaAnjUxu_VsZUuy_4MPQjlY,2265
  eva/core/data/transforms/__init__.py,sha256=n0TczmJSc9EjR6JezAZqlZIN4Gz_X3UBePbyDSC7JkE,308
  eva/core/data/transforms/dtype/__init__.py,sha256=r_LM_hdh_gTsrgh3shDTdMpu-lgQNHJ1yD6wY3omPyg,174
  eva/core/data/transforms/dtype/array.py,sha256=RDSkXlnSHSYyU_gv7vw33OZ7vhEy62PQGoE3htGGaqc,725
@@ -51,17 +51,15 @@ eva/core/loggers/log/image.py,sha256=iUwntQCdRNLtkSdqu8CvV34l06zPYVo4NAW2gUeiJIM
  eva/core/loggers/log/parameters.py,sha256=7Xi-I5gQvEVv71d58bwdZ-Hb4287NXxaUyMfriq_KDU,1634
  eva/core/loggers/log/utils.py,sha256=k4Q7uKpAQctfDv0EEYPnPv6wt9LnckEeqGvbYSLfKO0,415
  eva/core/loggers/loggers.py,sha256=igHxdxJSotWSg6nEOKnfFuBszzblHgi8T7sBrE00FEs,166
- eva/core/metrics/__init__.py,sha256=CtmUcB2bh-JlI0wOvSwwPFB1OzaqsSM3dPHVQh7hMXY,714
+ eva/core/losses/__init__.py,sha256=D-Mp9fUFFFoH9YYWntVH3B839zHS3GjFJzkbQThzj6Y,118
+ eva/core/losses/cross_entropy.py,sha256=Sunz7ogDAJpGvZtuk9cAxKZJBO08CKIEvbCoewEvees,862
+ eva/core/metrics/__init__.py,sha256=-9Qch4npEQpy3oF6NUhh9WinCmFBFe0D2eEYCR0S0xU,558
  eva/core/metrics/average_loss.py,sha256=AyFOnCXBD5T62eSYf6eGAAJsqt8x-KaHgc8OLkCHjzE,1267
  eva/core/metrics/binary_balanced_accuracy.py,sha256=MabsXAtVfLqSaSIIpE0HIM6bo8uRszl6obueHI6vJi0,806
- eva/core/metrics/defaults/__init__.py,sha256=uPQzkna6Lb0VnCtC4IEPSB9d5jI1_0SohjUOMSo3o1Q,373
+ eva/core/metrics/defaults/__init__.py,sha256=IeqLcoxLNddtuuFao5r85ZVxTyFP6iFsj1K8iXCTSes,255
  eva/core/metrics/defaults/classification/__init__.py,sha256=xMzE4yV8NoUdcmk2FCKohEUav1GJcxYn60S1KNgXbJY,316
  eva/core/metrics/defaults/classification/binary.py,sha256=9ll6ZOcNGQdsg7ln9DAQ0u-OzsXSzEbueXe-dVJkJZ8,2322
  eva/core/metrics/defaults/classification/multiclass.py,sha256=8Aesy_rKtp4KxfXJtDCmk6FsGxIFS4Ywu2CH1VIRL7M,2518
- eva/core/metrics/defaults/segmentation/__init__.py,sha256=n6gDc603uRWOByAAPFkmZiPH2rEoZ3lSV9MC4nRMBuc,189
- eva/core/metrics/defaults/segmentation/multiclass.py,sha256=_M7NtvwIrfzLLXtAYflFjIle6UeHYU9TwWo3IHl0wlw,1715
- eva/core/metrics/generalized_dice.py,sha256=28vTdmh6QyLfSGtT5oARXp2Hd58EBNg5G0dSBfctvcY,2271
- eva/core/metrics/mean_iou.py,sha256=eAvAe1BiYEXjOtWHUZD_5hBGuRmNhHVYuyGls8YC-1g,4619
  eva/core/metrics/structs/__init__.py,sha256=cvn7E4k5vJmpwJj_zezmtZa_Nl_RddDM1G-MO8TP0po,422
  eva/core/metrics/structs/collection.py,sha256=bNfCekHN8pzD49-YTqVxrmxFtiQfNxnv-RwkxCL6rbc,149
  eva/core/metrics/structs/metric.py,sha256=zdnE0ZVTSYAMl7rW_OL6e1XiZDvLTirYqV0lgJCleXY,109
@@ -72,7 +70,7 @@ eva/core/models/__init__.py,sha256=bQSpfQJKuDMWosjcMhP7t5jdOSV6OyxdxTOIW9w1woE,3
  eva/core/models/modules/__init__.py,sha256=QJWJ42BceXZBzDGgk5FHBcCaRrB9egTFKVF6gDsBYfM,255
  eva/core/models/modules/head.py,sha256=iHrEOjYfshFI6OdXxJJTZyfCoUs2fimitINNcB6ENsc,4321
  eva/core/models/modules/inference.py,sha256=ih-0Rr2oNf2N6maiXPOW7XH5KVwUT1_MOxnJKOhJ1uQ,978
- eva/core/models/modules/module.py,sha256=7mCzyvBNOWhvN8sNa91yB79iSBlJlYh9sypL37Nwdes,6836
+ eva/core/models/modules/module.py,sha256=LtjYxTZb7UY0owonmt_yQ5EySw3sX-xD9HLN2io8EK4,6697
  eva/core/models/modules/typings.py,sha256=yFMJCE4Nrfd8VEXU1zk8p6Sz5M7UslwitYPVC2OPLSY,776
  eva/core/models/modules/utils/__init__.py,sha256=pnbxlEhT87JimWNr-NSNCv7VNR-IyDi_A9qRWmvlzwQ,227
  eva/core/models/modules/utils/batch_postprocess.py,sha256=RwnDcjJy3uvVirpgx_80Q2CUYKfJKipVwjyX7AF2CKw,3088
@@ -80,8 +78,8 @@ eva/core/models/modules/utils/grad.py,sha256=bl8qb8g4Nhg1KAGfbEV_9HTKkoT0azRwfs9
  eva/core/models/networks/__init__.py,sha256=yqx6UmG1Eg3vb1O_tnK_axnJWabEl9ULkDWiPN440Xc,85
  eva/core/models/networks/mlp.py,sha256=thk-x4pviE3fCaMW9k3I2Oe5_DxfC-CqUrtolvVdXug,2418
  eva/core/models/transforms/__init__.py,sha256=oYL3gNUUKZFViTu6GT1jVE2Kv1xFYPuyiYp-sErtVVg,257
- eva/core/models/transforms/extract_cls_features.py,sha256=odtqawFoDZZCvCg0bp8G8PlUY8KrPAQBZsNOcTZv02E,1081
- eva/core/models/transforms/extract_patch_features.py,sha256=41zCkX-ls-rvqB4B4kE5_lWMNhec65yatdDNa0yjRf0,1751
+ eva/core/models/transforms/extract_cls_features.py,sha256=tFRd4H-eGFIGCfZt6wuZGibDmAoNXKSsn15bBw0IDdc,1482
+ eva/core/models/transforms/extract_patch_features.py,sha256=k50jTLPWxbfvciH9QZSzTAGqWwDSVpXAteme_Qg2d6E,2202
  eva/core/models/wrappers/__init__.py,sha256=P-ipr4NtKqPU6ubAjKLGxFf1Qt2yDSNtgS2Xz5sBahQ,364
  eva/core/models/wrappers/_utils.py,sha256=HXUyGcILaa8GK31ViIHCKRU4f9kbjAPYQmhvN2N7jSc,957
  eva/core/models/wrappers/base.py,sha256=xKMUSXk93wI67p_wmh7jujK-bxvIefO1noYaAJN_5Ak,1359
@@ -102,6 +100,7 @@ eva/core/utils/memory.py,sha256=ZvcbS1eUPXdHIoL8ctFU56_-cyUniObBmIctUbvso48,636
  eva/core/utils/multiprocessing.py,sha256=PxUxMyvI62lghyWF46O5RNL-J7DUR2IrXSwdkbhC0ic,1383
  eva/core/utils/operations.py,sha256=eoC_ScuHUMDCuk08j1bosiQZdPrgiIODqqheR9MtJHQ,641
  eva/core/utils/parser.py,sha256=2czmwEGJJ6PtmaD86s9I14P-_sek4DmDCkEatRGT5sI,725
+ eva/core/utils/progress_bar.py,sha256=KvvsM_v3_Fhb4JvbEEPHb4PJMokg6mNLj-o6dkfzcMc,499
  eva/core/utils/workers.py,sha256=hfx63M82qNg0Dwhre2tl53MnhtRsV7APaDONM9nhVB8,634
  eva/vision/__init__.py,sha256=oUZXFYjwtkWzi8An0uS5Xc84pLKintlXe2iti8zW6BQ,480
  eva/vision/callbacks/__init__.py,sha256=su1V73L0dDVYWSyvV_lnWbszDi2KikRraF7OsgeaKl4,139
@@ -110,30 +109,31 @@ eva/vision/callbacks/loggers/batch/__init__.py,sha256=DVYP7Aonbi4wg_ERHRj_8kb87E
  eva/vision/callbacks/loggers/batch/base.py,sha256=hcAd5iiHvjZ0DIf4Qt4ENT54D6ky_1OO4rKQZqeo-1k,3628
  eva/vision/callbacks/loggers/batch/segmentation.py,sha256=PbgBVp6TGgko7Um8gN0fHyCs2sE42Uqe3M4grxSBykE,6749
  eva/vision/data/__init__.py,sha256=aoKPmX8P2Q2k2W3nlq8vFU41FV6Sze-0SDuWtU-ETh4,111
- eva/vision/data/datasets/__init__.py,sha256=t0pZhs3z-QFHERY5N8FVMQex8TDVG5kfcpGODdUxk8Y,836
+ eva/vision/data/datasets/__init__.py,sha256=COhMRB9QJcjfbmfpRcYEztDwN9pl7IJNiH29pCZo4CA,908
  eva/vision/data/datasets/_utils.py,sha256=epPcaYE4w2_LtUKLLQJh6qQxUNVBe22JA06k4WUerYQ,1430
  eva/vision/data/datasets/_validators.py,sha256=77WZj8ewsuxUjW5WegJ-7zDuR6WdF5JbaOYdywhKIK4,2594
- eva/vision/data/datasets/classification/__init__.py,sha256=ht5UPPgP736dt_L1Hb5rJtQnzKJHIhpBnqm3b4BMCZE,663
+ eva/vision/data/datasets/classification/__init__.py,sha256=T2eg8k3xxd_Pdbrr7TGYICSo7BVOTMOs1bL-rLnMmro,693
  eva/vision/data/datasets/classification/bach.py,sha256=kZba1dQlJWZAmA03akJ4fVUU-y9W8ezOwlgs2zL-QrE,5432
  eva/vision/data/datasets/classification/base.py,sha256=Ci0HoOhOuHwICTi1TUGA1PwZe642RywolTVfMhKrFHk,2772
- eva/vision/data/datasets/classification/camelyon16.py,sha256=sToajukdw-_V_YO6lbcZToMSLKEjeKxJfjZ8iSdzn-M,8136
+ eva/vision/data/datasets/classification/camelyon16.py,sha256=sChvRo0jbOVUMJvfpsFxgFOsYgci3v9wjeMBEjUysJU,8287
  eva/vision/data/datasets/classification/crc.py,sha256=8qjz9OklLg1gAr46RKZdlClmlO9awwfp0dkTs8v5jTE,5670
  eva/vision/data/datasets/classification/mhist.py,sha256=xzShPncSfAV6Q5ojfimeq748MfA0n77fGWa9EpdRzYU,3055
- eva/vision/data/datasets/classification/panda.py,sha256=6VpCsotdksAZSfdD9zcM96Ihr6FshnIgZPZkkt0oSLI,6853
+ eva/vision/data/datasets/classification/panda.py,sha256=BU_gDoX3ZSDUugwaO2n0XSZhzseK1rkPoHMRoJLGL84,7303
  eva/vision/data/datasets/classification/patch_camelyon.py,sha256=fElKteZKx4M6AjylnhhgNH1jewHegWc1K8h4FFKp0gE,7171
- eva/vision/data/datasets/classification/wsi.py,sha256=Y8yaPM5qVi13YyRKIcYrRaxmV_yRW8Dl9rj_1kRJ33I,3948
- eva/vision/data/datasets/segmentation/__init__.py,sha256=_E1K8Ld829jVlZ0VcjUy0HP-8aHu4v9rEbTFt3R8O9M,694
+ eva/vision/data/datasets/classification/wsi.py,sha256=x3mQ8iwyiSdfQOjJuV7_cd8-LRjjhY9tjtzuD8O87Lg,4099
+ eva/vision/data/datasets/segmentation/__init__.py,sha256=hGNr7BM_StxvmlOKWWfHp615qgsrB6BB3qMOiYhE0Og,791
  eva/vision/data/datasets/segmentation/_utils.py,sha256=ps1qpuEkPgvwUw6H-KKaLaYqDBGmN7dNGk3bnS1l6sI,1261
  eva/vision/data/datasets/segmentation/base.py,sha256=11IMODMB7KJ8Bs5p7MyOsBXCyPFJXfYcDLAIMitUwEk,3023
  eva/vision/data/datasets/segmentation/bcss.py,sha256=NHjHd1tgIfIw6TxsZTGb63iMEwXFbWX_JAwRT5WVsj4,8274
- eva/vision/data/datasets/segmentation/consep.py,sha256=mUUGqS1HkUkL1u45LY0rEjcAK0Dawc8abUmFgYEZ_ag,5871
+ eva/vision/data/datasets/segmentation/consep.py,sha256=dCD8VsZSvI3-RbHHHAwGWfsNwOJCPkzHpVtzrcuAEVo,5871
  eva/vision/data/datasets/segmentation/embeddings.py,sha256=0KaadzPxN6OrKNnFu3YsGBFkG6XqqvkOZYUhERPwL4A,1220
- eva/vision/data/datasets/segmentation/lits.py,sha256=_9qdjKnYe5YsJ6_UAIrPwMeqoKHyHYmB7q-6uvXqdLQ,6246
- eva/vision/data/datasets/segmentation/monusac.py,sha256=vbXo-T3Rdu_zGja81ZbOimjZMlx2CnRZsC5nH-Dqkyg,8368
- eva/vision/data/datasets/segmentation/total_segmentator_2d.py,sha256=h2daCbFZPm48GjuOAOy0-Cd-WKFkFvus1ZWuoJZY9D4,13070
+ eva/vision/data/datasets/segmentation/lits.py,sha256=_R5AGFX8jVPwK3UKaYQfIRLBpM5ZmDg6KRziisUDYps,7175
+ eva/vision/data/datasets/segmentation/lits_balanced.py,sha256=s5kPfqB41Vkcm5Jh34mLAO0NweMSIlV2fMXJsRjJsF8,3384
+ eva/vision/data/datasets/segmentation/monusac.py,sha256=OTWHAD1b48WeT6phVf466w_nJUOGdBCGKWiWw68PAdw,8423
+ eva/vision/data/datasets/segmentation/total_segmentator_2d.py,sha256=pqrNRQu5kbKSd-l5jwaiE67qyF2jLQ3JrO7TjhGGF7w,13098
  eva/vision/data/datasets/structs.py,sha256=RaTDW-B36PumcR5gymhCiX-r8GiKqIFcjqoEEjjFyUE,389
  eva/vision/data/datasets/vision.py,sha256=hKKFMb65UJQzOyYm8FTGkOGBOinMRu7R8sOFMbCmQX4,1100
- eva/vision/data/datasets/wsi.py,sha256=JauEeQEC3niyivLa4FcI4X5GKvDRVpwY6BknzN-vKAQ,6611
+ eva/vision/data/datasets/wsi.py,sha256=-rypkcd6CPBM_oPuLszUx9q4zSPzeO1H6JKqvOtLlHw,8282
  eva/vision/data/transforms/__init__.py,sha256=WeFii6JwB0CiOOGLR3tkgAoKgRdmOf2lm0Dadixn8OI,260
  eva/vision/data/transforms/common/__init__.py,sha256=6tvxUgb8wfhgvqejMVulwqssHTJLF7f4_vpf44kxgxY,234
  eva/vision/data/transforms/common/resize_and_clamp.py,sha256=f9-YIX0S9GMAXHP7TWlyRlGfZIVvHgoBHqQ8PzaKbKs,1736
@@ -150,56 +150,69 @@ eva/vision/data/wsi/backends/openslide.py,sha256=VPVJDb6iAe0ZIdYbyFfPLDzHvku8PZX
  eva/vision/data/wsi/backends/pil.py,sha256=CqCWP1ViwpQyVKGLUoEtc4tCHXSAdQpMn6ZX2lNBMns,1403
  eva/vision/data/wsi/backends/tiffslide.py,sha256=f1xOiD4kpL0oRe3xFNT7BM2zYTWBduqL99skk-ZFRwE,1217
  eva/vision/data/wsi/patching/__init__.py,sha256=vSGyui2TkaJpw_wQJldP0Llnym5X9XgK17nuz7S5Hh8,189
- eva/vision/data/wsi/patching/coordinates.py,sha256=IzuF4i63bJYqdJH7eWQYR2q5QHw-80iV6QLibac6CWg,3475
+ eva/vision/data/wsi/patching/coordinates.py,sha256=TnsRafUJzsjvfhuP0vvDA294G0f4hu5LqICYNnMFUMo,3870
  eva/vision/data/wsi/patching/mask.py,sha256=o_S4YRdbfaxKCG1_T2skswDirmlzHzVC5exaDJucvD0,4986
  eva/vision/data/wsi/patching/samplers/__init__.py,sha256=QkBbjnZf7IcEPm-ON9SeZP0I3DXUA3pY87dKXXdelz4,458
- eva/vision/data/wsi/patching/samplers/_utils.py,sha256=aJI3mSJjfsMm4eNCAqIwMuXX0mGHl0WUa1vbC0DbbmY,1431
+ eva/vision/data/wsi/patching/samplers/_utils.py,sha256=e9kqHB6mhJgIIQV_Hv8QHg2P2R7dPVfd8Lt08Hi-UsQ,1374
  eva/vision/data/wsi/patching/samplers/base.py,sha256=KWLJMfaPk7-IZ-P2isYBvFAa5SuJPUhtD63hkKRFrgg,1287
  eva/vision/data/wsi/patching/samplers/foreground_grid.py,sha256=EhXkr5EFz2-RXEisWtjDa4CUTnrW4fiamQjEgALB2aI,3093
  eva/vision/data/wsi/patching/samplers/grid.py,sha256=dImrMSyCL3E_j5KRqpVJUWTe-mrJpfttg1Z9rbm3j0k,1363
- eva/vision/data/wsi/patching/samplers/random.py,sha256=qx5vExkmLgMFZgEwaXMmYFxoS-ewBhX-1Bpb1GGYkuI,1151
- eva/vision/losses/__init__.py,sha256=ZfUHa7siD3bBjiG4f39Eh4A0auaz0ctIKK0M9qfI-gY,95
- eva/vision/losses/dice.py,sha256=_D8Cj_m9AbOUhJS-GfsBbhfC-R9J58ao8UmuV_6OMhI,1424
+ eva/vision/data/wsi/patching/samplers/random.py,sha256=0clmwCZ47bnTaSFke7jtjsmrFoY1ID2LjoiaE52dC3o,1228
+ eva/vision/losses/__init__.py,sha256=htafabZgVcqbJjPURwsmGJ7AT6hIXc1-9SEuuaGU9SA,121
+ eva/vision/losses/dice.py,sha256=qIMxtQlBbzES3hJ7x7pq0bd0GvIFpfGNL3KnMi0vRds,3669
+ eva/vision/metrics/__init__.py,sha256=NtbcCrAUhVMMxSygTnbvNceJZBUzUD2tZp4nDmFjG3w,360
+ eva/vision/metrics/defaults/__init__.py,sha256=ncQ9uH5q5SpfalyPX6dINPRLk34HLw6z9u8ny_HHbFQ,174
+ eva/vision/metrics/defaults/segmentation/__init__.py,sha256=ve6dwyfhJGYBYKS6l6OySCBs32JnEBFnvhAyNvj-Uqo,191
+ eva/vision/metrics/defaults/segmentation/multiclass.py,sha256=Qk4-OC0oujg99MEwRIjif-fS2mbAiWSFTZVRq4cY_Vo,1758
+ eva/vision/metrics/segmentation/BUILD,sha256=Nf7BYWWe1USoFEIsIiEVZ8sa05J5FPkMJ-UIMDLrU8o,17
+ eva/vision/metrics/segmentation/__init__.py,sha256=lfuyjuo2XjS7_dvaL-8e_LhLsyK2mc_mNGwyGqcb5X4,234
+ eva/vision/metrics/segmentation/_utils.py,sha256=ebxTqymtxZ0iwMiH2snQHV_NVfDhZUSBlBS9AShWu_8,2464
+ eva/vision/metrics/segmentation/generalized_dice.py,sha256=FqFzo7YWBwSlihmlgQg-O_ld1ZBQma0YTXk5XZ7faZM,2443
+ eva/vision/metrics/segmentation/mean_iou.py,sha256=xR3wQOHT77SNKTRRPdDaWpJ88qgk9PIBT5n2lnKTUfM,2161
  eva/vision/models/__init__.py,sha256=a-P6JL73A3miHQnqgqUz07XtVmQB_o4DqPImk5rEATo,275
  eva/vision/models/modules/__init__.py,sha256=vaM_V6OF2s0lYjralP8dzv8mAtv_xIMZItfXgz0NZg8,156
- eva/vision/models/modules/semantic_segmentation.py,sha256=poBss37CM-bGLrtAl08WTcJtQgzwEP1MJgjeEbxexk0,6255
+ eva/vision/models/modules/semantic_segmentation.py,sha256=VrzQemVgJbbXVQVzvvDjRaeXNhHsWDXRmTXGZhe-VCo,6389
  eva/vision/models/networks/__init__.py,sha256=j43IurizNlAyKPH2jwDHaeq49L2QvwbHWqUaptA1mG4,100
  eva/vision/models/networks/abmil.py,sha256=N1eH4fn1nXmgXurSQyQIxxonv7nsqeeuPWaQSHeltfs,6796
  eva/vision/models/networks/backbones/__init__.py,sha256=LsMx92eEoCQ5aNVFp7mHjrD-9ZeNawMiK6zZSYzl_PU,296
- eva/vision/models/networks/backbones/_utils.py,sha256=I8YrBsIVtCsp13xs1ln_OrhKBRu2gOmJdopL9hx_MBk,1277
- eva/vision/models/networks/backbones/pathology/__init__.py,sha256=-kn7JCC7fs8-VvjGQURQsdQejKYOwhPJch37Cf1crDM,1005
+ eva/vision/models/networks/backbones/_utils.py,sha256=V7xeod4mElEuuO1TRW0xJE051cUyS1Saraw3-KcK1Mw,1667
+ eva/vision/models/networks/backbones/pathology/__init__.py,sha256=goR59h8bfzd-Wa3rxPPdaSlAOH_df8SHBkTSKi08TS8,1147
  eva/vision/models/networks/backbones/pathology/bioptimus.py,sha256=wUSKjYgxcRV3FRHGaPwF1uRAQcGO0rHNHGmK1QDJXk4,991
  eva/vision/models/networks/backbones/pathology/gigapath.py,sha256=mfGXtKhY7XLpKQQAFNVZYsM-aeHCEbOVUrxpAEOr-l8,955
- eva/vision/models/networks/backbones/pathology/histai.py,sha256=C05W_75bINtTnet25M0axiVt00TMmcCx2U5Fcr7n-_I,1570
+ eva/vision/models/networks/backbones/pathology/histai.py,sha256=X_we3U7GK91RrXyOX2PJB-YFDF2ozdL2fzZhNxm9SVU,1914
  eva/vision/models/networks/backbones/pathology/kaiko.py,sha256=GSdBG4WXrs1PWB2hr-sy_dFe2riwpPKwHx71esDoVfE,3952
  eva/vision/models/networks/backbones/pathology/lunit.py,sha256=ku4lr9pWeeHatHN4x4OVgwlve9sVqiRqIbgI0PXLiqg,2160
- eva/vision/models/networks/backbones/pathology/mahmood.py,sha256=3iIGKD7AvPDTritNkT2NGd6Nb5iJQxBKPmymI5YpOzo,2042
- eva/vision/models/networks/backbones/pathology/owkin.py,sha256=EdP4d1ndIR4URZHuSVfn01AdSe5n77KB5Sq4XEFmYwo,713
+ eva/vision/models/networks/backbones/pathology/mahmood.py,sha256=me8DXf9nsEegDmltP8f7ZnG89xYVEKzZLKfVzMZjWDs,1832
+ eva/vision/models/networks/backbones/pathology/owkin.py,sha256=uWJV5fgY7UZX6ilgGzkPY9fnlOiF03W7E8rc9TmlHGg,1231
+ eva/vision/models/networks/backbones/pathology/paige.py,sha256=MjOLgdEKk8tdAIpCiHelasGwPE7xgzaooW6EE7IsuEE,1642
  eva/vision/models/networks/backbones/registry.py,sha256=anjILtEHHB6Ltwiw22h1bsgWtIjh_l5_fkPh87K7-d0,1631
  eva/vision/models/networks/backbones/timm/__init__.py,sha256=cZH3av9gIZcvEVD0rwKsI-MEq7zPqaW4dQ0E05CksvQ,128
  eva/vision/models/networks/backbones/timm/backbones.py,sha256=fCTiwqU6NhQ-ccAMzmpPDddXkFzRAB3mw4lcQ9um_PU,1646
  eva/vision/models/networks/backbones/universal/__init__.py,sha256=MAlkALSJ2_w6spSbB7NmKlL0Jsk1YKEycatdI0xO0_I,252
  eva/vision/models/networks/backbones/universal/vit.py,sha256=kpUCoXpefR34hRNlQDFK9lGr4oqS8Mn5vTLKWZ-gaOs,1820
- eva/vision/models/networks/decoders/__init__.py,sha256=kW79anaDHRm0Tkxt7ZIpYpaMggx8RGK2mogs77n-c6k,190
- eva/vision/models/networks/decoders/decoder.py,sha256=0tEx-eWEbNA53oafUbJkTb3j0watPpdntXMrQ66azsU,150
- eva/vision/models/networks/decoders/segmentation/__init__.py,sha256=bdGL_R44cyutqNXEMYMwA_RtqbdTL5xt2TVdS5BjGps,439
- eva/vision/models/networks/decoders/segmentation/common.py,sha256=4gxTimvc-JRzbIlD4yfGWXIjcEJSP_iY79h-mheDryc,2525
- eva/vision/models/networks/decoders/segmentation/conv2d.py,sha256=fv-0tF7_Ey4EH5iW08enPoaRrziiqbCfjrl1i50ZgfI,4092
- eva/vision/models/networks/decoders/segmentation/linear.py,sha256=89kDvs-e7Y3Bs3TQvmt2K7_cQYkv0T65A_nBh_anqFQ,4736
+ eva/vision/models/networks/decoders/__init__.py,sha256=RXFWmoYw2i6E9VOUCJmU8c72icHannVuo-cUKy6fnLM,200
+ eva/vision/models/networks/decoders/segmentation/__init__.py,sha256=N6jrhXHj0P7i7RptZbZ-JFehT2BM7meFyNIK0owAkaE,517
+ eva/vision/models/networks/decoders/segmentation/base.py,sha256=b2TIJKiJR9vejVRpNyedMJLPTrpHhAEXvco8atb9TPU,411
+ eva/vision/models/networks/decoders/segmentation/decoder2d.py,sha256=0jZrgFSdH5nlMYlbBmDb1E4kIQ3cG-LNOsiij51_NSA,4447
+ eva/vision/models/networks/decoders/segmentation/linear.py,sha256=-i9RVaKM1UsB3AXDDKdMmHiD7y2sr5HfF-WvkB47Fhw,4743
+ eva/vision/models/networks/decoders/segmentation/semantic/__init__.py,sha256=Ubs8GXyQpEHs26JUeUuiVP3jfn47eiBZM_UVbu749XU,398
+ eva/vision/models/networks/decoders/segmentation/semantic/common.py,sha256=fPTb0T-2FiOU-jT81ynASKaW7fJiRk6vQjuPkzHOluc,2530
+ eva/vision/models/networks/decoders/segmentation/semantic/with_image.py,sha256=I5PyGKKo8DcXYcw4xlCFzuavRJNRrzGT-szpDidMPXI,3516
+ eva/vision/models/networks/decoders/segmentation/typings.py,sha256=8zAqIJLlQdCjsx-Dl4lnF4BB1VxTg_AyIquBVwpZlHg,537
  eva/vision/models/wrappers/__init__.py,sha256=8MT8qFM4nUXGpK1_i3rp70ODkOjn2KhhRo2I17qZCPM,210
  eva/vision/models/wrappers/from_registry.py,sha256=gdnxyg9drqlxfTNuS3aLbWGbZIwX1VNl0uudfjzVsXM,1614
  eva/vision/models/wrappers/from_timm.py,sha256=Z38Nb1i6OPKkgvFZOvGx-O3AZQuscf1zRVyrEBXQdJg,2320
  eva/vision/utils/__init__.py,sha256=vaUovprE743SmyFH8l6uk4pYSWpI4zxn7lN0EwePTJI,96
  eva/vision/utils/colormap.py,sha256=P904auPzaxGESTjFcbv550fc49DeXklSHkuhXWFXCEo,2384
  eva/vision/utils/convert.py,sha256=fqGmKrg5-JJLrTkTXB4YDcWTudXPrO1gGjsckVRUesU,1881
- eva/vision/utils/io/__init__.py,sha256=B9z6YiPUTI2aNDvN7t90_WugPE-L1d_1017aNeOkuZo,517
+ eva/vision/utils/io/__init__.py,sha256=XGJ_W94DVEYXJ_tVpr_20NMpR5JLWEWHGF3v9Low79A,610
  eva/vision/utils/io/_utils.py,sha256=JzOt7Frj6ScF_aNjFtfHBn4ROnl6NhUZucmQhLc4Cww,768
  eva/vision/utils/io/image.py,sha256=IdOkr5MYqhYHz8U9drZ7wULTM3YHwCWSjZlu_Qdl4GQ,2053
  eva/vision/utils/io/mat.py,sha256=qpGifyjmpE0Xhv567Si7-zxKrgkgE0sywP70cHiLFGU,808
- eva/vision/utils/io/nifti.py,sha256=O_5x3A7RySfZYkF8KG5nmLQf1FcbhnJBVNVf71m3Lo4,2189
+ eva/vision/utils/io/nifti.py,sha256=Q8Cd-ovqGZbevqfhb4waS6xI5xV3DXoWnDd5rhzLRNU,2595
  eva/vision/utils/io/text.py,sha256=qYgfo_ZaDZWfG02NkVVYzo5QFySqdCCz5uLA9d-zXtI,701
- kaiko_eva-0.1.0.dist-info/METADATA,sha256=abBwr5ckrDbZSvgkD0l4SPO1ZqIQjFuiuKrbY2C72ZA,26806
- kaiko_eva-0.1.0.dist-info/WHEEL,sha256=Vza3XR51HW1KmFP0iIMUVYIvz0uQuKJpIXKYOBGQyFQ,90
- kaiko_eva-0.1.0.dist-info/entry_points.txt,sha256=6CSLu9bmQYJSXEg8gbOzRhxH0AGs75BB-vPm3VvfcNE,88
- kaiko_eva-0.1.0.dist-info/licenses/LICENSE,sha256=e6AEzr7j_R-PYr2qLO-JwLn8y70jbVD3U2mxbRmwcI4,11338
- kaiko_eva-0.1.0.dist-info/RECORD,,
+ kaiko_eva-0.1.3.dist-info/METADATA,sha256=xgKnK4lR6GSdWW0oB52wY7spKYlq_jq19AAjJREHpBg,24869
+ kaiko_eva-0.1.3.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+ kaiko_eva-0.1.3.dist-info/entry_points.txt,sha256=6CSLu9bmQYJSXEg8gbOzRhxH0AGs75BB-vPm3VvfcNE,88
+ kaiko_eva-0.1.3.dist-info/licenses/LICENSE,sha256=e6AEzr7j_R-PYr2qLO-JwLn8y70jbVD3U2mxbRmwcI4,11338
+ kaiko_eva-0.1.3.dist-info/RECORD,,
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: pdm-backend (2.4.1)
+ Generator: pdm-backend (2.4.3)
  Root-Is-Purelib: true
  Tag: py3-none-any
1
- """Mean Intersection over Union (mIoU) metric for semantic segmentation."""
2
-
3
- from typing import Any, Literal, Tuple
4
-
5
- import torch
6
- import torchmetrics
7
-
8
-
9
- class MeanIoU(torchmetrics.Metric):
10
- """Computes Mean Intersection over Union (mIoU) for semantic segmentation.
11
-
12
- Fixes the torchmetrics implementation
13
- (issue https://github.com/Lightning-AI/torchmetrics/issues/2558)
14
- """
15
-
16
- def __init__(
17
- self,
18
- num_classes: int,
19
- include_background: bool = True,
20
- ignore_index: int | None = None,
21
- per_class: bool = False,
22
- **kwargs: Any,
23
- ) -> None:
24
- """Initializes the metric.
25
-
26
- Args:
27
- num_classes: The number of classes in the segmentation problem.
28
- include_background: Whether to include the background class in the computation
29
- ignore_index: Integer specifying a target class to ignore. If given, this class
30
- index does not contribute to the returned score, regardless of reduction method.
31
- per_class: Whether to compute the IoU for each class separately. If set to ``False``,
32
- the metric will compute the mean IoU over all classes.
33
- kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
34
- """
35
- super().__init__(**kwargs)
36
-
37
- self.num_classes = num_classes
38
- self.include_background = include_background
39
- self.ignore_index = ignore_index
40
- self.per_class = per_class
41
-
42
- self.add_state("intersection", default=torch.zeros(num_classes), dist_reduce_fx="sum")
43
- self.add_state("union", default=torch.zeros(num_classes), dist_reduce_fx="sum")
44
-
45
- def update(self, preds: torch.Tensor, target: torch.Tensor) -> None:
46
- """Update the state with the new data."""
47
- intersection, union = _compute_intersection_and_union(
48
- preds,
49
- target,
50
- num_classes=self.num_classes,
51
- include_background=self.include_background,
52
- ignore_index=self.ignore_index,
53
- )
54
- self.intersection += intersection.sum(0)
55
- self.union += union.sum(0)
56
-
57
- def compute(self) -> torch.Tensor:
58
- """Compute the final mean IoU score."""
59
- iou_valid = torch.gt(self.union, 0)
60
- iou = torch.where(
61
- iou_valid,
62
- torch.divide(self.intersection, self.union),
63
- torch.nan,
64
- )
65
- if not self.per_class:
66
- iou = torch.mean(iou[iou_valid])
67
- return iou
68
-
69
-
70
- def _compute_intersection_and_union(
71
- preds: torch.Tensor,
72
- target: torch.Tensor,
73
- num_classes: int,
74
- include_background: bool = False,
75
- input_format: Literal["one-hot", "index"] = "index",
76
- ignore_index: int | None = None,
77
- ) -> Tuple[torch.Tensor, torch.Tensor]:
78
- """Compute the intersection and union for semantic segmentation tasks.
79
-
80
- Args:
81
- preds: Predicted tensor with shape (N, ...) where N is the batch size.
82
- The shape can be (N, H, W) for 2D data or (N, D, H, W) for 3D data.
83
- target: Ground truth tensor with the same shape as preds.
84
- num_classes: Number of classes in the segmentation task.
85
- include_background: Whether to include the background class in the computation.
86
- input_format: Format of the input tensors.
87
- ignore_index: Integer specifying a target class to ignore. If given, this class
88
- index does not contribute to the returned score, regardless of reduction method.
89
-
90
- Returns:
91
- Two tensors representing the intersection and union for each class.
92
- Shape of each tensor is (N, num_classes).
93
-
94
- Note:
95
- - If input_format is "index", the tensors are converted to one-hot encoding.
96
- - If include_background is `False`, the background class
97
- (assumed to be the first channel) is ignored in the computation.
98
- """
99
- if ignore_index is not None:
100
- mask = target != ignore_index
101
- mask = mask.all(dim=-1, keepdim=True)
102
- preds = preds * mask
103
- target = target * mask
104
-
105
- if input_format == "index":
106
- preds = torch.nn.functional.one_hot(preds, num_classes=num_classes)
107
- target = torch.nn.functional.one_hot(target, num_classes=num_classes)
108
-
109
- if not include_background:
110
- preds[..., 0] = 0
111
- target[..., 0] = 0
112
-
113
- reduce_axis = list(range(1, preds.ndim - 1))
114
-
115
- intersection = torch.sum(torch.logical_and(preds, target), dim=reduce_axis)
116
- target_sum = torch.sum(target, dim=reduce_axis)
117
- pred_sum = torch.sum(preds, dim=reduce_axis)
118
- union = target_sum + pred_sum - intersection
119
-
120
- return intersection, union
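Note that the metric is relocated rather than dropped: the file list above adds eva/vision/metrics/segmentation/mean_iou.py (entry 38). As a reminder of the bookkeeping the removed class relied on, here is a tiny worked example of the per-class intersection/union computation on index-format masks; the numbers are illustrative only.

```python
import torch

# 1x2x2 prediction and target masks with 3 classes, index format.
preds = torch.tensor([[[0, 1], [2, 2]]])
target = torch.tensor([[[0, 1], [2, 1]]])

preds_1h = torch.nn.functional.one_hot(preds, num_classes=3)
target_1h = torch.nn.functional.one_hot(target, num_classes=3)

reduce_axis = list(range(1, preds_1h.ndim - 1))  # reduce over spatial dims only
intersection = torch.sum(torch.logical_and(preds_1h, target_1h), dim=reduce_axis)
union = target_1h.sum(reduce_axis) + preds_1h.sum(reduce_axis) - intersection

print(intersection / union)  # per-class IoU: 1.0, 0.5, 0.5
```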
@@ -1,7 +0,0 @@
1
- """Semantic segmentation decoder base class."""
2
-
3
- from torch import nn
4
-
5
-
6
- class Decoder(nn.Module):
7
- """Semantic segmentation decoder base class."""