kaiko-eva 0.2.0__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kaiko-eva might be problematic.

Files changed (85)
  1. eva/core/data/datasets/base.py +7 -2
  2. eva/core/models/modules/head.py +4 -2
  3. eva/core/models/modules/typings.py +2 -2
  4. eva/core/models/transforms/__init__.py +2 -1
  5. eva/core/models/transforms/as_discrete.py +57 -0
  6. eva/core/models/wrappers/_utils.py +121 -1
  7. eva/core/trainers/_recorder.py +4 -1
  8. eva/core/utils/suppress_logs.py +28 -0
  9. eva/vision/data/__init__.py +2 -2
  10. eva/vision/data/dataloaders/__init__.py +5 -0
  11. eva/vision/data/dataloaders/collate_fn/__init__.py +5 -0
  12. eva/vision/data/dataloaders/collate_fn/collection.py +22 -0
  13. eva/vision/data/datasets/__init__.py +2 -2
  14. eva/vision/data/datasets/classification/bach.py +3 -4
  15. eva/vision/data/datasets/classification/bracs.py +3 -4
  16. eva/vision/data/datasets/classification/breakhis.py +3 -4
  17. eva/vision/data/datasets/classification/camelyon16.py +4 -5
  18. eva/vision/data/datasets/classification/crc.py +3 -4
  19. eva/vision/data/datasets/classification/gleason_arvaniti.py +3 -4
  20. eva/vision/data/datasets/classification/mhist.py +3 -4
  21. eva/vision/data/datasets/classification/panda.py +4 -5
  22. eva/vision/data/datasets/classification/patch_camelyon.py +3 -4
  23. eva/vision/data/datasets/classification/unitopatho.py +3 -4
  24. eva/vision/data/datasets/classification/wsi.py +6 -5
  25. eva/vision/data/datasets/segmentation/__init__.py +2 -2
  26. eva/vision/data/datasets/segmentation/_utils.py +47 -0
  27. eva/vision/data/datasets/segmentation/bcss.py +7 -8
  28. eva/vision/data/datasets/segmentation/btcv.py +236 -0
  29. eva/vision/data/datasets/segmentation/consep.py +6 -7
  30. eva/vision/data/datasets/segmentation/lits.py +9 -8
  31. eva/vision/data/datasets/segmentation/lits_balanced.py +2 -1
  32. eva/vision/data/datasets/segmentation/monusac.py +4 -5
  33. eva/vision/data/datasets/segmentation/total_segmentator_2d.py +12 -10
  34. eva/vision/data/datasets/vision.py +95 -4
  35. eva/vision/data/datasets/wsi.py +5 -5
  36. eva/vision/data/transforms/__init__.py +22 -3
  37. eva/vision/data/transforms/common/__init__.py +1 -2
  38. eva/vision/data/transforms/croppad/__init__.py +11 -0
  39. eva/vision/data/transforms/croppad/crop_foreground.py +110 -0
  40. eva/vision/data/transforms/croppad/rand_crop_by_pos_neg_label.py +109 -0
  41. eva/vision/data/transforms/croppad/spatial_pad.py +67 -0
  42. eva/vision/data/transforms/intensity/__init__.py +11 -0
  43. eva/vision/data/transforms/intensity/rand_scale_intensity.py +59 -0
  44. eva/vision/data/transforms/intensity/rand_shift_intensity.py +55 -0
  45. eva/vision/data/transforms/intensity/scale_intensity_ranged.py +56 -0
  46. eva/vision/data/transforms/spatial/__init__.py +7 -0
  47. eva/vision/data/transforms/spatial/flip.py +72 -0
  48. eva/vision/data/transforms/spatial/rotate.py +53 -0
  49. eva/vision/data/transforms/spatial/spacing.py +69 -0
  50. eva/vision/data/transforms/utility/__init__.py +5 -0
  51. eva/vision/data/transforms/utility/ensure_channel_first.py +51 -0
  52. eva/vision/data/tv_tensors/__init__.py +5 -0
  53. eva/vision/data/tv_tensors/volume.py +61 -0
  54. eva/vision/metrics/segmentation/monai_dice.py +9 -2
  55. eva/vision/models/modules/semantic_segmentation.py +32 -19
  56. eva/vision/models/networks/backbones/__init__.py +9 -2
  57. eva/vision/models/networks/backbones/pathology/__init__.py +11 -2
  58. eva/vision/models/networks/backbones/pathology/bioptimus.py +47 -1
  59. eva/vision/models/networks/backbones/pathology/hkust.py +69 -0
  60. eva/vision/models/networks/backbones/pathology/kaiko.py +18 -0
  61. eva/vision/models/networks/backbones/radiology/__init__.py +11 -0
  62. eva/vision/models/networks/backbones/radiology/swin_unetr.py +231 -0
  63. eva/vision/models/networks/backbones/radiology/voco.py +75 -0
  64. eva/vision/models/networks/decoders/segmentation/__init__.py +6 -2
  65. eva/vision/models/networks/decoders/segmentation/linear.py +5 -10
  66. eva/vision/models/networks/decoders/segmentation/semantic/__init__.py +8 -1
  67. eva/vision/models/networks/decoders/segmentation/semantic/swin_unetr.py +104 -0
  68. eva/vision/utils/io/__init__.py +2 -0
  69. eva/vision/utils/io/nifti.py +91 -11
  70. {kaiko_eva-0.2.0.dist-info → kaiko_eva-0.2.2.dist-info}/METADATA +16 -12
  71. {kaiko_eva-0.2.0.dist-info → kaiko_eva-0.2.2.dist-info}/RECORD +74 -58
  72. {kaiko_eva-0.2.0.dist-info → kaiko_eva-0.2.2.dist-info}/WHEEL +1 -1
  73. eva/vision/data/datasets/classification/base.py +0 -96
  74. eva/vision/data/datasets/segmentation/base.py +0 -96
  75. eva/vision/data/transforms/common/resize_and_clamp.py +0 -51
  76. eva/vision/data/transforms/normalization/__init__.py +0 -6
  77. eva/vision/data/transforms/normalization/clamp.py +0 -43
  78. eva/vision/data/transforms/normalization/functional/__init__.py +0 -5
  79. eva/vision/data/transforms/normalization/functional/rescale_intensity.py +0 -28
  80. eva/vision/data/transforms/normalization/rescale_intensity.py +0 -53
  81. eva/vision/metrics/segmentation/BUILD +0 -1
  82. eva/vision/models/networks/backbones/torchhub/__init__.py +0 -5
  83. eva/vision/models/networks/backbones/torchhub/backbones.py +0 -61
  84. {kaiko_eva-0.2.0.dist-info → kaiko_eva-0.2.2.dist-info}/entry_points.txt +0 -0
  85. {kaiko_eva-0.2.0.dist-info → kaiko_eva-0.2.2.dist-info}/licenses/LICENSE +0 -0
@@ -1,28 +0,0 @@
-"""Intensity level functions."""
-
-import sys
-from typing import Tuple
-
-import torch
-
-
-def rescale_intensity(
-    image: torch.Tensor,
-    in_range: Tuple[float, float] | None = None,
-    out_range: Tuple[float, float] = (0.0, 1.0),
-) -> torch.Tensor:
-    """Stretches or shrinks the image intensity levels.
-
-    Args:
-        image: The image tensor as float-type.
-        in_range: The input data range. If `None`, it will
-            fetch the min and max of the input image.
-        out_range: The desired intensity range of the output.
-
-    Returns:
-        The image tensor after stretching or shrinking its intensity levels.
-    """
-    imin, imax = in_range or (image.min(), image.max())
-    omin, omax = out_range
-    image_scaled = (image - imin) / (imax - imin + sys.float_info.epsilon)
-    return image_scaled * (omax - omin) + omin
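
For context, the removed rescale_intensity helper (file 79 in the list above) linearly maps intensities from an input range to an output range, falling back to the tensor's own min and max when no input range is given. A minimal standalone sketch of the same arithmetic, independent of the kaiko-eva package:

    import sys

    import torch


    def rescale(image: torch.Tensor, out_range=(0.0, 1.0)) -> torch.Tensor:
        # Use the tensor's own min/max as the input range, as the removed
        # helper did when `in_range` was None.
        imin, imax = image.min(), image.max()
        omin, omax = out_range
        scaled = (image - imin) / (imax - imin + sys.float_info.epsilon)
        return scaled * (omax - omin) + omin


    print(rescale(torch.tensor([0.0, 127.5, 255.0])))  # tensor([0.0000, 0.5000, 1.0000])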
@@ -1,53 +0,0 @@
-"""Intensity level scaling transform."""
-
-import functools
-from typing import Any, Dict, Tuple
-
-import torch
-import torchvision.transforms.v2 as torch_transforms
-from torchvision import tv_tensors
-from typing_extensions import override
-
-from eva.vision.data.transforms.normalization import functional
-
-
-class RescaleIntensity(torch_transforms.Transform):
-    """Stretches or shrinks the image intensity levels."""
-
-    def __init__(
-        self,
-        in_range: Tuple[float, float] | None = None,
-        out_range: Tuple[float, float] = (0.0, 1.0),
-    ) -> None:
-        """Initializes the transform.
-
-        Args:
-            in_range: The input data range. If `None`, it will
-                fetch the min and max of the input image.
-            out_range: The desired intensity range of the output.
-        """
-        super().__init__()
-
-        self._in_range = in_range
-        self._out_range = out_range
-
-    @functools.singledispatchmethod
-    @override
-    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
-        return inpt
-
-    @_transform.register(torch.Tensor)
-    def _(self, inpt: torch.Tensor, params: Dict[str, Any]) -> Any:
-        return functional.rescale_intensity(
-            inpt, in_range=self._in_range, out_range=self._out_range
-        )
-
-    @_transform.register(tv_tensors.Image)
-    def _(self, inpt: tv_tensors.Image, params: Dict[str, Any]) -> Any:
-        scaled_inpt = functional.rescale_intensity(inpt, out_range=self._out_range)
-        return tv_tensors.wrap(scaled_inpt, like=inpt)
-
-    @_transform.register(tv_tensors.BoundingBoxes)
-    @_transform.register(tv_tensors.Mask)
-    def _(self, inpt: tv_tensors.BoundingBoxes | tv_tensors.Mask, params: Dict[str, Any]) -> Any:
-        return inpt
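
The removed RescaleIntensity class (file 80 above) wrapped that helper as a torchvision v2 transform that dispatches on input type: plain tensors and tv_tensors.Image inputs are rescaled, while masks and bounding boxes pass through unchanged. A hedged usage sketch of how the removed transform was applied, assuming torchvision's v2 transforms; the class itself is no longer shipped in 0.2.2:

    import torch
    from torchvision import tv_tensors

    # RescaleIntensity as defined in the removed module shown above.
    transform = RescaleIntensity(out_range=(0.0, 1.0))

    image = tv_tensors.Image(torch.rand(3, 64, 64) * 255.0)
    mask = tv_tensors.Mask(torch.zeros(64, 64, dtype=torch.int64))

    # v2 transforms accept nested structures; the image is rescaled to [0, 1],
    # while the mask passes through unchanged.
    out_image, out_mask = transform(image, mask)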
@@ -1 +0,0 @@
-python_sources()
@@ -1,5 +0,0 @@
-"""torch.hub backbones API."""
-
-from eva.vision.models.networks.backbones.torchhub.backbones import torch_hub_model
-
-__all__ = ["torch_hub_model"]
@@ -1,61 +0,0 @@
-"""torch.hub backbones."""
-
-import functools
-from typing import Tuple
-
-import torch
-from loguru import logger
-from torch import nn
-
-from eva.core.models import wrappers
-from eva.vision.models.networks.backbones.registry import BackboneModelRegistry
-
-HUB_REPOS = ["facebookresearch/dinov2:main", "kaiko-ai/towards_large_pathology_fms"]
-"""List of torch.hub repositories for which to add the models to the registry."""
-
-
-def torch_hub_model(
-    model_name: str,
-    repo_or_dir: str,
-    checkpoint_path: str | None = None,
-    pretrained: bool = False,
-    out_indices: int | Tuple[int, ...] | None = None,
-    **kwargs,
-) -> nn.Module:
-    """Initializes any ViT model from torch.hub with weights from a specified checkpoint.
-
-    Args:
-        model_name: The name of the model to load.
-        repo_or_dir: The torch.hub repository or local directory to load the model from.
-        checkpoint_path: The path to the checkpoint file.
-        pretrained: If set to `True`, load pretrained model weights if available.
-        out_indices: Whether and which multi-level patch embeddings to return.
-        **kwargs: Additional arguments to pass to the model
-
-    Returns:
-        The VIT model instance.
-    """
-    logger.info(
-        f"Loading torch.hub model {model_name} from {repo_or_dir}"
-        + (f"using checkpoint {checkpoint_path}" if checkpoint_path else "")
-    )
-
-    return wrappers.TorchHubModel(
-        model_name=model_name,
-        repo_or_dir=repo_or_dir,
-        pretrained=pretrained,
-        checkpoint_path=checkpoint_path or "",
-        out_indices=out_indices,
-        model_kwargs=kwargs,
-    )
-
-
-BackboneModelRegistry._registry.update(
-    {
-        f"torchhub/{repo}:{model_name}": functools.partial(
-            torch_hub_model, model_name=model_name, repo_or_dir=repo
-        )
-        for repo in HUB_REPOS
-        for model_name in torch.hub.list(repo, verbose=False)
-    }
-)
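
The removed torchhub backbones module (files 82 and 83 above) auto-registered every model exposed by the listed torch.hub repositories under names like torchhub/facebookresearch/dinov2:main:dinov2_vits14, delegating loading to a wrappers.TorchHubModel. A hedged sketch of what such a registry entry resolved to underneath, using torch.hub directly; it assumes network access to fetch the repository and weights, and dinov2_vits14 is one of the entries exposed by the DINOv2 repo:

    import torch

    # Load a DINOv2 ViT-S/14 backbone straight from torch.hub, the mechanism the
    # removed registry entries ultimately wrapped.
    model = torch.hub.load("facebookresearch/dinov2:main", "dinov2_vits14", pretrained=True)
    model.eval()

    with torch.no_grad():
        # Forward pass returns the CLS-token embedding, shape (1, 384) for ViT-S/14.
        features = model(torch.randn(1, 3, 224, 224))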