kaiko-eva 0.2.2__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eva/core/data/dataloaders/__init__.py +2 -1
- eva/core/data/dataloaders/collate_fn/__init__.py +5 -0
- eva/core/data/dataloaders/collate_fn/collate.py +24 -0
- eva/core/data/dataloaders/dataloader.py +4 -0
- eva/core/interface/interface.py +34 -1
- eva/core/metrics/defaults/classification/multiclass.py +45 -35
- eva/core/models/modules/__init__.py +2 -1
- eva/core/models/modules/scheduler.py +51 -0
- eva/core/models/transforms/extract_cls_features.py +1 -1
- eva/core/models/transforms/extract_patch_features.py +1 -1
- eva/core/models/wrappers/base.py +17 -14
- eva/core/models/wrappers/from_function.py +5 -4
- eva/core/models/wrappers/from_torchhub.py +5 -6
- eva/core/models/wrappers/huggingface.py +8 -5
- eva/core/models/wrappers/onnx.py +4 -4
- eva/core/trainers/functional.py +40 -43
- eva/core/utils/factory.py +66 -0
- eva/core/utils/registry.py +42 -0
- eva/core/utils/requirements.py +26 -0
- eva/language/__init__.py +13 -0
- eva/language/data/__init__.py +5 -0
- eva/language/data/datasets/__init__.py +9 -0
- eva/language/data/datasets/classification/__init__.py +7 -0
- eva/language/data/datasets/classification/base.py +63 -0
- eva/language/data/datasets/classification/pubmedqa.py +149 -0
- eva/language/data/datasets/language.py +13 -0
- eva/language/models/__init__.py +25 -0
- eva/language/models/modules/__init__.py +5 -0
- eva/language/models/modules/text.py +85 -0
- eva/language/models/modules/typings.py +16 -0
- eva/language/models/wrappers/__init__.py +11 -0
- eva/language/models/wrappers/huggingface.py +69 -0
- eva/language/models/wrappers/litellm.py +77 -0
- eva/language/models/wrappers/vllm.py +149 -0
- eva/language/utils/__init__.py +5 -0
- eva/language/utils/str_to_int_tensor.py +95 -0
- eva/vision/data/dataloaders/__init__.py +2 -1
- eva/vision/data/dataloaders/worker_init.py +35 -0
- eva/vision/data/datasets/__init__.py +5 -5
- eva/vision/data/datasets/segmentation/__init__.py +4 -4
- eva/vision/data/datasets/segmentation/btcv.py +3 -0
- eva/vision/data/datasets/segmentation/consep.py +5 -4
- eva/vision/data/datasets/segmentation/lits17.py +231 -0
- eva/vision/data/datasets/segmentation/metadata/__init__.py +1 -0
- eva/vision/data/datasets/segmentation/metadata/_msd_task7_pancreas.py +287 -0
- eva/vision/data/datasets/segmentation/msd_task7_pancreas.py +243 -0
- eva/vision/data/datasets/segmentation/total_segmentator_2d.py +1 -1
- eva/vision/data/transforms/__init__.py +11 -2
- eva/vision/data/transforms/base/__init__.py +5 -0
- eva/vision/data/transforms/base/monai.py +27 -0
- eva/vision/data/transforms/common/__init__.py +2 -1
- eva/vision/data/transforms/common/squeeze.py +24 -0
- eva/vision/data/transforms/croppad/__init__.py +4 -0
- eva/vision/data/transforms/croppad/rand_crop_by_label_classes.py +74 -0
- eva/vision/data/transforms/croppad/rand_crop_by_pos_neg_label.py +6 -2
- eva/vision/data/transforms/croppad/rand_spatial_crop.py +89 -0
- eva/vision/data/transforms/intensity/rand_scale_intensity.py +6 -2
- eva/vision/data/transforms/intensity/rand_shift_intensity.py +8 -4
- eva/vision/models/modules/semantic_segmentation.py +18 -7
- eva/vision/models/networks/backbones/__init__.py +2 -3
- eva/vision/models/networks/backbones/_utils.py +1 -1
- eva/vision/models/networks/backbones/pathology/bioptimus.py +4 -4
- eva/vision/models/networks/backbones/pathology/gigapath.py +2 -2
- eva/vision/models/networks/backbones/pathology/histai.py +3 -3
- eva/vision/models/networks/backbones/pathology/hkust.py +2 -2
- eva/vision/models/networks/backbones/pathology/kaiko.py +7 -7
- eva/vision/models/networks/backbones/pathology/lunit.py +3 -3
- eva/vision/models/networks/backbones/pathology/mahmood.py +3 -3
- eva/vision/models/networks/backbones/pathology/owkin.py +3 -3
- eva/vision/models/networks/backbones/pathology/paige.py +3 -3
- eva/vision/models/networks/backbones/radiology/swin_unetr.py +2 -2
- eva/vision/models/networks/backbones/radiology/voco.py +5 -5
- eva/vision/models/networks/backbones/registry.py +2 -44
- eva/vision/models/networks/backbones/timm/backbones.py +2 -2
- eva/vision/models/networks/backbones/universal/__init__.py +8 -1
- eva/vision/models/networks/backbones/universal/vit.py +53 -3
- eva/vision/models/networks/decoders/segmentation/decoder2d.py +1 -1
- eva/vision/models/networks/decoders/segmentation/linear.py +1 -1
- eva/vision/models/networks/decoders/segmentation/semantic/common.py +2 -2
- eva/vision/models/networks/decoders/segmentation/typings.py +1 -1
- eva/vision/models/wrappers/from_registry.py +14 -9
- eva/vision/models/wrappers/from_timm.py +6 -5
- {kaiko_eva-0.2.2.dist-info → kaiko_eva-0.3.1.dist-info}/METADATA +10 -2
- {kaiko_eva-0.2.2.dist-info → kaiko_eva-0.3.1.dist-info}/RECORD +88 -57
- {kaiko_eva-0.2.2.dist-info → kaiko_eva-0.3.1.dist-info}/WHEEL +1 -1
- eva/vision/data/datasets/segmentation/lits.py +0 -199
- eva/vision/data/datasets/segmentation/lits_balanced.py +0 -94
- /eva/vision/data/datasets/segmentation/{_total_segmentator.py → metadata/_total_segmentator.py} +0 -0
- {kaiko_eva-0.2.2.dist-info → kaiko_eva-0.3.1.dist-info}/entry_points.txt +0 -0
- {kaiko_eva-0.2.2.dist-info → kaiko_eva-0.3.1.dist-info}/licenses/LICENSE +0 -0
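The additions of eva/core/utils/registry.py and eva/core/utils/factory.py, together with the shrinking of eva/vision/models/networks/backbones/registry.py, suggest the backbone registry was generalized into a shared registry/factory utility. Below is a minimal, generic sketch of that pattern; the Registry class, its methods, and the backbone name are illustrative assumptions, not kaiko-eva's actual API.

from typing import Any, Callable, Dict


class Registry:
    """Minimal name-to-constructor registry (illustrative, not eva's actual API)."""

    def __init__(self) -> None:
        self._entries: Dict[str, Callable[..., Any]] = {}

    def register(self, name: str) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
        """Returns a decorator that registers a constructor under `name`."""

        def decorator(fn: Callable[..., Any]) -> Callable[..., Any]:
            if name in self._entries:
                raise KeyError(f"'{name}' is already registered.")
            self._entries[name] = fn
            return fn

        return decorator

    def get(self, name: str) -> Callable[..., Any]:
        """Looks up the constructor registered under `name`."""
        return self._entries[name]


backbones = Registry()


@backbones.register("pathology/example_vit")  # hypothetical backbone name
def _example_vit(**kwargs: Any) -> str:
    # Stand-in for a real model constructor.
    return f"example_vit({kwargs})"


# A factory would resolve a configured name to an instance like this:
model = backbones.get("pathology/example_vit")(out_indices=1)
print(model)

In such a layout, configuration files can refer to models by registry name and let a factory resolve and instantiate them.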
eva/vision/data/datasets/segmentation/lits_balanced.py
DELETED
@@ -1,94 +0,0 @@
-"""Balanced LiTS dataset."""
-
-from typing import Callable, Dict, List, Literal, Tuple
-
-import numpy as np
-from typing_extensions import override
-
-from eva.vision.data.datasets.segmentation import lits
-from eva.vision.utils import io
-
-
-class LiTSBalanced(lits.LiTS):
-    """Balanced version of the LiTS - Liver Tumor Segmentation Challenge dataset.
-
-    For each volume in the dataset, we sample the same number of slices where
-    only the liver and where both liver and tumor are present.
-
-    Webpage: https://competitions.codalab.org/competitions/17094
-
-    For the splits we follow: https://arxiv.org/pdf/2010.01663v2
-    """
-
-    _expected_dataset_lengths: Dict[str | None, int] = {
-        "train": 5514,
-        "val": 1332,
-        "test": 1530,
-        None: 8376,
-    }
-    """Dataset version and split to the expected size."""
-
-    def __init__(
-        self,
-        root: str,
-        split: Literal["train", "val", "test"] | None = None,
-        transforms: Callable | None = None,
-        seed: int = 8,
-    ) -> None:
-        """Initialize dataset.
-
-        Args:
-            root: Path to the root directory of the dataset. The dataset will
-                be downloaded and extracted here, if it does not already exist.
-            split: Dataset split to use.
-            transforms: A function/transforms that takes in an image and a target
-                mask and returns the transformed versions of both.
-            seed: Seed used for generating the dataset splits and sampling of the slices.
-        """
-        super().__init__(root=root, split=split, transforms=transforms, seed=seed)
-
-    @override
-    def _create_indices(self) -> List[Tuple[int, int]]:
-        """Builds the dataset indices for the specified split.
-
-        Returns:
-            A list of tuples, where the first value indicates the
-            sample index and the second its corresponding slice
-            index.
-        """
-        split_indices = set(self._get_split_indices())
-        indices: List[Tuple[int, int]] = []
-        random_generator = np.random.default_rng(seed=self._seed)
-
-        for sample_idx in range(len(self._volume_files)):
-            if sample_idx not in split_indices:
-                continue
-
-            segmentation_nii = io.read_nifti(self._segmentation_file(sample_idx))
-            segmentation = io.nifti_to_array(segmentation_nii)
-            tumor_filter = segmentation == 2
-            tumor_slice_filter = tumor_filter.sum(axis=(0, 1)) > 0
-
-            if tumor_filter.sum() == 0:
-                continue
-
-            liver_filter = segmentation == 1
-            liver_slice_filter = liver_filter.sum(axis=(0, 1)) > 0
-
-            liver_and_tumor_filter = liver_slice_filter & tumor_slice_filter
-            liver_only_filter = liver_slice_filter & ~tumor_slice_filter
-
-            n_slice_samples = min(liver_and_tumor_filter.sum(), liver_only_filter.sum())
-            tumor_indices = list(np.where(liver_and_tumor_filter)[0])
-            tumor_indices = list(
-                random_generator.choice(tumor_indices, size=n_slice_samples, replace=False)
-            )
-
-            liver_indices = list(np.where(liver_only_filter)[0])
-            liver_indices = list(
-                random_generator.choice(liver_indices, size=n_slice_samples, replace=False)
-            )
-
-            indices.extend([(sample_idx, slice_idx) for slice_idx in tumor_indices + liver_indices])
-
-        return list(indices)
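The balancing strategy that the removed LiTSBalanced implemented, sampling equally many liver-only and liver+tumor slices per volume, can be sketched standalone with NumPy. The helper below is hypothetical (it is not part of eva) and only mirrors the _create_indices logic shown above, under the assumption that the segmentation volume is an (H, W, num_slices) label array with liver = 1 and tumor = 2.

import numpy as np


def balanced_slice_indices(
    segmentation: np.ndarray,
    liver_label: int = 1,
    tumor_label: int = 2,
    seed: int = 8,
) -> list:
    """Picks equally many liver-only and liver+tumor slice indices (hypothetical helper)."""
    rng = np.random.default_rng(seed=seed)
    liver_slices = (segmentation == liver_label).sum(axis=(0, 1)) > 0
    tumor_slices = (segmentation == tumor_label).sum(axis=(0, 1)) > 0

    liver_and_tumor = liver_slices & tumor_slices
    liver_only = liver_slices & ~tumor_slices

    # Balance the two groups by down-sampling the larger one to the smaller one's size.
    n_samples = min(liver_and_tumor.sum(), liver_only.sum())
    tumor_idx = rng.choice(np.where(liver_and_tumor)[0], size=n_samples, replace=False)
    liver_idx = rng.choice(np.where(liver_only)[0], size=n_samples, replace=False)
    return sorted(tumor_idx.tolist() + liver_idx.tolist())


# Toy volume: 4x4 pixels, 6 slices; slices 1-3 contain liver, slices 2-3 also contain tumor.
volume = np.zeros((4, 4, 6), dtype=np.int64)
volume[:, :, 1:4] = 1
volume[0, 0, 2:4] = 2
print(balanced_slice_indices(volume))  # one slice from each group, e.g. [1, 2]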
/eva/vision/data/datasets/segmentation/{_total_segmentator.py → metadata/_total_segmentator.py}
RENAMED
File without changes