kaiko-eva 0.2.0__py3-none-any.whl → 0.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of kaiko-eva has been flagged as potentially problematic.
- eva/core/data/datasets/base.py +7 -2
- eva/core/models/modules/head.py +4 -2
- eva/core/models/modules/typings.py +2 -2
- eva/core/models/transforms/__init__.py +2 -1
- eva/core/models/transforms/as_discrete.py +57 -0
- eva/core/models/wrappers/_utils.py +121 -1
- eva/core/trainers/_recorder.py +4 -1
- eva/core/utils/suppress_logs.py +28 -0
- eva/vision/data/__init__.py +2 -2
- eva/vision/data/dataloaders/__init__.py +5 -0
- eva/vision/data/dataloaders/collate_fn/__init__.py +5 -0
- eva/vision/data/dataloaders/collate_fn/collection.py +22 -0
- eva/vision/data/datasets/__init__.py +2 -2
- eva/vision/data/datasets/classification/bach.py +3 -4
- eva/vision/data/datasets/classification/bracs.py +3 -4
- eva/vision/data/datasets/classification/breakhis.py +3 -4
- eva/vision/data/datasets/classification/camelyon16.py +4 -5
- eva/vision/data/datasets/classification/crc.py +3 -4
- eva/vision/data/datasets/classification/gleason_arvaniti.py +3 -4
- eva/vision/data/datasets/classification/mhist.py +3 -4
- eva/vision/data/datasets/classification/panda.py +4 -5
- eva/vision/data/datasets/classification/patch_camelyon.py +3 -4
- eva/vision/data/datasets/classification/unitopatho.py +3 -4
- eva/vision/data/datasets/classification/wsi.py +6 -5
- eva/vision/data/datasets/segmentation/__init__.py +2 -2
- eva/vision/data/datasets/segmentation/_utils.py +47 -0
- eva/vision/data/datasets/segmentation/bcss.py +7 -8
- eva/vision/data/datasets/segmentation/btcv.py +236 -0
- eva/vision/data/datasets/segmentation/consep.py +6 -7
- eva/vision/data/datasets/segmentation/lits.py +9 -8
- eva/vision/data/datasets/segmentation/lits_balanced.py +2 -1
- eva/vision/data/datasets/segmentation/monusac.py +4 -5
- eva/vision/data/datasets/segmentation/total_segmentator_2d.py +12 -10
- eva/vision/data/datasets/vision.py +95 -4
- eva/vision/data/datasets/wsi.py +5 -5
- eva/vision/data/transforms/__init__.py +22 -3
- eva/vision/data/transforms/common/__init__.py +1 -2
- eva/vision/data/transforms/croppad/__init__.py +11 -0
- eva/vision/data/transforms/croppad/crop_foreground.py +110 -0
- eva/vision/data/transforms/croppad/rand_crop_by_pos_neg_label.py +109 -0
- eva/vision/data/transforms/croppad/spatial_pad.py +67 -0
- eva/vision/data/transforms/intensity/__init__.py +11 -0
- eva/vision/data/transforms/intensity/rand_scale_intensity.py +59 -0
- eva/vision/data/transforms/intensity/rand_shift_intensity.py +55 -0
- eva/vision/data/transforms/intensity/scale_intensity_ranged.py +56 -0
- eva/vision/data/transforms/spatial/__init__.py +7 -0
- eva/vision/data/transforms/spatial/flip.py +72 -0
- eva/vision/data/transforms/spatial/rotate.py +53 -0
- eva/vision/data/transforms/spatial/spacing.py +69 -0
- eva/vision/data/transforms/utility/__init__.py +5 -0
- eva/vision/data/transforms/utility/ensure_channel_first.py +51 -0
- eva/vision/data/tv_tensors/__init__.py +5 -0
- eva/vision/data/tv_tensors/volume.py +61 -0
- eva/vision/metrics/segmentation/monai_dice.py +9 -2
- eva/vision/models/modules/semantic_segmentation.py +32 -19
- eva/vision/models/networks/backbones/__init__.py +9 -2
- eva/vision/models/networks/backbones/pathology/__init__.py +11 -2
- eva/vision/models/networks/backbones/pathology/bioptimus.py +47 -1
- eva/vision/models/networks/backbones/pathology/hkust.py +69 -0
- eva/vision/models/networks/backbones/pathology/kaiko.py +18 -0
- eva/vision/models/networks/backbones/radiology/__init__.py +11 -0
- eva/vision/models/networks/backbones/radiology/swin_unetr.py +231 -0
- eva/vision/models/networks/backbones/radiology/voco.py +75 -0
- eva/vision/models/networks/decoders/segmentation/__init__.py +6 -2
- eva/vision/models/networks/decoders/segmentation/linear.py +5 -10
- eva/vision/models/networks/decoders/segmentation/semantic/__init__.py +8 -1
- eva/vision/models/networks/decoders/segmentation/semantic/swin_unetr.py +104 -0
- eva/vision/utils/io/__init__.py +2 -0
- eva/vision/utils/io/nifti.py +91 -11
- {kaiko_eva-0.2.0.dist-info → kaiko_eva-0.2.2.dist-info}/METADATA +16 -12
- {kaiko_eva-0.2.0.dist-info → kaiko_eva-0.2.2.dist-info}/RECORD +74 -58
- {kaiko_eva-0.2.0.dist-info → kaiko_eva-0.2.2.dist-info}/WHEEL +1 -1
- eva/vision/data/datasets/classification/base.py +0 -96
- eva/vision/data/datasets/segmentation/base.py +0 -96
- eva/vision/data/transforms/common/resize_and_clamp.py +0 -51
- eva/vision/data/transforms/normalization/__init__.py +0 -6
- eva/vision/data/transforms/normalization/clamp.py +0 -43
- eva/vision/data/transforms/normalization/functional/__init__.py +0 -5
- eva/vision/data/transforms/normalization/functional/rescale_intensity.py +0 -28
- eva/vision/data/transforms/normalization/rescale_intensity.py +0 -53
- eva/vision/metrics/segmentation/BUILD +0 -1
- eva/vision/models/networks/backbones/torchhub/__init__.py +0 -5
- eva/vision/models/networks/backbones/torchhub/backbones.py +0 -61
- {kaiko_eva-0.2.0.dist-info → kaiko_eva-0.2.2.dist-info}/entry_points.txt +0 -0
- {kaiko_eva-0.2.0.dist-info → kaiko_eva-0.2.2.dist-info}/licenses/LICENSE +0 -0
{kaiko_eva-0.2.0.dist-info → kaiko_eva-0.2.2.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: kaiko-eva
-Version: 0.2.0
+Version: 0.2.2
 Summary: Evaluation Framework for oncology foundation models.
 Keywords: machine-learning,evaluation-framework,oncology,foundation-models
 Author-Email: Ioannis Gatopoulos <ioannis@kaiko.ai>, =?utf-8?q?Nicolas_K=C3=A4nzig?= <nicolas@kaiko.ai>, Roman Moser <roman@kaiko.ai>
@@ -241,6 +241,7 @@ Requires-Dist: scikit-image>=0.24.0; extra == "vision"
 Requires-Dist: imagesize>=1.4.1; extra == "vision"
 Requires-Dist: scipy>=1.14.0; extra == "vision"
 Requires-Dist: monai>=1.3.2; extra == "vision"
+Requires-Dist: einops>=0.8.1; extra == "vision"
 Provides-Extra: all
 Requires-Dist: h5py>=3.10.0; extra == "all"
 Requires-Dist: nibabel>=4.0.1; extra == "all"
@@ -253,6 +254,7 @@ Requires-Dist: scikit-image>=0.24.0; extra == "all"
 Requires-Dist: imagesize>=1.4.1; extra == "all"
 Requires-Dist: scipy>=1.14.0; extra == "all"
 Requires-Dist: monai>=1.3.2; extra == "all"
+Requires-Dist: einops>=0.8.1; extra == "all"
 Description-Content-Type: text/markdown
 
 <div align="center">
@@ -291,7 +293,7 @@ Check out the [documentation](https://kaiko-ai.github.io/eva/) for more informat
 
 ### Highlights:
 - Easy and reliable benchmark of Oncology FMs
-- Supports
+- Supports patch-level classification, slide-level classification and semantic segmentation downstream tasks
 - Automatic embedding inference and evaluation of a downstream task
 - Native support of popular medical [datasets](https://kaiko-ai.github.io/eva/dev/datasets/) and models
 - Produce statistics over multiple evaluation fits and multiple metrics
@@ -446,26 +448,28 @@ input, resulting in a faster evaluation.
 Here are some examples to get you started:
 
 - Perform a downstream offline **classification** evaluation of `DINO ViT-S/16`
-  on the `BACH` dataset with linear probing by first
-  and then performing 5 sequential fits:
+  on the `BACH` dataset with linear probing by first pre-calculating the embeddings:
   ```sh
-
-
+  DOWNLOAD_DATA=true \
+  MODEL_NAME=universal/vit_small_patch16_224_dino \
+  eva predict_fit --config https://raw.githubusercontent.com/kaiko-ai/eva/main/configs/vision/pathology/offline/classification/bach.yaml
   ```
 
-- Perform a downstream online **segmentation** evaluation of `DINO ViT-S/16` on the
-  `MoNuSAC` dataset with the `ConvDecoderMS` decoder:
+- Perform a downstream online **segmentation** evaluation of `DINO ViT-S/16` on the `MoNuSAC` dataset with the `ConvDecoderWithImage` decoder:
   ```sh
-
-
+  DOWNLOAD_DATA=true \
+  MODEL_NAME=universal/vit_small_patch16_224_dino \
+  eva fit --config https://raw.githubusercontent.com/kaiko-ai/eva/main/configs/vision/pathology/online/segmentation/monusac.yaml
   ```
 
+By default `eva` will perform 5 evaluation runs using different seeds, however, you can control the number of runs through the `N_RUNS` environment variable or in the configuration file. The results will be saved to `./logs` by default, or to `OUTPUT_ROOT` if specified.
+
 For more examples, take a look at the [configs](https://github.com/kaiko-ai/eva/tree/main/configs)
-and [tutorials](https://kaiko-ai.github.io/eva/
+and [tutorials](https://kaiko-ai.github.io/eva/main/user-guide/advanced/replicate_evaluations/).
 
 > [!NOTE]
 > All the datasets that support automatic download in the repo have by default the option to automatically download set to false.
-> For automatic download you have to manually set the
+> For automatic download you have to manually set the environment variable `DOWNLOAD_DATA=true` or in the configuration file `download=true`.
 
 ## Leaderboards
 
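The new README text above documents the `N_RUNS` and `OUTPUT_ROOT` environment variables alongside `DOWNLOAD_DATA` and `MODEL_NAME`. As a hedged illustration (not taken from the package itself), the segmentation example could be limited to two seeds with a custom output directory roughly like this, assuming the variables behave as the README describes:

```sh
# Illustrative sketch only: combines the environment variables mentioned in
# the README diff above with the same MoNuSAC config URL from the example.
DOWNLOAD_DATA=true \
N_RUNS=2 \
OUTPUT_ROOT=./logs/monusac-vit-s16 \
MODEL_NAME=universal/vit_small_patch16_224_dino \
eva fit --config https://raw.githubusercontent.com/kaiko-ai/eva/main/configs/vision/pathology/online/segmentation/monusac.yaml
```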
{kaiko_eva-0.2.0.dist-info → kaiko_eva-0.2.2.dist-info}/RECORD
@@ -23,7 +23,7 @@ eva/core/data/datamodules/call.py,sha256=jjj9w3UXYuQB-qyCcw1EZpRJW10OC1I3dvgvsuQ
 eva/core/data/datamodules/datamodule.py,sha256=_pK59oXDe53oDkmv6eoJUvfl44WlFkrbC8KXSRMs_20,5533
 eva/core/data/datamodules/schemas.py,sha256=rzcf3uow6T6slVSwxEGDVmpi3QUvkiDoT_gCF3aMAEE,2262
 eva/core/data/datasets/__init__.py,sha256=jWPxT3gjQjwS6HqVZAb7KhMgzgklPgHeH51iPxDh_Tg,493
-eva/core/data/datasets/base.py,sha256=
+eva/core/data/datasets/base.py,sha256=w8c9Jh3DiXVfBEdLfWpdE190lPAeOFqCQlXl7RqlDOU,2671
 eva/core/data/datasets/classification/__init__.py,sha256=wJ2jD9YODftt-dMcMf0TbCjJt47qXYBKkD4-XXajvRQ,340
 eva/core/data/datasets/classification/embeddings.py,sha256=hBO6dIRHAhoCaYb3ANc9fgvdBjyQNKPTrIhjc9y8-Ys,1108
 eva/core/data/datasets/classification/multi_embeddings.py,sha256=4hQy4741NDKqWCpm3kGq7aC28DF5gcwUuIpYhnbTyeM,4601
@@ -73,21 +73,22 @@ eva/core/metrics/structs/schemas.py,sha256=ZaSrx0j_NfIwT7joMUD1LyrKdAXTLaeSzWYTH
 eva/core/metrics/structs/typings.py,sha256=qJd-FiD2IhJgBeo8FyP0vpVUIH4RKb1k6zYvHtjUA04,388
 eva/core/models/__init__.py,sha256=T6Fo886LxMj-Y58_ylzkPkFSnFR2aISiMIbuO_weC4s,430
 eva/core/models/modules/__init__.py,sha256=QJWJ42BceXZBzDGgk5FHBcCaRrB9egTFKVF6gDsBYfM,255
-eva/core/models/modules/head.py,sha256=
+eva/core/models/modules/head.py,sha256=bZ45RBPi3N8sjvKyt2_TeKWI1eB6GyBeGzV6J11ERO8,5225
 eva/core/models/modules/inference.py,sha256=ih-0Rr2oNf2N6maiXPOW7XH5KVwUT1_MOxnJKOhJ1uQ,978
 eva/core/models/modules/module.py,sha256=LtjYxTZb7UY0owonmt_yQ5EySw3sX-xD9HLN2io8EK4,6697
-eva/core/models/modules/typings.py,sha256=
+eva/core/models/modules/typings.py,sha256=LPR8JdIid2gJZpjMG1FcH5OZ60JlFOj_LupIh__2k_8,803
 eva/core/models/modules/utils/__init__.py,sha256=ScLCHwQfzlg_UsHVi5sf_SavUkh9secwtRn_umC_qA8,325
 eva/core/models/modules/utils/batch_postprocess.py,sha256=RwnDcjJy3uvVirpgx_80Q2CUYKfJKipVwjyX7AF2CKw,3088
 eva/core/models/modules/utils/checkpoint.py,sha256=Zp42rtmjgUC4VUMwFyG5aar-E0Hc5i7qUsxkV7AVKkE,700
 eva/core/models/modules/utils/grad.py,sha256=bl8qb8g4Nhg1KAGfbEV_9HTKkoT0azRwfs9KGX9swGs,706
 eva/core/models/networks/__init__.py,sha256=yqx6UmG1Eg3vb1O_tnK_axnJWabEl9ULkDWiPN440Xc,85
 eva/core/models/networks/mlp.py,sha256=thk-x4pviE3fCaMW9k3I2Oe5_DxfC-CqUrtolvVdXug,2418
-eva/core/models/transforms/__init__.py,sha256=
+eva/core/models/transforms/__init__.py,sha256=AOy_2VY3ITLRk2PMqe6xfErvV7V2_XsnPQwEMhovxOU,333
+eva/core/models/transforms/as_discrete.py,sha256=1w2NmcLzEuyPhaoVXl6jZTdblk7DPf6W6gQ_qi96hQM,1901
 eva/core/models/transforms/extract_cls_features.py,sha256=tFRd4H-eGFIGCfZt6wuZGibDmAoNXKSsn15bBw0IDdc,1482
 eva/core/models/transforms/extract_patch_features.py,sha256=k50jTLPWxbfvciH9QZSzTAGqWwDSVpXAteme_Qg2d6E,2202
 eva/core/models/wrappers/__init__.py,sha256=jaiANQdbO-IPgH8U-Y0ftFsuuCAM5i5KuYRHauKw5k8,450
-eva/core/models/wrappers/_utils.py,sha256=
+eva/core/models/wrappers/_utils.py,sha256=ZWe9Ih_0kH5Wg_AQAtAn77LZ_CODAve5u3G12ifLNsc,4902
 eva/core/models/wrappers/base.py,sha256=xKMUSXk93wI67p_wmh7jujK-bxvIefO1noYaAJN_5Ak,1359
 eva/core/models/wrappers/from_function.py,sha256=_vKBwtfilCNCnOaJTofE6l5bM2K3qJ8GyBT-0CM5FXY,1831
 eva/core/models/wrappers/from_torchhub.py,sha256=OAImGKRG4pfDXHsoriykC_iiO8QvK3nAWnQCE0mIGuk,3285
@@ -95,7 +96,7 @@ eva/core/models/wrappers/huggingface.py,sha256=5CoNieivdjwvoawo7dZtWfYZkW-Mey1j0
 eva/core/models/wrappers/onnx.py,sha256=-iV-IlczTvTTEQuJycZeSVWdSl2kVJXc1eeRLgQQZ7Q,1834
 eva/core/trainers/__init__.py,sha256=jhsKJF7HAae7EOiG3gKIAHH_h3dZlTE2JRcCHJmOzJc,208
 eva/core/trainers/_logging.py,sha256=gi4FqPy2GuVmh0WZY6mYwF7zMPvnoFA050B0XdCP6PU,2571
-eva/core/trainers/_recorder.py,sha256=
+eva/core/trainers/_recorder.py,sha256=uD17l_WVveFuWuann59VU9iJ-Jumdh9F6vnAcL3M_FU,7855
 eva/core/trainers/_utils.py,sha256=M3h8lVhUmkeSiEXpX9hRdMvThGFCnTP15gv-hd1CZkc,321
 eva/core/trainers/functional.py,sha256=rLtQZw8TcAa4NYIf901TmoQiJDNm4RGVLN-64nku3Jo,4445
 eva/core/trainers/trainer.py,sha256=a3OwLWOZKDqxayrd0ugUmxJKyQx6XDb4GHtdL8-AEV0,4826
@@ -109,6 +110,7 @@ eva/core/utils/multiprocessing.py,sha256=BWX8AW_KPLgIIlbsPG1kYdtbHPx6Dklw13bu4u8
 eva/core/utils/operations.py,sha256=eoC_ScuHUMDCuk08j1bosiQZdPrgiIODqqheR9MtJHQ,641
 eva/core/utils/parser.py,sha256=2czmwEGJJ6PtmaD86s9I14P-_sek4DmDCkEatRGT5sI,725
 eva/core/utils/progress_bar.py,sha256=KvvsM_v3_Fhb4JvbEEPHb4PJMokg6mNLj-o6dkfzcMc,499
+eva/core/utils/suppress_logs.py,sha256=pOk1076J0mKWn2lgDqEVC1g65FXhA_2IkC4LBEWhnwQ,902
 eva/core/utils/workers.py,sha256=hfx63M82qNg0Dwhre2tl53MnhtRsV7APaDONM9nhVB8,634
 eva/vision/__init__.py,sha256=oUZXFYjwtkWzi8An0uS5Xc84pLKintlXe2iti8zW6BQ,480
 eva/vision/callbacks/__init__.py,sha256=su1V73L0dDVYWSyvV_lnWbszDi2KikRraF7OsgeaKl4,139
@@ -116,46 +118,58 @@ eva/vision/callbacks/loggers/__init__.py,sha256=td1JRJbE08nsGIZdO64_yLC3FUuMDp0k
 eva/vision/callbacks/loggers/batch/__init__.py,sha256=DVYP7Aonbi4wg_ERHRj_8kb87Ee_75wRZzdduJ_icQk,173
 eva/vision/callbacks/loggers/batch/base.py,sha256=hcAd5iiHvjZ0DIf4Qt4ENT54D6ky_1OO4rKQZqeo-1k,3628
 eva/vision/callbacks/loggers/batch/segmentation.py,sha256=GYh2kfexW5pUZ0BdApYJI3e8xsuNkjIzkj5jnuKtHR4,6886
-eva/vision/data/__init__.py,sha256=
-eva/vision/data/
+eva/vision/data/__init__.py,sha256=zuLOC8ExyeQGlwib1LB70RedrTC9w0siOhFTQIRb0V8,137
+eva/vision/data/dataloaders/__init__.py,sha256=7AOD_UF3hMokrGxJ2tbawH44ujQaesDsaW-3HWorYv8,128
+eva/vision/data/dataloaders/collate_fn/__init__.py,sha256=GCvJaeILmAc_-lhGw8yzj2cC2KG4i1PvSWAyVzPKvVo,146
+eva/vision/data/dataloaders/collate_fn/collection.py,sha256=45s9fKjVBnqfnuGWmJZMtt_DDGnfuf7qkWe0QmxXMKo,611
+eva/vision/data/datasets/__init__.py,sha256=NW034jrOnpDwYBQfsTjWG6jDJY_dPWKV-cq37HkBm10,1014
 eva/vision/data/datasets/_utils.py,sha256=epPcaYE4w2_LtUKLLQJh6qQxUNVBe22JA06k4WUerYQ,1430
 eva/vision/data/datasets/_validators.py,sha256=77WZj8ewsuxUjW5WegJ-7zDuR6WdF5JbaOYdywhKIK4,2594
 eva/vision/data/datasets/classification/__init__.py,sha256=5fOGZxKGPeMCf3Jd9qAOYADPrkZnYg97_QE4DC79AMI,1074
-eva/vision/data/datasets/classification/bach.py,sha256=
-eva/vision/data/datasets/classification/
-eva/vision/data/datasets/classification/
-eva/vision/data/datasets/classification/
-eva/vision/data/datasets/classification/
-eva/vision/data/datasets/classification/
-eva/vision/data/datasets/classification/
-eva/vision/data/datasets/classification/
-eva/vision/data/datasets/classification/
-eva/vision/data/datasets/classification/
-eva/vision/data/datasets/classification/
-eva/vision/data/datasets/
-eva/vision/data/datasets/segmentation/__init__.py,sha256=hGNr7BM_StxvmlOKWWfHp615qgsrB6BB3qMOiYhE0Og,791
+eva/vision/data/datasets/classification/bach.py,sha256=Qzkb0aUNR8yMXwjDx8RmIfvPYVmlUW3dippMKrIVqzU,5410
+eva/vision/data/datasets/classification/bracs.py,sha256=FYe33SmdlFOAl1Ef10uXN7Y8kvlbKuWBqIycFPHtMMU,3325
+eva/vision/data/datasets/classification/breakhis.py,sha256=e01gguDCHvp6U18exCm5svWr8EpM_HLbLAq9KJifkpo,6883
+eva/vision/data/datasets/classification/camelyon16.py,sha256=7E0ju4cctUyprBK063TVXLBN4Fp6cKMICoquv3e5JhQ,8261
+eva/vision/data/datasets/classification/crc.py,sha256=sv18Lw4iUqGkYXEQN-kyZV_Foc_X15praVR4nN_klkg,5648
+eva/vision/data/datasets/classification/gleason_arvaniti.py,sha256=z1OQlxZYx-n3S2wcfu-GuchySRw0E70PURJDsvejFjE,5871
+eva/vision/data/datasets/classification/mhist.py,sha256=I-guWIucQZBHdSx-TWP24NXHf9IA9lU4hyfPZbJop0g,3033
+eva/vision/data/datasets/classification/panda.py,sha256=HVfCvByyajdo5o_waqTpzZWCbQXQqPjvvyS5I0NAvns,7277
+eva/vision/data/datasets/classification/patch_camelyon.py,sha256=1yXkfP680qxkQUFAPKRFbZv0cHAFx23s2vvT9th2nKM,7149
+eva/vision/data/datasets/classification/unitopatho.py,sha256=IO3msEsuOnmdcYZxF-eBpo0K97y54rWFmCb_KxuF4bk,5129
+eva/vision/data/datasets/classification/wsi.py,sha256=YMGxU8ECjudizt_uXUevuPS8k66HxtEQ7M2IZJmL6kE,4079
+eva/vision/data/datasets/segmentation/__init__.py,sha256=YA7qx4B-pfsILfONa2AfIQHKzDnv8l0sHwjsSXa5-vQ,765
 eva/vision/data/datasets/segmentation/_total_segmentator.py,sha256=DTaQaAisY7j1h0-zYk1_81Sr4b3D9PTMieYX0PMPtIc,3127
-eva/vision/data/datasets/segmentation/_utils.py,sha256=
-eva/vision/data/datasets/segmentation/
-eva/vision/data/datasets/segmentation/
-eva/vision/data/datasets/segmentation/consep.py,sha256=
+eva/vision/data/datasets/segmentation/_utils.py,sha256=aXUHrnbefP6-OgSvDQHqssFKhUwETul_8aosqYiOfm8,3065
+eva/vision/data/datasets/segmentation/bcss.py,sha256=rqk6VqK0QCHLFnMnDuHd1JPJVK5_C6WnsmnNSKBw6Uo,8230
+eva/vision/data/datasets/segmentation/btcv.py,sha256=GNgr8pLx7uvZ2pxnYZ8N9SfB9luduMTM9IQ1OHPgBxI,8257
+eva/vision/data/datasets/segmentation/consep.py,sha256=SBH1vD3RjFNRMVeo07d2PqSAInZsWHR2d3xCpCoDVpM,6014
 eva/vision/data/datasets/segmentation/embeddings.py,sha256=RsTuAwGEJPnWPY7q3pwcjmqtEj0wtRBNRBD4a0RcGtA,1218
-eva/vision/data/datasets/segmentation/lits.py,sha256=
-eva/vision/data/datasets/segmentation/lits_balanced.py,sha256=
-eva/vision/data/datasets/segmentation/monusac.py,sha256=
-eva/vision/data/datasets/segmentation/total_segmentator_2d.py,sha256=
+eva/vision/data/datasets/segmentation/lits.py,sha256=AsKsTQZBNXlYU_UllBrdr04rS8K4TDkG_vqR-aVr5ik,7267
+eva/vision/data/datasets/segmentation/lits_balanced.py,sha256=OQ2AK6-wLE0uMvgQJtfBJTUJqS_WBfmsJXgBfe4gU8A,3451
+eva/vision/data/datasets/segmentation/monusac.py,sha256=iv9-MFaTsGfGV1u6_lQNcSEeSpmVBDQC1Oa123iEtu0,8410
+eva/vision/data/datasets/segmentation/total_segmentator_2d.py,sha256=3cWpJkZmJ7IUJhigw69YLFOg2_-yzXSLGXqWVPUsn8Y,16978
 eva/vision/data/datasets/structs.py,sha256=RaTDW-B36PumcR5gymhCiX-r8GiKqIFcjqoEEjjFyUE,389
-eva/vision/data/datasets/vision.py,sha256
-eva/vision/data/datasets/wsi.py,sha256
-eva/vision/data/transforms/__init__.py,sha256=
-eva/vision/data/transforms/common/__init__.py,sha256=
-eva/vision/data/transforms/common/resize_and_clamp.py,sha256=f9-YIX0S9GMAXHP7TWlyRlGfZIVvHgoBHqQ8PzaKbKs,1736
+eva/vision/data/datasets/vision.py,sha256=-_WRiyICMgqABR6Ay_RKBMfsPGwgx9MQfCA7WChHo24,3219
+eva/vision/data/datasets/wsi.py,sha256=dEAT_Si_Qb3qdSovUPeoiWeoPb7m-NGYqq44e3UXHk8,8384
+eva/vision/data/transforms/__init__.py,sha256=Bv1aPvjahteAZzVGSuxzHz2LRwa63NV7IcoPzKUt_fY,720
+eva/vision/data/transforms/common/__init__.py,sha256=ZHzpdr-THc9CgFFbAVMWUiZrUNUiHnCDM8GYhM7tMfU,138
 eva/vision/data/transforms/common/resize_and_crop.py,sha256=GI1HTkbJ9qg4p8c6vk_XkXO0Qi6mBeUeiZIA0jVtmAw,1360
-eva/vision/data/transforms/
-eva/vision/data/transforms/
-eva/vision/data/transforms/
-eva/vision/data/transforms/
-eva/vision/data/transforms/
+eva/vision/data/transforms/croppad/__init__.py,sha256=d36WGe9x39p-d7VymRM29qdquv8YEa0RfsTfwt7Cou4,375
+eva/vision/data/transforms/croppad/crop_foreground.py,sha256=3o27nOgxfRo8ap45lpmnaiAIZ08kdyp14vYpr4BC8zc,4865
+eva/vision/data/transforms/croppad/rand_crop_by_pos_neg_label.py,sha256=8CwMYAOQgOIb1Uw1jc219aqY3s2tCWd6r-2nU7kqOLc,5538
+eva/vision/data/transforms/croppad/spatial_pad.py,sha256=j5V2vvgGcf75GzGyAT7mGgpvlEOS2BnAcThRdt7Und4,2857
+eva/vision/data/transforms/intensity/__init__.py,sha256=mNp6pi0pnHcA24kQuiGHzMb4XLRaR0Lgi-Vb7Nl-Aoo,408
+eva/vision/data/transforms/intensity/rand_scale_intensity.py,sha256=DDcFWTmq5UbwISO9qGIPOQJ72rx7JQWtVi2OxggLzyE,2041
+eva/vision/data/transforms/intensity/rand_shift_intensity.py,sha256=9YNREhRoCzLOt2C21daum62cbB53ZRcYOSuSW_Jz7eQ,1974
+eva/vision/data/transforms/intensity/scale_intensity_ranged.py,sha256=VLvYZYG6jQCuR5poJsAlhIFjw6VjPEpcDPKBlJTjYBM,1873
+eva/vision/data/transforms/spatial/__init__.py,sha256=k7C_p4fMZd7A00ikldAMsprYDedKrlMjKQB6BLA5InA,284
+eva/vision/data/transforms/spatial/flip.py,sha256=jfRc-wPBvG58OtCNU3GrOkb57kcRddRqpwcAdCB0_No,2553
+eva/vision/data/transforms/spatial/rotate.py,sha256=FpMTAPWtgrG10yQ3R1_Ii6obPcn3boNWOuLhsblxUbQ,1793
+eva/vision/data/transforms/spatial/spacing.py,sha256=T1UhqK-OhhbLQxzejMyI8BQzYRF44PNc02Qap4nk1hY,2695
+eva/vision/data/transforms/utility/__init__.py,sha256=TjncS2aOgRJwjjRuIvmr4eRz2nKVg6b76tThp4UlzII,163
+eva/vision/data/transforms/utility/ensure_channel_first.py,sha256=jpnV7oWel1ZSL2VUf3wUdbB8xM2OFD8R6xpHcPCJVgw,1945
+eva/vision/data/tv_tensors/__init__.py,sha256=qla_QYWN52vP0IlTmHlTZF4kLh9xj-Zy-WxQgXakYyk,125
+eva/vision/data/tv_tensors/volume.py,sha256=VlWTIbswNv-aUqEWd1EJgoqEH60d-gNALPG815TD_W8,2381
 eva/vision/data/wsi/__init__.py,sha256=vfSfyogsj4OS1sGKfsYWyj2O5ZMT9iqkc1lvcuZJVGI,422
 eva/vision/data/wsi/backends/__init__.py,sha256=wX7cjeT7ktX8sH6lRDEEU5cgRKLH6RhPyey16aJthJ4,2251
 eva/vision/data/wsi/backends/base.py,sha256=0oFzMc3zklLyqyD_kzDKekydeFyDReqjBBj1qJLdM9Y,4094
@@ -177,45 +191,47 @@ eva/vision/metrics/__init__.py,sha256=zXOc1Idgfk86CGE5yBHn3B22iD5tRyfl4H-kTSB2dC
 eva/vision/metrics/defaults/__init__.py,sha256=ncQ9uH5q5SpfalyPX6dINPRLk34HLw6z9u8ny_HHbFQ,174
 eva/vision/metrics/defaults/segmentation/__init__.py,sha256=ve6dwyfhJGYBYKS6l6OySCBs32JnEBFnvhAyNvj-Uqo,191
 eva/vision/metrics/defaults/segmentation/multiclass.py,sha256=MUBp-PIyiJB2VVV_NintRrP7Ha2lJ75_3xvqSdeDYwE,2855
-eva/vision/metrics/segmentation/BUILD,sha256=Nf7BYWWe1USoFEIsIiEVZ8sa05J5FPkMJ-UIMDLrU8o,17
 eva/vision/metrics/segmentation/__init__.py,sha256=7iz3fFNd-iBuNyxdeSfsgp6D7oZtmPsbyA0ZKRzzRCw,402
 eva/vision/metrics/segmentation/_utils.py,sha256=_ubv2sP1-f_dLKy8Y4wLkj5ed56fAFLURfv1shQWVcs,2402
 eva/vision/metrics/segmentation/dice.py,sha256=H_U6XSZcieX0xb6aptxxW1s-Jshs8Lp4P1SAwjdwntM,2905
 eva/vision/metrics/segmentation/generalized_dice.py,sha256=T57An-lBVefnlv6dIWVRNghFxy0e0K470xwla0TbCSk,2436
 eva/vision/metrics/segmentation/mean_iou.py,sha256=2PjqTa_VAtnW4nxHzT93uBKgnml7INU-wt_jR68RM54,2104
-eva/vision/metrics/segmentation/monai_dice.py,sha256=
+eva/vision/metrics/segmentation/monai_dice.py,sha256=I_DX6r4y5d9QzxI3WyMV14uwt1uqrKlRqbNHqGMtmy0,2421
 eva/vision/metrics/wrappers/__init__.py,sha256=V4z3hradMa6CQgTkk1bc2cbZzCgcoIYw7-hufMK3D_4,128
 eva/vision/metrics/wrappers/monai.py,sha256=FNa1yHN2U3vO6BGqS0BFm8uJAL6DCzSE4XOFCV4aBjg,885
 eva/vision/models/__init__.py,sha256=a-P6JL73A3miHQnqgqUz07XtVmQB_o4DqPImk5rEATo,275
 eva/vision/models/modules/__init__.py,sha256=vaM_V6OF2s0lYjralP8dzv8mAtv_xIMZItfXgz0NZg8,156
-eva/vision/models/modules/semantic_segmentation.py,sha256=
+eva/vision/models/modules/semantic_segmentation.py,sha256=f04QwxSt8x9oVHf5JMeN5b_PMPmfLcso_icDBma1ToE,7930
 eva/vision/models/networks/__init__.py,sha256=j43IurizNlAyKPH2jwDHaeq49L2QvwbHWqUaptA1mG4,100
 eva/vision/models/networks/abmil.py,sha256=N1eH4fn1nXmgXurSQyQIxxonv7nsqeeuPWaQSHeltfs,6796
-eva/vision/models/networks/backbones/__init__.py,sha256=
+eva/vision/models/networks/backbones/__init__.py,sha256=mvYVtmJOvYLCXDX52hP6dzQxj9cQikwSeBZvEDNyNmU,347
 eva/vision/models/networks/backbones/_utils.py,sha256=V7xeod4mElEuuO1TRW0xJE051cUyS1Saraw3-KcK1Mw,1667
-eva/vision/models/networks/backbones/pathology/__init__.py,sha256=
-eva/vision/models/networks/backbones/pathology/bioptimus.py,sha256=
+eva/vision/models/networks/backbones/pathology/__init__.py,sha256=JZ1mhKm4w89JTrXDfTM02OyFWtDuxRhhvpytDk_t500,1386
+eva/vision/models/networks/backbones/pathology/bioptimus.py,sha256=NrS0WJqiJKjDYT3odQGLPgnzMuCbJfWoW1Dal-L9F50,2626
 eva/vision/models/networks/backbones/pathology/gigapath.py,sha256=mfGXtKhY7XLpKQQAFNVZYsM-aeHCEbOVUrxpAEOr-l8,955
 eva/vision/models/networks/backbones/pathology/histai.py,sha256=X_we3U7GK91RrXyOX2PJB-YFDF2ozdL2fzZhNxm9SVU,1914
-eva/vision/models/networks/backbones/pathology/
+eva/vision/models/networks/backbones/pathology/hkust.py,sha256=bZpzx7EvK4CVefNnJmyz-2Ta-WdYDwEDzf-zWoZkoCQ,2308
+eva/vision/models/networks/backbones/pathology/kaiko.py,sha256=lVzgWhgFn1iOlfSSxsX2cH16rrFQFjzdaF6_HS1y-6c,4517
 eva/vision/models/networks/backbones/pathology/lunit.py,sha256=ku4lr9pWeeHatHN4x4OVgwlve9sVqiRqIbgI0PXLiqg,2160
 eva/vision/models/networks/backbones/pathology/mahmood.py,sha256=VYoVWrMNkoaEqa0och-GbwGd0VISQmbtzk1dSBZ1M0I,2464
 eva/vision/models/networks/backbones/pathology/owkin.py,sha256=uWJV5fgY7UZX6ilgGzkPY9fnlOiF03W7E8rc9TmlHGg,1231
 eva/vision/models/networks/backbones/pathology/paige.py,sha256=MjOLgdEKk8tdAIpCiHelasGwPE7xgzaooW6EE7IsuEE,1642
+eva/vision/models/networks/backbones/radiology/__init__.py,sha256=pD8ijQZRaX_Lu3tPBV73qUVaAURDrB_2pEyyBdRZmis,294
+eva/vision/models/networks/backbones/radiology/swin_unetr.py,sha256=n5lJkoKjxKogs5Q_XuKh7Q5J96Bgln5W4ShL-VwSZXs,7976
+eva/vision/models/networks/backbones/radiology/voco.py,sha256=sICZnsxQYnqYEmauhB6CBmaqpzBoAB6CpXJjNm5FesI,2464
 eva/vision/models/networks/backbones/registry.py,sha256=anjILtEHHB6Ltwiw22h1bsgWtIjh_l5_fkPh87K7-d0,1631
 eva/vision/models/networks/backbones/timm/__init__.py,sha256=cZH3av9gIZcvEVD0rwKsI-MEq7zPqaW4dQ0E05CksvQ,128
 eva/vision/models/networks/backbones/timm/backbones.py,sha256=fCTiwqU6NhQ-ccAMzmpPDddXkFzRAB3mw4lcQ9um_PU,1646
-eva/vision/models/networks/backbones/torchhub/__init__.py,sha256=zBLJBvkwKJ1jD7M3Wt5BE6Cx-R8G2YRoyPG7p2V-3nQ,147
-eva/vision/models/networks/backbones/torchhub/backbones.py,sha256=hgCCoP8AdRSsli0w9a_PRNB-UR36-SLLhBIW0BFrkdE,1911
 eva/vision/models/networks/backbones/universal/__init__.py,sha256=MAlkALSJ2_w6spSbB7NmKlL0Jsk1YKEycatdI0xO0_I,252
 eva/vision/models/networks/backbones/universal/vit.py,sha256=kpUCoXpefR34hRNlQDFK9lGr4oqS8Mn5vTLKWZ-gaOs,1820
 eva/vision/models/networks/decoders/__init__.py,sha256=RXFWmoYw2i6E9VOUCJmU8c72icHannVuo-cUKy6fnLM,200
-eva/vision/models/networks/decoders/segmentation/__init__.py,sha256=
+eva/vision/models/networks/decoders/segmentation/__init__.py,sha256=SqmxtzxwBRF8g2hsiqe0o3Nr0HFK97azTnWLyqsYigY,652
 eva/vision/models/networks/decoders/segmentation/base.py,sha256=b2TIJKiJR9vejVRpNyedMJLPTrpHhAEXvco8atb9TPU,411
 eva/vision/models/networks/decoders/segmentation/decoder2d.py,sha256=A7vz0LJ_YweftpKeEBJm0Y3N7hbVLDSIkAajaQv1UgE,4456
-eva/vision/models/networks/decoders/segmentation/linear.py,sha256
-eva/vision/models/networks/decoders/segmentation/semantic/__init__.py,sha256=
+eva/vision/models/networks/decoders/segmentation/linear.py,sha256=PZeEIH0ybgxgIKtmcflh8jsARo5NQqkgoGbpAZd7yj4,4650
+eva/vision/models/networks/decoders/segmentation/semantic/__init__.py,sha256=2yol7W1ARXL-Ge7gYxjUzaGTjH6nfMBlNqQJHprEWGg,539
 eva/vision/models/networks/decoders/segmentation/semantic/common.py,sha256=fPTb0T-2FiOU-jT81ynASKaW7fJiRk6vQjuPkzHOluc,2530
+eva/vision/models/networks/decoders/segmentation/semantic/swin_unetr.py,sha256=ODUpnJrpDQl0m8CC2SPnE_lpFflzS0GSiCZOmrjL6uQ,3373
 eva/vision/models/networks/decoders/segmentation/semantic/with_image.py,sha256=I5PyGKKo8DcXYcw4xlCFzuavRJNRrzGT-szpDidMPXI,3516
 eva/vision/models/networks/decoders/segmentation/typings.py,sha256=8zAqIJLlQdCjsx-Dl4lnF4BB1VxTg_AyIquBVwpZlHg,537
 eva/vision/models/wrappers/__init__.py,sha256=ogmr-eeVuGaOCcsuxSp6PGyauP2QqWTb8dGTtbC7lRU,210
@@ -224,14 +240,14 @@ eva/vision/models/wrappers/from_timm.py,sha256=Z38Nb1i6OPKkgvFZOvGx-O3AZQuscf1zR
 eva/vision/utils/__init__.py,sha256=vaUovprE743SmyFH8l6uk4pYSWpI4zxn7lN0EwePTJI,96
 eva/vision/utils/colormap.py,sha256=sP1F0JCX3abZfFgdxEjLJO-LhNYKjXZvXxs03ZgrEvI,2876
 eva/vision/utils/convert.py,sha256=fqGmKrg5-JJLrTkTXB4YDcWTudXPrO1gGjsckVRUesU,1881
-eva/vision/utils/io/__init__.py,sha256=
+eva/vision/utils/io/__init__.py,sha256=Oa4CjmqXN0wzkG1PW79zSsHrN1jlI7_VJ5NSXLKx0eA,652
 eva/vision/utils/io/_utils.py,sha256=JzOt7Frj6ScF_aNjFtfHBn4ROnl6NhUZucmQhLc4Cww,768
 eva/vision/utils/io/image.py,sha256=IdOkr5MYqhYHz8U9drZ7wULTM3YHwCWSjZlu_Qdl4GQ,2053
 eva/vision/utils/io/mat.py,sha256=qpGifyjmpE0Xhv567Si7-zxKrgkgE0sywP70cHiLFGU,808
-eva/vision/utils/io/nifti.py,sha256=
+eva/vision/utils/io/nifti.py,sha256=TFMgNhLqIK3sl3RjIRXEABM7FmSQjqVOwk1vXkuvX2w,4983
 eva/vision/utils/io/text.py,sha256=qYgfo_ZaDZWfG02NkVVYzo5QFySqdCCz5uLA9d-zXtI,701
-kaiko_eva-0.2.
-kaiko_eva-0.2.
-kaiko_eva-0.2.
-kaiko_eva-0.2.
-kaiko_eva-0.2.
+kaiko_eva-0.2.2.dist-info/METADATA,sha256=hiFFWrNu2fMZd7VLI08q4EDOc0IU6X4T00RGkHC0QT8,25363
+kaiko_eva-0.2.2.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+kaiko_eva-0.2.2.dist-info/entry_points.txt,sha256=6CSLu9bmQYJSXEg8gbOzRhxH0AGs75BB-vPm3VvfcNE,88
+kaiko_eva-0.2.2.dist-info/licenses/LICENSE,sha256=e6AEzr7j_R-PYr2qLO-JwLn8y70jbVD3U2mxbRmwcI4,11338
+kaiko_eva-0.2.2.dist-info/RECORD,,
eva/vision/data/datasets/classification/base.py
@@ -1,96 +0,0 @@
-"""Base for image classification datasets."""
-
-import abc
-from typing import Any, Callable, Dict, List, Tuple
-
-import torch
-from torchvision import tv_tensors
-from typing_extensions import override
-
-from eva.vision.data.datasets import vision
-
-
-class ImageClassification(vision.VisionDataset[Tuple[tv_tensors.Image, torch.Tensor]], abc.ABC):
-    """Image classification abstract dataset."""
-
-    def __init__(
-        self,
-        transforms: Callable | None = None,
-    ) -> None:
-        """Initializes the image classification dataset.
-
-        Args:
-            transforms: A function/transform which returns a transformed
-                version of the raw data samples.
-        """
-        super().__init__()
-
-        self._transforms = transforms
-
-    @property
-    def classes(self) -> List[str] | None:
-        """Returns the list with names of the dataset names."""
-
-    @property
-    def class_to_idx(self) -> Dict[str, int] | None:
-        """Returns a mapping of the class name to its target index."""
-
-    def load_metadata(self, index: int) -> Dict[str, Any] | None:
-        """Returns the dataset metadata.
-
-        Args:
-            index: The index of the data sample to return the metadata of.
-
-        Returns:
-            The sample metadata.
-        """
-
-    @abc.abstractmethod
-    def load_image(self, index: int) -> tv_tensors.Image:
-        """Returns the `index`'th image sample.
-
-        Args:
-            index: The index of the data sample to load.
-
-        Returns:
-            The image as a numpy array.
-        """
-
-    @abc.abstractmethod
-    def load_target(self, index: int) -> torch.Tensor:
-        """Returns the `index`'th target sample.
-
-        Args:
-            index: The index of the data sample to load.
-
-        Returns:
-            The sample target as an array.
-        """
-
-    @abc.abstractmethod
-    @override
-    def __len__(self) -> int:
-        raise NotImplementedError
-
-    @override
-    def __getitem__(self, index: int) -> Tuple[tv_tensors.Image, torch.Tensor, Dict[str, Any]]:
-        image = self.load_image(index)
-        target = self.load_target(index)
-        image, target = self._apply_transforms(image, target)
-        return image, target, self.load_metadata(index) or {}
-
-    def _apply_transforms(
-        self, image: tv_tensors.Image, target: torch.Tensor
-    ) -> Tuple[tv_tensors.Image, torch.Tensor]:
-        """Applies the transforms to the provided data and returns them.
-
-        Args:
-            image: The desired image.
-            target: The target of the image.
-
-        Returns:
-            A tuple with the image and the target transformed.
-        """
-        if self._transforms is not None:
-            image, target = self._transforms(image, target)
-        return image, target
eva/vision/data/datasets/segmentation/base.py
@@ -1,96 +0,0 @@
-"""Base for image segmentation datasets."""
-
-import abc
-from typing import Any, Callable, Dict, List, Tuple
-
-from torchvision import tv_tensors
-from typing_extensions import override
-
-from eva.vision.data.datasets import vision
-
-
-class ImageSegmentation(vision.VisionDataset[Tuple[tv_tensors.Image, tv_tensors.Mask]], abc.ABC):
-    """Image segmentation abstract dataset."""
-
-    def __init__(self, transforms: Callable | None = None) -> None:
-        """Initializes the image segmentation base class.
-
-        Args:
-            transforms: A function/transforms that takes in an
-                image and a label and returns the transformed versions of both.
-        """
-        super().__init__()
-
-        self._transforms = transforms
-
-    @property
-    def classes(self) -> List[str] | None:
-        """Returns the list with names of the dataset names."""
-
-    @property
-    def class_to_idx(self) -> Dict[str, int] | None:
-        """Returns a mapping of the class name to its target index."""
-
-    @abc.abstractmethod
-    def load_image(self, index: int) -> tv_tensors.Image:
-        """Loads and returns the `index`'th image sample.
-
-        Args:
-            index: The index of the data sample to load.
-
-        Returns:
-            An image torchvision tensor (channels, height, width).
-        """
-
-    @abc.abstractmethod
-    def load_mask(self, index: int) -> tv_tensors.Mask:
-        """Returns the `index`'th target masks sample.
-
-        Args:
-            index: The index of the data sample target masks to load.
-
-        Returns:
-            The semantic mask as a (H x W) shaped tensor with integer
-            values which represent the pixel class id.
-        """
-
-    def load_metadata(self, index: int) -> Dict[str, Any] | None:
-        """Returns the dataset metadata.
-
-        Args:
-            index: The index of the data sample to return the metadata of.
-                If `None`, it will return the metadata of the current dataset.
-
-        Returns:
-            The sample metadata.
-        """
-
-    @abc.abstractmethod
-    @override
-    def __len__(self) -> int:
-        raise NotImplementedError
-
-    @override
-    def __getitem__(self, index: int) -> Tuple[tv_tensors.Image, tv_tensors.Mask, Dict[str, Any]]:
-        image = self.load_image(index)
-        mask = self.load_mask(index)
-        metadata = self.load_metadata(index) or {}
-        image_tensor, mask_tensor = self._apply_transforms(image, mask)
-        return image_tensor, mask_tensor, metadata
-
-    def _apply_transforms(
-        self, image: tv_tensors.Image, mask: tv_tensors.Mask
-    ) -> Tuple[tv_tensors.Image, tv_tensors.Mask]:
-        """Applies the transforms to the provided data and returns them.
-
-        Args:
-            image: The desired image.
-            mask: The target segmentation mask.
-
-        Returns:
-            A tuple with the image and the masks transformed.
-        """
-        if self._transforms is not None:
-            image, mask = self._transforms(image, mask)
-
-        return image, mask
eva/vision/data/transforms/common/resize_and_clamp.py
@@ -1,51 +0,0 @@
-"""Specialized transforms for resizing, clamping and range normalizing."""
-
-from typing import Callable, Sequence, Tuple
-
-from torchvision.transforms import v2
-
-from eva.vision.data.transforms import normalization
-
-
-class ResizeAndClamp(v2.Compose):
-    """Resizes, crops, clamps and normalizes an input image."""
-
-    def __init__(
-        self,
-        size: int | Sequence[int] = 224,
-        clamp_range: Tuple[int, int] = (-1024, 1024),
-        mean: Sequence[float] = (0.0, 0.0, 0.0),
-        std: Sequence[float] = (1.0, 1.0, 1.0),
-    ) -> None:
-        """Initializes the transform object.
-
-        Args:
-            size: Desired output size of the crop. If size is an `int` instead
-                of sequence like (h, w), a square crop (size, size) is made.
-            clamp_range: The lower and upper bound to clamp the pixel values.
-            mean: Sequence of means for each image channel.
-            std: Sequence of standard deviations for each image channel.
-        """
-        self._size = size
-        self._clamp_range = clamp_range
-        self._mean = mean
-        self._std = std
-
-        super().__init__(transforms=self._build_transforms())
-
-    def _build_transforms(self) -> Sequence[Callable]:
-        """Builds and returns the list of transforms."""
-        transforms = [
-            v2.Resize(size=self._size),
-            v2.CenterCrop(size=self._size),
-            normalization.Clamp(out_range=self._clamp_range),
-            normalization.RescaleIntensity(
-                in_range=self._clamp_range,
-                out_range=(0.0, 1.0),
-            ),
-            v2.Normalize(
-                mean=self._mean,
-                std=self._std,
-            ),
-        ]
-        return transforms
eva/vision/data/transforms/normalization/clamp.py
@@ -1,43 +0,0 @@
-"""Image clamp transform."""
-
-import functools
-from typing import Any, Dict, Tuple
-
-import torch
-import torchvision.transforms.v2 as torch_transforms
-from torchvision import tv_tensors
-from typing_extensions import override
-
-
-class Clamp(torch_transforms.Transform):
-    """Clamps all elements in input into a specific range."""
-
-    def __init__(self, out_range: Tuple[int, int]) -> None:
-        """Initializes the transform.
-
-        Args:
-            out_range: The lower and upper bound of the range to
-                be clamped to.
-        """
-        super().__init__()
-
-        self._out_range = out_range
-
-    @functools.singledispatchmethod
-    @override
-    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
-        return inpt
-
-    @_transform.register(torch.Tensor)
-    def _(self, inpt: torch.Tensor, params: Dict[str, Any]) -> Any:
-        return torch.clamp(inpt, min=self._out_range[0], max=self._out_range[1])
-
-    @_transform.register(tv_tensors.Image)
-    def _(self, inpt: tv_tensors.Image, params: Dict[str, Any]) -> Any:
-        inpt_clamp = torch.clamp(inpt, min=self._out_range[0], max=self._out_range[1])
-        return tv_tensors.wrap(inpt_clamp, like=inpt)
-
-    @_transform.register(tv_tensors.BoundingBoxes)
-    @_transform.register(tv_tensors.Mask)
-    def _(self, inpt: tv_tensors.BoundingBoxes | tv_tensors.Mask, params: Dict[str, Any]) -> Any:
-        return inpt