kaiko-eva 0.1.0__tar.gz → 0.1.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of kaiko-eva might be problematic. See the package's registry page for more details.
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/PKG-INFO +3 -34
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/README.md +2 -33
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/pyproject.toml +1 -1
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/callbacks/writers/embeddings/base.py +3 -4
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/dataloaders/dataloader.py +2 -2
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/splitting/random.py +6 -5
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/splitting/stratified.py +12 -6
- kaiko_eva-0.1.3/src/eva/core/losses/__init__.py +5 -0
- kaiko_eva-0.1.3/src/eva/core/losses/cross_entropy.py +27 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/metrics/__init__.py +0 -4
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/metrics/defaults/__init__.py +0 -2
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/models/modules/module.py +9 -9
- kaiko_eva-0.1.3/src/eva/core/models/transforms/extract_cls_features.py +41 -0
- kaiko_eva-0.1.3/src/eva/core/models/transforms/extract_patch_features.py +59 -0
- kaiko_eva-0.1.3/src/eva/core/utils/progress_bar.py +15 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/__init__.py +4 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/__init__.py +2 -1
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/camelyon16.py +4 -1
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/panda.py +17 -1
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/wsi.py +4 -1
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/segmentation/__init__.py +2 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/segmentation/consep.py +2 -2
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/segmentation/lits.py +49 -29
- kaiko_eva-0.1.3/src/eva/vision/data/datasets/segmentation/lits_balanced.py +93 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/segmentation/monusac.py +7 -7
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/segmentation/total_segmentator_2d.py +2 -2
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/wsi.py +37 -1
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/patching/coordinates.py +9 -1
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/patching/samplers/_utils.py +2 -8
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/patching/samplers/random.py +4 -2
- kaiko_eva-0.1.3/src/eva/vision/losses/__init__.py +5 -0
- kaiko_eva-0.1.3/src/eva/vision/losses/dice.py +107 -0
- kaiko_eva-0.1.3/src/eva/vision/metrics/__init__.py +11 -0
- kaiko_eva-0.1.3/src/eva/vision/metrics/defaults/__init__.py +7 -0
- {kaiko_eva-0.1.0/src/eva/core → kaiko_eva-0.1.3/src/eva/vision}/metrics/defaults/segmentation/__init__.py +1 -1
- {kaiko_eva-0.1.0/src/eva/core → kaiko_eva-0.1.3/src/eva/vision}/metrics/defaults/segmentation/multiclass.py +2 -1
- kaiko_eva-0.1.3/src/eva/vision/metrics/segmentation/BUILD +1 -0
- kaiko_eva-0.1.3/src/eva/vision/metrics/segmentation/__init__.py +9 -0
- kaiko_eva-0.1.3/src/eva/vision/metrics/segmentation/_utils.py +69 -0
- {kaiko_eva-0.1.0/src/eva/core/metrics → kaiko_eva-0.1.3/src/eva/vision/metrics/segmentation}/generalized_dice.py +12 -10
- kaiko_eva-0.1.3/src/eva/vision/metrics/segmentation/mean_iou.py +57 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/modules/semantic_segmentation.py +4 -3
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/_utils.py +12 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/pathology/__init__.py +4 -1
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/pathology/histai.py +8 -2
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/pathology/mahmood.py +2 -9
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/pathology/owkin.py +14 -0
- kaiko_eva-0.1.3/src/eva/vision/models/networks/backbones/pathology/paige.py +51 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/decoders/__init__.py +1 -1
- kaiko_eva-0.1.3/src/eva/vision/models/networks/decoders/segmentation/__init__.py +19 -0
- kaiko_eva-0.1.3/src/eva/vision/models/networks/decoders/segmentation/base.py +16 -0
- kaiko_eva-0.1.0/src/eva/vision/models/networks/decoders/segmentation/conv2d.py → kaiko_eva-0.1.3/src/eva/vision/models/networks/decoders/segmentation/decoder2d.py +26 -22
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/decoders/segmentation/linear.py +2 -2
- kaiko_eva-0.1.3/src/eva/vision/models/networks/decoders/segmentation/semantic/__init__.py +12 -0
- {kaiko_eva-0.1.0/src/eva/vision/models/networks/decoders/segmentation → kaiko_eva-0.1.3/src/eva/vision/models/networks/decoders/segmentation/semantic}/common.py +3 -3
- kaiko_eva-0.1.3/src/eva/vision/models/networks/decoders/segmentation/semantic/with_image.py +94 -0
- kaiko_eva-0.1.3/src/eva/vision/models/networks/decoders/segmentation/typings.py +18 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/utils/io/__init__.py +7 -1
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/utils/io/nifti.py +19 -4
- kaiko_eva-0.1.3/tests/eva/assets/vision/datasets/lits/Training_Batch2/segmentation-31.nii +3 -0
- kaiko_eva-0.1.3/tests/eva/assets/vision/datasets/lits/Training_Batch2/segmentation-45.nii +3 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/splitting/test_random.py +23 -4
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/splitting/test_stratified.py +28 -7
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/models/wrappers/test_huggingface.py +5 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/classification/test_camelyon16.py +5 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/classification/test_panda.py +18 -3
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/classification/test_wsi.py +5 -0
- kaiko_eva-0.1.3/tests/eva/vision/data/datasets/segmentation/test_lits_balanced.py +59 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/test_wsi.py +10 -2
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/wsi/patching/samplers/test_foreground_grid.py +13 -7
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/wsi/patching/samplers/test_grid.py +28 -5
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/wsi/patching/samplers/test_random.py +27 -5
- kaiko_eva-0.1.3/tests/eva/vision/metrics/defaults/__init__.py +1 -0
- {kaiko_eva-0.1.0/tests/eva/core → kaiko_eva-0.1.3/tests/eva/vision}/metrics/defaults/segmentation/test_multiclass.py +1 -1
- kaiko_eva-0.1.3/tests/eva/vision/metrics/segmentation/__init__.py +1 -0
- kaiko_eva-0.1.3/tests/eva/vision/metrics/segmentation/_utils.py +32 -0
- kaiko_eva-0.1.3/tests/eva/vision/metrics/segmentation/test_generalized_dice.py +24 -0
- kaiko_eva-0.1.3/tests/eva/vision/metrics/segmentation/test_mean_iou.py +24 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/models/modules/test_semantic_segmentation.py +1 -1
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/models/networks/decoders/segmentation/conv.py +4 -4
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/models/networks/decoders/segmentation/linear.py +1 -1
- kaiko_eva-0.1.0/src/eva/core/metrics/mean_iou.py +0 -120
- kaiko_eva-0.1.0/src/eva/core/models/transforms/extract_cls_features.py +0 -33
- kaiko_eva-0.1.0/src/eva/core/models/transforms/extract_patch_features.py +0 -47
- kaiko_eva-0.1.0/src/eva/vision/losses/__init__.py +0 -5
- kaiko_eva-0.1.0/src/eva/vision/losses/dice.py +0 -40
- kaiko_eva-0.1.0/src/eva/vision/models/networks/decoders/decoder.py +0 -7
- kaiko_eva-0.1.0/src/eva/vision/models/networks/decoders/segmentation/__init__.py +0 -11
- kaiko_eva-0.1.0/tests/eva/assets/vision/datasets/lits/Training_Batch2/segmentation-31.nii +0 -3
- kaiko_eva-0.1.0/tests/eva/assets/vision/datasets/lits/Training_Batch2/segmentation-45.nii +0 -3
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/LICENSE +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/__main__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/__version__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/callbacks/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/callbacks/config.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/callbacks/writers/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/callbacks/writers/embeddings/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/callbacks/writers/embeddings/_manifest.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/callbacks/writers/embeddings/classification.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/callbacks/writers/embeddings/segmentation.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/callbacks/writers/embeddings/typings.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/cli/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/cli/cli.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/cli/logo.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/cli/setup.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/dataloaders/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/datamodules/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/datamodules/call.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/datamodules/datamodule.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/datamodules/schemas.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/datasets/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/datasets/base.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/datasets/classification/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/datasets/classification/embeddings.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/datasets/classification/multi_embeddings.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/datasets/dataset.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/datasets/embeddings.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/samplers/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/samplers/sampler.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/splitting/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/transforms/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/transforms/dtype/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/transforms/dtype/array.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/transforms/padding/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/transforms/padding/pad_2d_tensor.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/transforms/sampling/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/data/transforms/sampling/sample_from_axis.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/interface/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/interface/interface.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/loggers/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/loggers/dummy.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/loggers/experimental_loggers.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/loggers/log/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/loggers/log/image.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/loggers/log/parameters.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/loggers/log/utils.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/loggers/loggers.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/metrics/average_loss.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/metrics/binary_balanced_accuracy.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/metrics/defaults/classification/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/metrics/defaults/classification/binary.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/metrics/defaults/classification/multiclass.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/metrics/structs/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/metrics/structs/collection.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/metrics/structs/metric.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/metrics/structs/module.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/metrics/structs/schemas.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/metrics/structs/typings.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/models/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/models/modules/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/models/modules/head.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/models/modules/inference.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/models/modules/typings.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/models/modules/utils/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/models/modules/utils/batch_postprocess.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/models/modules/utils/grad.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/models/networks/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/models/networks/mlp.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/models/transforms/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/models/wrappers/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/models/wrappers/_utils.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/models/wrappers/base.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/models/wrappers/from_function.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/models/wrappers/huggingface.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/models/wrappers/onnx.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/trainers/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/trainers/_logging.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/trainers/_recorder.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/trainers/_utils.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/trainers/functional.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/trainers/trainer.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/utils/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/utils/clone.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/utils/io/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/utils/io/dataframe.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/utils/memory.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/utils/multiprocessing.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/utils/operations.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/utils/parser.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/core/utils/workers.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/callbacks/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/callbacks/loggers/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/callbacks/loggers/batch/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/callbacks/loggers/batch/base.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/callbacks/loggers/batch/segmentation.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/_utils.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/_validators.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/bach.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/base.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/crc.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/mhist.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/patch_camelyon.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/segmentation/_utils.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/segmentation/base.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/segmentation/bcss.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/segmentation/embeddings.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/structs.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/vision.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/transforms/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/transforms/common/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/transforms/common/resize_and_clamp.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/transforms/common/resize_and_crop.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/transforms/normalization/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/transforms/normalization/clamp.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/transforms/normalization/functional/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/transforms/normalization/functional/rescale_intensity.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/transforms/normalization/rescale_intensity.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/backends/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/backends/base.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/backends/openslide.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/backends/pil.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/backends/tiffslide.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/patching/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/patching/mask.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/patching/samplers/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/patching/samplers/base.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/patching/samplers/foreground_grid.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/patching/samplers/grid.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/modules/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/abmil.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/pathology/bioptimus.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/pathology/gigapath.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/pathology/kaiko.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/pathology/lunit.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/registry.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/timm/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/timm/backbones.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/universal/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/universal/vit.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/wrappers/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/wrappers/from_registry.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/models/wrappers/from_timm.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/utils/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/utils/colormap.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/utils/convert.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/utils/io/_utils.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/utils/io/image.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/utils/io/mat.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/utils/io/text.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/_cli.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_0_shape_8.pt +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_1_shape_8.pt +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_2_shape_8_list.pt +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_3_shape_8_list.pt +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_4_shape_1x8.pt +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_5_shape_1x8.pt +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_6_shape_1x8_list.pt +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_7_shape_1x8_list.pt +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/embeddings/manifest.csv +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_0_shape_6x8.pt +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_1_shape_3x8.pt +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_2_shape_1x8.pt +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_3_shape_2x8.pt +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_4_shape_5x8.pt +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_5_shape_3x8.pt +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_6_shape_1x8_list.pt +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_7_shape_6x8_list.pt +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_8_shape_2x8_list.pt +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_9_shape_5x8_list.pt +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/manifest.csv +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/images/random_bgr_32x32.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/images/random_grayscale_32x32.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Benign/b001.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Benign/b002.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Benign/b003.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Benign/b004.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Benign/b005.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Benign/b006.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/InSitu/is001.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/InSitu/is002.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/InSitu/is003.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/InSitu/is004.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/InSitu/is005.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/InSitu/is006.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Invasive/iv001.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Invasive/iv002.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Invasive/iv003.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Invasive/iv004.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Invasive/iv005.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Invasive/iv006.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Normal/n001.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Normal/n002.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Normal/n003.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Normal/n004.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Normal/n005.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Normal/n006.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-A2-A0CM-DX1_xmin18562_ymin56852_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-A7-A4SD-DX1_xmin53807_ymin11871_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-AR-A0TS-DX1_xmin118843_ymin22812_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-AR-A1AQ-DX1_xmin18171_ymin38296_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-C8-A3XY-DX1_xmin76297_ymin35510_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-D8-A1XQ-DX1_xmin61261_ymin33317_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-EW-A1P4-DX1_xmin17256_ymin35430_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-GI-A2C9-DX1_xmin20882_ymin11843_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-OL-A5D6-DX1_xmin115108_ymin40554_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-OL-A5D7-DX1_xmin114443_ymin22490_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-A2-A0CM-DX1_xmin18562_ymin56852_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-A7-A4SD-DX1_xmin53807_ymin11871_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-AR-A0TS-DX1_xmin118843_ymin22812_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-AR-A1AQ-DX1_xmin18171_ymin38296_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-C8-A3XY-DX1_xmin76297_ymin35510_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-D8-A1XQ-DX1_xmin61261_ymin33317_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-EW-A1P4-DX1_xmin17256_ymin35430_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-GI-A2C9-DX1_xmin20882_ymin11843_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-OL-A5D6-DX1_xmin115108_ymin40554_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-OL-A5D7-DX1_xmin114443_ymin22490_MPP-0.2500.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/camelyon16/testing/images/test_001.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/camelyon16/testing/images/test_002.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/camelyon16/testing/reference.csv +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/camelyon16/training/normal/normal_001.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/camelyon16/training/normal/normal_002.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/camelyon16/training/tumor/tumor_001.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/camelyon16/training/tumor/tumor_002.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Test/Images/test_1.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Test/Images/test_2.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Test/Images/test_3.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Test/Labels/test_1.mat +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Test/Labels/test_2.mat +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Test/Labels/test_3.mat +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Train/Images/train_1.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Train/Images/train_2.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Train/Images/train_3.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Train/Images/train_4.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Train/Labels/train_1.mat +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Train/Labels/train_2.mat +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Train/Labels/train_3.mat +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Train/Labels/train_4.mat +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/ADI/ADI-SIHVHHPH.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/ADI/ADI-SIHWWQMY.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/BACK/BACK-YYYHKNMK.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/BACK/BACK-YYYMDTNW.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/DEB/DEB-YYYRSHLP.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/DEB/DEB-YYYTCTDR.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/LYM/LYM-YYWRPGDD.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/LYM/LYM-YYYTKMWW.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/MUC/MUC-YYYNWSAM.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/MUC/MUC-YYYRQDLW.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/MUS/MUS-YYYNVQVQ.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/MUS/MUS-YYYRWWNH.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/NORM/NORM-YYTTIRVD.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/NORM/NORM-YYVAFTKA.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/STR/STR-YYYHNSSM.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/STR/STR-YYYWVWFG.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/TUM/TUM-YYYSGWYW.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/TUM/TUM-YYYYQFVN.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/ADI/ADI-SIHVHHPH.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/ADI/ADI-SIHWWQMY.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/BACK/BACK-YYYHKNMK.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/BACK/BACK-YYYMDTNW.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/DEB/DEB-YYYRSHLP.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/DEB/DEB-YYYTCTDR.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/LYM/LYM-YYWRPGDD.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/LYM/LYM-YYYTKMWW.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/MUC/MUC-YYYNWSAM.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/MUC/MUC-YYYRQDLW.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/MUS/MUS-YYYNVQVQ.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/MUS/MUS-YYYRWWNH.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/NORM/NORM-YYTTIRVD.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/NORM/NORM-YYVAFTKA.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/STR/STR-YYYHNSSM.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/STR/STR-YYYWVWFG.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/TUM/TUM-YYYSGWYW.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/TUM/TUM-YYYYQFVN.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/ADI/ADI-SIHVHHPH.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/ADI/ADI-SIHWWQMY.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/BACK/BACK-YYYHKNMK.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/BACK/BACK-YYYMDTNW.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/DEB/DEB-YYYRSHLP.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/DEB/DEB-YYYTCTDR.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/LYM/LYM-YYWRPGDD.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/LYM/LYM-YYYTKMWW.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/MUC/MUC-YYYNWSAM.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/MUC/MUC-YYYRQDLW.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/MUS/MUS-YYYNVQVQ.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/MUS/MUS-YYYRWWNH.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/NORM/NORM-YYTTIRVD.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/NORM/NORM-YYVAFTKA.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/STR/STR-YYYHNSSM.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/STR/STR-YYYWVWFG.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/TUM/TUM-YYYSGWYW.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/TUM/TUM-YYYYQFVN.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/lits/Training_Batch2/volume-31.nii +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/lits/Training_Batch2/volume-45.nii +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/mhist/annotations.csv +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/mhist/images/MHIST_aaa.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/mhist/images/MHIST_aab.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/mhist/images/MHIST_aac.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/mhist/images/MHIST_aae.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/mhist/images/MHIST_aaf.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/mhist/images/MHIST_aag.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/mhist/images/MHIST_aah.png +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC Testing Data and Annotations/TCGA-2Z-A9JG-01Z-00-DX1/TCGA-2Z-A9JG-01Z-00-DX1_1.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC Testing Data and Annotations/TCGA-2Z-A9JG-01Z-00-DX1/TCGA-2Z-A9JG-01Z-00-DX1_1.xml +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC Testing Data and Annotations/TCGA-2Z-A9JG-01Z-00-DX1/TCGA-2Z-A9JG-01Z-00-DX1_2.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC Testing Data and Annotations/TCGA-2Z-A9JG-01Z-00-DX1/TCGA-2Z-A9JG-01Z-00-DX1_2.xml +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC Testing Data and Annotations/TCGA-2Z-A9JG-01Z-00-DX1/TCGA-2Z-A9JG-01Z-00-DX1_3.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC Testing Data and Annotations/TCGA-2Z-A9JG-01Z-00-DX1/TCGA-2Z-A9JG-01Z-00-DX1_3.xml +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC Testing Data and Annotations/TCGA-2Z-A9JN-01Z-00-DX1/TCGA-2Z-A9JN-01Z-00-DX1_1.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC Testing Data and Annotations/TCGA-2Z-A9JN-01Z-00-DX1/TCGA-2Z-A9JN-01Z-00-DX1_1.xml +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-55-1594-01Z-00-DX1/TCGA-55-1594-01Z-00-DX1_003.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-55-1594-01Z-00-DX1/TCGA-55-1594-01Z-00-DX1_003.xml +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-5P-A9K0-01Z-00-DX1/TCGA-5P-A9K0-01Z-00-DX1_3.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-5P-A9K0-01Z-00-DX1/TCGA-5P-A9K0-01Z-00-DX1_3.xml +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-69-7760-01Z-00-DX1/TCGA-69-7760-01Z-00-DX1_001.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-69-7760-01Z-00-DX1/TCGA-69-7760-01Z-00-DX1_001.xml +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-69-A59K-01Z-00-DX1/TCGA-69-A59K-01Z-00-DX1_001.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-69-A59K-01Z-00-DX1/TCGA-69-A59K-01Z-00-DX1_001.xml +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-69-A59K-01Z-00-DX1/TCGA-69-A59K-01Z-00-DX1_002.tif +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-69-A59K-01Z-00-DX1/TCGA-69-A59K-01Z-00-DX1_002.xml +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/0214df71ae527e2144021178c453d204.tiff +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/02d302a8d723fa00331f373091b29135.tiff +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/157565e23ba28d5a42f63f34f3dd4425.tiff +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/682a1fd346b6fff340afbdb80c2f7caf.tiff +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/8582b59b41635fa38401d1bddad66707.tiff +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/8c357871e57c5c60277230412f2d9028.tiff +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/979cf5a2fa4079eaf74343d6ff5e1b51.tiff +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/9dd40c0127d217bc4917e4db40e06e94.tiff +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/9ed8ec7bf90653bc4ca86b3ca53cbb96.tiff +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/a04310d441e8d2c7a5066627baeec9b6.tiff +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/fb8886059879eaac70139336cb525838.tiff +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_with_noisy_labels.csv +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/patch_camelyon/camelyonpatch_level_2_split_test_x.h5 +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/patch_camelyon/camelyonpatch_level_2_split_test_y.h5 +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/patch_camelyon/camelyonpatch_level_2_split_train_x.h5 +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/patch_camelyon/camelyonpatch_level_2_split_train_y.h5 +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/patch_camelyon/camelyonpatch_level_2_split_valid_x.h5 +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/patch_camelyon/camelyonpatch_level_2_split_valid_y.h5 +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/meta.csv +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0011/ct.nii.gz +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0011/segmentations/aorta_small.nii.gz +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0011/segmentations/brain_small.nii.gz +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0011/segmentations/colon_small.nii.gz +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0011/segmentations/semantic_labels/masks.nii.gz +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0461/ct.nii.gz +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0461/segmentations/aorta_small.nii.gz +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0461/segmentations/brain_small.nii.gz +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0461/segmentations/colon_small.nii.gz +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0461/segmentations/semantic_labels/masks.nii.gz +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0762/ct.nii.gz +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0762/segmentations/aorta_small.nii.gz +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0762/segmentations/brain_small.nii.gz +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0762/segmentations/colon_small.nii.gz +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0762/segmentations/semantic_labels/masks.nii.gz +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/wsi/0/a.tiff +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/wsi/0/b.tiff +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/wsi/1/a.tiff +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/wsi/manifest.csv +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/conftest.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/callbacks/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/callbacks/conftest.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/callbacks/writers/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/callbacks/writers/embeddings/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/callbacks/writers/embeddings/test_classification.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/dataloaders/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/dataloaders/test_dataloader.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/datamodules/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/datamodules/_utils.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/datamodules/test_datamodule.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/datamodules/test_schemas.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/datasets/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/datasets/classification/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/datasets/classification/test_embeddings.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/datasets/classification/test_multi_embeddings.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/splitting/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/transforms/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/transforms/padding/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/transforms/padding/test_pad_2d_tensor.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/transforms/sampling/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/data/transforms/sampling/test_sample_from_axis.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/metrics/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/metrics/core/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/metrics/core/test_metric_module.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/metrics/core/test_schemas.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/metrics/defaults/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/metrics/defaults/classification/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/metrics/defaults/classification/test_binary.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/metrics/defaults/classification/test_multiclass.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/metrics/test_average_loss.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/metrics/test_binary_balanced_accuracy.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/models/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/models/modules/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/models/modules/conftest.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/models/modules/test_head.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/models/modules/test_inference.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/models/modules/utils/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/models/modules/utils/test_batch_postproces.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/models/networks/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/models/networks/test_mlp.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/models/wrappers/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/models/wrappers/test_from_function.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/models/wrappers/test_onnx.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/test_cli.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/trainers/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/trainers/test_recorder.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/utils/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/core/utils/test_operations.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/classification/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/classification/test_bach.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/classification/test_crc.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/classification/test_mhist.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/classification/test_patch_camelyon.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/segmentation/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/segmentation/test_bcss.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/segmentation/test_consep.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/segmentation/test_lits.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/segmentation/test_monusac.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/segmentation/test_total_segmentator.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/transforms/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/transforms/common/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/transforms/common/test_resize_and_clamp.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/transforms/common/test_resize_and_crop.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/transforms/normalization/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/transforms/normalization/functional/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/transforms/normalization/functional/test_rescale_intensity.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/wsi/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/wsi/patching/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/wsi/patching/samplers/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/data/wsi/patching/test_mask.py +0 -0
- {kaiko_eva-0.1.0/tests/eva/core → kaiko_eva-0.1.3/tests/eva/vision}/metrics/defaults/segmentation/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/models/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/models/modules/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/models/modules/conftest.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/models/networks/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/models/networks/backbones/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/models/networks/backbones/test_registry.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/models/networks/decoders/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/models/networks/decoders/segmentation/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/models/networks/test_abmil.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/models/wrappers/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/models/wrappers/test_backbone.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/models/wrappers/test_from_timm.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/test_vision_cli.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/utils/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/utils/io/__init__.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/utils/io/test_image.py +0 -0
- {kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/tests/eva/vision/utils/test_convert.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: kaiko-eva
|
|
3
|
-
Version: 0.1.
|
|
3
|
+
Version: 0.1.3
|
|
4
4
|
Summary: Evaluation Framework for oncology foundation models.
|
|
5
5
|
Keywords: machine-learning,evaluation-framework,oncology,foundation-models
|
|
6
6
|
Author-Email: Ioannis Gatopoulos <ioannis@kaiko.ai>, =?utf-8?q?Nicolas_K=C3=A4nzig?= <nicolas@kaiko.ai>, Roman Moser <roman@kaiko.ai>
|
|
@@ -468,41 +468,10 @@ and [tutorials](https://kaiko-ai.github.io/eva/dev/user-guide/advanced/replicate
|
|
|
468
468
|
|
|
469
469
|
## Leaderboards
|
|
470
470
|
|
|
471
|
-
|
|
471
|
+
The following table shows the FMs we have evaluated with _`eva`_. For more detailed information about the evaluation process, please refer to our [documentation](https://kaiko-ai.github.io/eva/main/leaderboards/).
|
|
472
472
|
|
|
473
|
-
|
|
473
|
+

|
|
474
474
|
|
|
475
|
-
<br />
|
|
476
|
-
|
|
477
|
-
<div align="center">
|
|
478
|
-
|
|
479
|
-
| Model | BACH | CRC | MHIST | PCam | Camelyon16 | PANDA | CoNSeP | MoNuSAC |
|
|
480
|
-
|---------|-------|-------|-------|--------|------------|-------|------------|-------|
|
|
481
|
-
| ViT-S/16 _(random)_ <sup>[1]</sup> | 0.411|0.613|0.5|0.752|0.551|0.347|0.489|0.394|
|
|
482
|
-
| ViT-S/16 _(ImageNet)_ <sup>[1]</sup> | 0.675|0.936|0.827|0.861|0.751|0.676|0.54|0.512|
|
|
483
|
-
| DINO<sub>(p=16)</sub> <sup>[2]</sup> | 0.77|0.936|0.751|0.905|0.869|0.737|0.625|0.549|
|
|
484
|
-
| Phikon <sup>[3]</sup> | 0.715|0.942|0.766|0.925|0.879|0.784|0.68|0.554|
|
|
485
|
-
| UNI <sup>[4]</sup> | 0.797|0.95|0.835|0.939|0.933|0.774|0.67|0.575|
|
|
486
|
-
| ViT-S/16 _(kaiko.ai)_ <sup>[5]</sup> | 0.8|0.949|0.831|0.902|0.897|0.77|0.622|0.573|
|
|
487
|
-
| ViT-S/8 _(kaiko.ai)_ <sup>[5]</sup> | 0.825|0.948|0.826|0.887|0.879|0.741|0.677|0.617|
|
|
488
|
-
| ViT-B/16 _(kaiko.ai)_ <sup>[5]</sup> | 0.846|0.959|0.839|0.906|0.891|0.753|0.647|0.572|
|
|
489
|
-
| ViT-B/8 _(kaiko.ai)_ <sup>[5]</sup> | 0.867|0.952|0.814|0.921|0.939|0.761|0.706|0.661|
|
|
490
|
-
| ViT-L/14 _(kaiko.ai)_ <sup>[5]</sup> | 0.862|0.935|0.822|0.907|0.941|0.769|0.686|0.599|
|
|
491
|
-
|
|
492
|
-
_Table I: Linear probing evaluation of FMs on patch-level downstream datasets.<br> We report balanced accuracy
|
|
493
|
-
for classification tasks and generalized Dice score for semgetnation tasks, averaged over 5 runs. Results are
|
|
494
|
-
reported on the "test" split if available and otherwise on the "validation" split._
|
|
495
|
-
|
|
496
|
-
</div>
|
|
497
|
-
|
|
498
|
-
<br />
|
|
499
|
-
|
|
500
|
-
_References_:
|
|
501
|
-
1. _"Emerging properties in self-supervised vision transformers”_, [arXiv](https://arxiv.org/abs/2104.14294)
|
|
502
|
-
2. _"Benchmarking self-supervised learning on diverse pathology datasets”_, [arXiv](https://arxiv.org/abs/2212.04690)
|
|
503
|
-
3. _"Scaling self-supervised learning for histopathology with masked image modeling”_, [medRxiv](https://www.medrxiv.org/content/10.1101/2023.07.21.23292757v1)
|
|
504
|
-
4. _"A General-Purpose Self-Supervised Model for Computational Pathology”_, [arXiv](https://arxiv.org/abs/2308.15474)
|
|
505
|
-
5. _"Towards Training Large-Scale Pathology Foundation Models: from TCGA to Hospital Scale”_, [arXiv](https://arxiv.org/pdf/2404.15217)
|
|
506
475
|
|
|
507
476
|
## Contributing
|
|
508
477
|
|
|
@@ -212,41 +212,10 @@ and [tutorials](https://kaiko-ai.github.io/eva/dev/user-guide/advanced/replicate
|
|
|
212
212
|
|
|
213
213
|
## Leaderboards
|
|
214
214
|
|
|
215
|
-
|
|
215
|
+
The following table shows the FMs we have evaluated with _`eva`_. For more detailed information about the evaluation process, please refer to our [documentation](https://kaiko-ai.github.io/eva/main/leaderboards/).
|
|
216
216
|
|
|
217
|
-
|
|
217
|
+

|
|
218
218
|
|
|
219
|
-
<br />
|
|
220
|
-
|
|
221
|
-
<div align="center">
|
|
222
|
-
|
|
223
|
-
| Model | BACH | CRC | MHIST | PCam | Camelyon16 | PANDA | CoNSeP | MoNuSAC |
|
|
224
|
-
|---------|-------|-------|-------|--------|------------|-------|------------|-------|
|
|
225
|
-
| ViT-S/16 _(random)_ <sup>[1]</sup> | 0.411|0.613|0.5|0.752|0.551|0.347|0.489|0.394|
|
|
226
|
-
| ViT-S/16 _(ImageNet)_ <sup>[1]</sup> | 0.675|0.936|0.827|0.861|0.751|0.676|0.54|0.512|
|
|
227
|
-
| DINO<sub>(p=16)</sub> <sup>[2]</sup> | 0.77|0.936|0.751|0.905|0.869|0.737|0.625|0.549|
|
|
228
|
-
| Phikon <sup>[3]</sup> | 0.715|0.942|0.766|0.925|0.879|0.784|0.68|0.554|
|
|
229
|
-
| UNI <sup>[4]</sup> | 0.797|0.95|0.835|0.939|0.933|0.774|0.67|0.575|
|
|
230
|
-
| ViT-S/16 _(kaiko.ai)_ <sup>[5]</sup> | 0.8|0.949|0.831|0.902|0.897|0.77|0.622|0.573|
|
|
231
|
-
| ViT-S/8 _(kaiko.ai)_ <sup>[5]</sup> | 0.825|0.948|0.826|0.887|0.879|0.741|0.677|0.617|
|
|
232
|
-
| ViT-B/16 _(kaiko.ai)_ <sup>[5]</sup> | 0.846|0.959|0.839|0.906|0.891|0.753|0.647|0.572|
|
|
233
|
-
| ViT-B/8 _(kaiko.ai)_ <sup>[5]</sup> | 0.867|0.952|0.814|0.921|0.939|0.761|0.706|0.661|
|
|
234
|
-
| ViT-L/14 _(kaiko.ai)_ <sup>[5]</sup> | 0.862|0.935|0.822|0.907|0.941|0.769|0.686|0.599|
|
|
235
|
-
|
|
236
|
-
_Table I: Linear probing evaluation of FMs on patch-level downstream datasets.<br> We report balanced accuracy
|
|
237
|
-
for classification tasks and generalized Dice score for semgetnation tasks, averaged over 5 runs. Results are
|
|
238
|
-
reported on the "test" split if available and otherwise on the "validation" split._
|
|
239
|
-
|
|
240
|
-
</div>
|
|
241
|
-
|
|
242
|
-
<br />
|
|
243
|
-
|
|
244
|
-
_References_:
|
|
245
|
-
1. _"Emerging properties in self-supervised vision transformers”_, [arXiv](https://arxiv.org/abs/2104.14294)
|
|
246
|
-
2. _"Benchmarking self-supervised learning on diverse pathology datasets”_, [arXiv](https://arxiv.org/abs/2212.04690)
|
|
247
|
-
3. _"Scaling self-supervised learning for histopathology with masked image modeling”_, [medRxiv](https://www.medrxiv.org/content/10.1101/2023.07.21.23292757v1)
|
|
248
|
-
4. _"A General-Purpose Self-Supervised Model for Computational Pathology”_, [arXiv](https://arxiv.org/abs/2308.15474)
|
|
249
|
-
5. _"Towards Training Large-Scale Pathology Foundation Models: from TCGA to Hospital Scale”_, [arXiv](https://arxiv.org/pdf/2404.15217)
|
|
250
219
|
|
|
251
220
|
## Contributing
|
|
252
221
|
|
|
@@ -172,15 +172,14 @@ class EmbeddingsWriter(callbacks.BasePredictionWriter, abc.ABC):
|
|
|
172
172
|
|
|
173
173
|
def _check_if_exists(self) -> None:
|
|
174
174
|
"""Checks if the output directory already exists and if it should be overwritten."""
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
except FileExistsError as e:
|
|
175
|
+
os.makedirs(self._output_dir, exist_ok=True)
|
|
176
|
+
if os.path.exists(os.path.join(self._output_dir, "manifest.csv")) and not self._overwrite:
|
|
178
177
|
raise FileExistsError(
|
|
179
178
|
f"The embeddings output directory already exists: {self._output_dir}. This "
|
|
180
179
|
"either means that they have been computed before or that a wrong output "
|
|
181
180
|
"directory is being used. Consider using `eva fit` instead, selecting a "
|
|
182
181
|
"different output directory or setting overwrite=True."
|
|
183
|
-
)
|
|
182
|
+
)
|
|
184
183
|
os.makedirs(self._output_dir, exist_ok=True)
|
|
185
184
|
|
|
186
185
|
|
|
@@ -38,7 +38,7 @@ class DataLoader:
|
|
|
38
38
|
Mutually exclusive with `batch_size`, `shuffle`, `sampler` and `drop_last`.
|
|
39
39
|
"""
|
|
40
40
|
|
|
41
|
-
num_workers: int =
|
|
41
|
+
num_workers: int | None = None
|
|
42
42
|
"""How many workers to use for loading the data.
|
|
43
43
|
|
|
44
44
|
By default, it will use the number of CPUs available.
|
|
@@ -71,7 +71,7 @@ class DataLoader:
|
|
|
71
71
|
shuffle=self.shuffle,
|
|
72
72
|
sampler=self.sampler,
|
|
73
73
|
batch_sampler=self.batch_sampler,
|
|
74
|
-
num_workers=self.num_workers,
|
|
74
|
+
num_workers=self.num_workers or multiprocessing.cpu_count(),
|
|
75
75
|
collate_fn=self.collate_fn,
|
|
76
76
|
pin_memory=self.pin_memory,
|
|
77
77
|
drop_last=self.drop_last,
|
|
@@ -24,12 +24,13 @@ def random_split(
|
|
|
24
24
|
Returns:
|
|
25
25
|
The indices of the train, validation, and test sets as lists.
|
|
26
26
|
"""
|
|
27
|
-
|
|
28
|
-
|
|
27
|
+
total_ratio = train_ratio + val_ratio + test_ratio
|
|
28
|
+
if total_ratio > 1.0:
|
|
29
|
+
raise ValueError("The sum of the ratios must be lower or equal to 1.")
|
|
29
30
|
|
|
30
|
-
np.random.
|
|
31
|
-
n_samples = len(samples)
|
|
32
|
-
indices =
|
|
31
|
+
random_generator = np.random.default_rng(seed)
|
|
32
|
+
n_samples = int(total_ratio * len(samples))
|
|
33
|
+
indices = random_generator.permutation(len(samples))[:n_samples]
|
|
33
34
|
|
|
34
35
|
n_train = int(np.floor(train_ratio * n_samples))
|
|
35
36
|
n_val = n_samples - n_train if test_ratio == 0.0 else int(np.floor(val_ratio * n_samples)) or 1
|
|
@@ -28,10 +28,11 @@ def stratified_split(
|
|
|
28
28
|
"""
|
|
29
29
|
if len(samples) != len(targets):
|
|
30
30
|
raise ValueError("The number of samples and targets must be equal.")
|
|
31
|
-
if train_ratio + val_ratio + (test_ratio or 0)
|
|
32
|
-
raise ValueError("The sum of the ratios must be equal to 1.")
|
|
31
|
+
if train_ratio + val_ratio + (test_ratio or 0) > 1.0:
|
|
32
|
+
raise ValueError("The sum of the ratios must be lower or equal to 1.")
|
|
33
33
|
|
|
34
|
-
|
|
34
|
+
use_all_samples = train_ratio + val_ratio + test_ratio == 1
|
|
35
|
+
random_generator = np.random.default_rng(seed)
|
|
35
36
|
unique_classes, y_indices = np.unique(targets, return_inverse=True)
|
|
36
37
|
n_classes = unique_classes.shape[0]
|
|
37
38
|
|
|
@@ -39,18 +40,23 @@ def stratified_split(
|
|
|
39
40
|
|
|
40
41
|
for c in range(n_classes):
|
|
41
42
|
class_indices = np.where(y_indices == c)[0]
|
|
42
|
-
|
|
43
|
+
random_generator.shuffle(class_indices)
|
|
43
44
|
|
|
44
45
|
n_train = int(np.floor(train_ratio * len(class_indices))) or 1
|
|
45
46
|
n_val = (
|
|
46
47
|
len(class_indices) - n_train
|
|
47
|
-
if test_ratio == 0.0
|
|
48
|
+
if test_ratio == 0.0 and use_all_samples
|
|
48
49
|
else int(np.floor(val_ratio * len(class_indices))) or 1
|
|
49
50
|
)
|
|
50
51
|
|
|
51
52
|
train_indices.extend(class_indices[:n_train])
|
|
52
53
|
val_indices.extend(class_indices[n_train : n_train + n_val])
|
|
53
54
|
if test_ratio > 0.0:
|
|
54
|
-
|
|
55
|
+
n_test = (
|
|
56
|
+
len(class_indices) - n_train - n_val
|
|
57
|
+
if use_all_samples
|
|
58
|
+
else int(np.floor(test_ratio * len(class_indices))) or 1
|
|
59
|
+
)
|
|
60
|
+
test_indices.extend(class_indices[n_train + n_val : n_train + n_val + n_test])
|
|
55
61
|
|
|
56
62
|
return train_indices, val_indices, test_indices or None
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
"""Cross-entropy based loss function."""
|
|
2
|
+
|
|
3
|
+
from typing import Sequence
|
|
4
|
+
|
|
5
|
+
import torch
|
|
6
|
+
from torch import nn
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class CrossEntropyLoss(nn.CrossEntropyLoss):
|
|
10
|
+
"""A wrapper around torch.nn.CrossEntropyLoss that accepts weights in list format.
|
|
11
|
+
|
|
12
|
+
Needed for .yaml file loading & class instantiation with jsonarparse.
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
def __init__(
|
|
16
|
+
self, *args, weight: Sequence[float] | torch.Tensor | None = None, **kwargs
|
|
17
|
+
) -> None:
|
|
18
|
+
"""Initialize the loss function.
|
|
19
|
+
|
|
20
|
+
Args:
|
|
21
|
+
args: Positional arguments from the base class.
|
|
22
|
+
weight: A list of weights to assign to each class.
|
|
23
|
+
kwargs: Key-word arguments from the base class.
|
|
24
|
+
"""
|
|
25
|
+
if weight is not None and not isinstance(weight, torch.Tensor):
|
|
26
|
+
weight = torch.tensor(weight)
|
|
27
|
+
super().__init__(*args, **kwargs, weight=weight)
|
|
@@ -3,8 +3,6 @@
|
|
|
3
3
|
from eva.core.metrics.average_loss import AverageLoss
|
|
4
4
|
from eva.core.metrics.binary_balanced_accuracy import BinaryBalancedAccuracy
|
|
5
5
|
from eva.core.metrics.defaults import BinaryClassificationMetrics, MulticlassClassificationMetrics
|
|
6
|
-
from eva.core.metrics.generalized_dice import GeneralizedDiceScore
|
|
7
|
-
from eva.core.metrics.mean_iou import MeanIoU
|
|
8
6
|
from eva.core.metrics.structs import Metric, MetricCollection, MetricModule, MetricsSchema
|
|
9
7
|
|
|
10
8
|
__all__ = [
|
|
@@ -12,8 +10,6 @@ __all__ = [
|
|
|
12
10
|
"BinaryBalancedAccuracy",
|
|
13
11
|
"BinaryClassificationMetrics",
|
|
14
12
|
"MulticlassClassificationMetrics",
|
|
15
|
-
"GeneralizedDiceScore",
|
|
16
|
-
"MeanIoU",
|
|
17
13
|
"Metric",
|
|
18
14
|
"MetricCollection",
|
|
19
15
|
"MetricModule",
|
|
@@ -4,10 +4,8 @@ from eva.core.metrics.defaults.classification import (
|
|
|
4
4
|
BinaryClassificationMetrics,
|
|
5
5
|
MulticlassClassificationMetrics,
|
|
6
6
|
)
|
|
7
|
-
from eva.core.metrics.defaults.segmentation import MulticlassSegmentationMetrics
|
|
8
7
|
|
|
9
8
|
__all__ = [
|
|
10
9
|
"MulticlassClassificationMetrics",
|
|
11
10
|
"BinaryClassificationMetrics",
|
|
12
|
-
"MulticlassSegmentationMetrics",
|
|
13
11
|
]
|
|
@@ -1,10 +1,10 @@
|
|
|
1
1
|
"""Base model module."""
|
|
2
2
|
|
|
3
|
+
import os
|
|
3
4
|
from typing import Any, Mapping
|
|
4
5
|
|
|
5
6
|
import lightning.pytorch as pl
|
|
6
7
|
import torch
|
|
7
|
-
from lightning.pytorch.strategies.single_device import SingleDeviceStrategy
|
|
8
8
|
from lightning.pytorch.utilities import memory
|
|
9
9
|
from lightning.pytorch.utilities.types import STEP_OUTPUT
|
|
10
10
|
from typing_extensions import override
|
|
@@ -49,14 +49,14 @@ class ModelModule(pl.LightningModule):
|
|
|
49
49
|
|
|
50
50
|
@property
|
|
51
51
|
def metrics_device(self) -> torch.device:
|
|
52
|
-
"""Returns the device by which the metrics should be calculated.
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
return
|
|
52
|
+
"""Returns the device by which the metrics should be calculated."""
|
|
53
|
+
device = os.getenv("METRICS_DEVICE", None)
|
|
54
|
+
if device is not None:
|
|
55
|
+
return torch.device(device)
|
|
56
|
+
elif self.device.type == "mps":
|
|
57
|
+
# mps seems to have compatibility issues with segmentation metrics
|
|
58
|
+
return torch.device("cpu")
|
|
59
|
+
return self.device
|
|
60
60
|
|
|
61
61
|
@override
|
|
62
62
|
def on_fit_start(self) -> None:
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
"""Transforms for extracting the CLS output from a model output."""
|
|
2
|
+
|
|
3
|
+
import torch
|
|
4
|
+
from transformers import modeling_outputs
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class ExtractCLSFeatures:
|
|
8
|
+
"""Extracts the CLS token from a ViT model output."""
|
|
9
|
+
|
|
10
|
+
def __init__(
|
|
11
|
+
self, cls_index: int = 0, num_register_tokens: int = 0, include_patch_tokens: bool = False
|
|
12
|
+
) -> None:
|
|
13
|
+
"""Initializes the transformation.
|
|
14
|
+
|
|
15
|
+
Args:
|
|
16
|
+
cls_index: The index of the CLS token in the output tensor.
|
|
17
|
+
num_register_tokens: The number of register tokens in the model output.
|
|
18
|
+
include_patch_tokens: Whether to concat the mean aggregated patch tokens with
|
|
19
|
+
the cls token.
|
|
20
|
+
"""
|
|
21
|
+
self._cls_index = cls_index
|
|
22
|
+
self._num_register_tokens = num_register_tokens
|
|
23
|
+
self._include_patch_tokens = include_patch_tokens
|
|
24
|
+
|
|
25
|
+
def __call__(
|
|
26
|
+
self, tensor: torch.Tensor | modeling_outputs.BaseModelOutputWithPooling
|
|
27
|
+
) -> torch.Tensor:
|
|
28
|
+
"""Call method for the transformation.
|
|
29
|
+
|
|
30
|
+
Args:
|
|
31
|
+
tensor: The tensor representing the model output.
|
|
32
|
+
"""
|
|
33
|
+
if isinstance(tensor, modeling_outputs.BaseModelOutputWithPooling):
|
|
34
|
+
tensor = tensor.last_hidden_state
|
|
35
|
+
|
|
36
|
+
cls_token = tensor[:, self._cls_index, :]
|
|
37
|
+
if self._include_patch_tokens:
|
|
38
|
+
patch_tokens = tensor[:, 1 + self._num_register_tokens :, :]
|
|
39
|
+
return torch.cat([cls_token, patch_tokens.mean(1)], dim=-1)
|
|
40
|
+
|
|
41
|
+
return cls_token
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
"""Transforms for extracting the patch features from a model output."""
|
|
2
|
+
|
|
3
|
+
import math
|
|
4
|
+
from typing import List
|
|
5
|
+
|
|
6
|
+
import torch
|
|
7
|
+
from transformers import modeling_outputs
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class ExtractPatchFeatures:
|
|
11
|
+
"""Extracts the patch features from a ViT model output."""
|
|
12
|
+
|
|
13
|
+
def __init__(
|
|
14
|
+
self,
|
|
15
|
+
has_cls_token: bool = True,
|
|
16
|
+
num_register_tokens: int = 0,
|
|
17
|
+
ignore_remaining_dims: bool = False,
|
|
18
|
+
) -> None:
|
|
19
|
+
"""Initializes the transformation.
|
|
20
|
+
|
|
21
|
+
Args:
|
|
22
|
+
has_cls_token: If set to `True`, the model output is expected to have
|
|
23
|
+
a classification token.
|
|
24
|
+
num_register_tokens: The number of register tokens in the model output.
|
|
25
|
+
ignore_remaining_dims: If set to `True`, ignore the remaining dimensions
|
|
26
|
+
of the patch grid if it is not a square number.
|
|
27
|
+
"""
|
|
28
|
+
self._has_cls_token = has_cls_token
|
|
29
|
+
self._num_register_tokens = num_register_tokens
|
|
30
|
+
self._ignore_remaining_dims = ignore_remaining_dims
|
|
31
|
+
|
|
32
|
+
def __call__(
|
|
33
|
+
self, tensor: torch.Tensor | modeling_outputs.BaseModelOutputWithPooling
|
|
34
|
+
) -> List[torch.Tensor]:
|
|
35
|
+
"""Call method for the transformation.
|
|
36
|
+
|
|
37
|
+
Args:
|
|
38
|
+
tensor: The raw embeddings of the model.
|
|
39
|
+
|
|
40
|
+
Returns:
|
|
41
|
+
A tensor (batch_size, hidden_size, n_patches_height, n_patches_width)
|
|
42
|
+
representing the model output.
|
|
43
|
+
"""
|
|
44
|
+
num_skip = int(self._has_cls_token) + self._num_register_tokens
|
|
45
|
+
if isinstance(tensor, modeling_outputs.BaseModelOutputWithPooling):
|
|
46
|
+
features = tensor.last_hidden_state[:, num_skip:, :].permute(0, 2, 1)
|
|
47
|
+
else:
|
|
48
|
+
features = tensor[:, num_skip:, :].permute(0, 2, 1)
|
|
49
|
+
|
|
50
|
+
batch_size, hidden_size, patch_grid = features.shape
|
|
51
|
+
height = width = int(math.sqrt(patch_grid))
|
|
52
|
+
if height * width != patch_grid:
|
|
53
|
+
if self._ignore_remaining_dims:
|
|
54
|
+
features = features[:, :, -height * width :]
|
|
55
|
+
else:
|
|
56
|
+
raise ValueError(f"Patch grid size must be a square number {patch_grid}.")
|
|
57
|
+
patch_embeddings = features.view(batch_size, hidden_size, height, width)
|
|
58
|
+
|
|
59
|
+
return [patch_embeddings]
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
"""Progress bar utility functions."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
|
|
5
|
+
from tqdm import tqdm as _tqdm
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def tqdm(*args, **kwargs) -> _tqdm:
|
|
9
|
+
"""Wrapper function for `tqdm.tqdm`."""
|
|
10
|
+
refresh_rate = os.environ.get("TQDM_REFRESH_RATE")
|
|
11
|
+
refresh_rate = int(refresh_rate) if refresh_rate is not None else None
|
|
12
|
+
disable = bool(int(os.environ.get("TQDM_DISABLE", 0))) or (refresh_rate == 0)
|
|
13
|
+
kwargs.setdefault("disable", disable)
|
|
14
|
+
kwargs.setdefault("miniters", refresh_rate)
|
|
15
|
+
return _tqdm(*args, **kwargs)
|
|
@@ -6,6 +6,7 @@ from eva.vision.data.datasets.classification import (
|
|
|
6
6
|
MHIST,
|
|
7
7
|
PANDA,
|
|
8
8
|
Camelyon16,
|
|
9
|
+
PANDASmall,
|
|
9
10
|
PatchCamelyon,
|
|
10
11
|
WsiClassificationDataset,
|
|
11
12
|
)
|
|
@@ -15,6 +16,7 @@ from eva.vision.data.datasets.segmentation import (
|
|
|
15
16
|
EmbeddingsSegmentationDataset,
|
|
16
17
|
ImageSegmentation,
|
|
17
18
|
LiTS,
|
|
19
|
+
LiTSBalanced,
|
|
18
20
|
MoNuSAC,
|
|
19
21
|
TotalSegmentator2D,
|
|
20
22
|
)
|
|
@@ -27,6 +29,7 @@ __all__ = [
|
|
|
27
29
|
"CRC",
|
|
28
30
|
"MHIST",
|
|
29
31
|
"PANDA",
|
|
32
|
+
"PANDASmall",
|
|
30
33
|
"Camelyon16",
|
|
31
34
|
"PatchCamelyon",
|
|
32
35
|
"WsiClassificationDataset",
|
|
@@ -34,6 +37,7 @@ __all__ = [
|
|
|
34
37
|
"EmbeddingsSegmentationDataset",
|
|
35
38
|
"ImageSegmentation",
|
|
36
39
|
"LiTS",
|
|
40
|
+
"LiTSBalanced",
|
|
37
41
|
"MoNuSAC",
|
|
38
42
|
"TotalSegmentator2D",
|
|
39
43
|
"VisionDataset",
|
|
@@ -4,7 +4,7 @@ from eva.vision.data.datasets.classification.bach import BACH
|
|
|
4
4
|
from eva.vision.data.datasets.classification.camelyon16 import Camelyon16
|
|
5
5
|
from eva.vision.data.datasets.classification.crc import CRC
|
|
6
6
|
from eva.vision.data.datasets.classification.mhist import MHIST
|
|
7
|
-
from eva.vision.data.datasets.classification.panda import PANDA
|
|
7
|
+
from eva.vision.data.datasets.classification.panda import PANDA, PANDASmall
|
|
8
8
|
from eva.vision.data.datasets.classification.patch_camelyon import PatchCamelyon
|
|
9
9
|
from eva.vision.data.datasets.classification.wsi import WsiClassificationDataset
|
|
10
10
|
|
|
@@ -15,5 +15,6 @@ __all__ = [
|
|
|
15
15
|
"PatchCamelyon",
|
|
16
16
|
"WsiClassificationDataset",
|
|
17
17
|
"PANDA",
|
|
18
|
+
"PANDASmall",
|
|
18
19
|
"Camelyon16",
|
|
19
20
|
]
|
{kaiko_eva-0.1.0 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/camelyon16.py
RENAMED
|
@@ -87,6 +87,7 @@ class Camelyon16(wsi.MultiWsiDataset, base.ImageClassification):
|
|
|
87
87
|
target_mpp: float = 0.5,
|
|
88
88
|
backend: str = "openslide",
|
|
89
89
|
image_transforms: Callable | None = None,
|
|
90
|
+
coords_path: str | None = None,
|
|
90
91
|
seed: int = 42,
|
|
91
92
|
) -> None:
|
|
92
93
|
"""Initializes the dataset.
|
|
@@ -100,6 +101,7 @@ class Camelyon16(wsi.MultiWsiDataset, base.ImageClassification):
|
|
|
100
101
|
target_mpp: Target microns per pixel (mpp) for the patches.
|
|
101
102
|
backend: The backend to use for reading the whole-slide images.
|
|
102
103
|
image_transforms: Transforms to apply to the extracted image patches.
|
|
104
|
+
coords_path: File path to save the patch coordinates as .csv.
|
|
103
105
|
seed: Random seed for reproducibility.
|
|
104
106
|
"""
|
|
105
107
|
self._split = split
|
|
@@ -119,6 +121,7 @@ class Camelyon16(wsi.MultiWsiDataset, base.ImageClassification):
|
|
|
119
121
|
target_mpp=target_mpp,
|
|
120
122
|
backend=backend,
|
|
121
123
|
image_transforms=image_transforms,
|
|
124
|
+
coords_path=coords_path,
|
|
122
125
|
)
|
|
123
126
|
|
|
124
127
|
@property
|
|
@@ -207,7 +210,7 @@ class Camelyon16(wsi.MultiWsiDataset, base.ImageClassification):
|
|
|
207
210
|
|
|
208
211
|
@override
|
|
209
212
|
def load_metadata(self, index: int) -> Dict[str, Any]:
|
|
210
|
-
return
|
|
213
|
+
return wsi.MultiWsiDataset.load_metadata(self, index)
|
|
211
214
|
|
|
212
215
|
def _load_file_paths(self, split: Literal["train", "val", "test"] | None = None) -> List[str]:
|
|
213
216
|
"""Loads the file paths of the corresponding dataset split."""
|
|
@@ -49,6 +49,7 @@ class PANDA(wsi.MultiWsiDataset, base.ImageClassification):
|
|
|
49
49
|
target_mpp: float = 0.5,
|
|
50
50
|
backend: str = "openslide",
|
|
51
51
|
image_transforms: Callable | None = None,
|
|
52
|
+
coords_path: str | None = None,
|
|
52
53
|
seed: int = 42,
|
|
53
54
|
) -> None:
|
|
54
55
|
"""Initializes the dataset.
|
|
@@ -62,6 +63,7 @@ class PANDA(wsi.MultiWsiDataset, base.ImageClassification):
|
|
|
62
63
|
target_mpp: Target microns per pixel (mpp) for the patches.
|
|
63
64
|
backend: The backend to use for reading the whole-slide images.
|
|
64
65
|
image_transforms: Transforms to apply to the extracted image patches.
|
|
66
|
+
coords_path: File path to save the patch coordinates as .csv.
|
|
65
67
|
seed: Random seed for reproducibility.
|
|
66
68
|
"""
|
|
67
69
|
self._split = split
|
|
@@ -80,6 +82,7 @@ class PANDA(wsi.MultiWsiDataset, base.ImageClassification):
|
|
|
80
82
|
target_mpp=target_mpp,
|
|
81
83
|
backend=backend,
|
|
82
84
|
image_transforms=image_transforms,
|
|
85
|
+
coords_path=coords_path,
|
|
83
86
|
)
|
|
84
87
|
|
|
85
88
|
@property
|
|
@@ -132,7 +135,7 @@ class PANDA(wsi.MultiWsiDataset, base.ImageClassification):
|
|
|
132
135
|
|
|
133
136
|
@override
|
|
134
137
|
def load_metadata(self, index: int) -> Dict[str, Any]:
|
|
135
|
-
return
|
|
138
|
+
return wsi.MultiWsiDataset.load_metadata(self, index)
|
|
136
139
|
|
|
137
140
|
def _load_file_paths(self, split: Literal["train", "val", "test"] | None = None) -> List[str]:
|
|
138
141
|
"""Loads the file paths of the corresponding dataset split."""
|
|
@@ -182,3 +185,16 @@ class PANDA(wsi.MultiWsiDataset, base.ImageClassification):
|
|
|
182
185
|
|
|
183
186
|
def _get_id_from_path(self, file_path: str) -> str:
|
|
184
187
|
return os.path.basename(file_path).replace(".tiff", "")
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
class PANDASmall(PANDA):
|
|
191
|
+
"""Small version of the PANDA dataset for quicker benchmarking."""
|
|
192
|
+
|
|
193
|
+
_train_split_ratio: float = 0.1
|
|
194
|
+
"""Train split ratio."""
|
|
195
|
+
|
|
196
|
+
_val_split_ratio: float = 0.05
|
|
197
|
+
"""Validation split ratio."""
|
|
198
|
+
|
|
199
|
+
_test_split_ratio: float = 0.05
|
|
200
|
+
"""Test split ratio."""
|
|
@@ -35,6 +35,7 @@ class WsiClassificationDataset(wsi.MultiWsiDataset, base.ImageClassification):
|
|
|
35
35
|
split: Literal["train", "val", "test"] | None = None,
|
|
36
36
|
image_transforms: Callable | None = None,
|
|
37
37
|
column_mapping: Dict[str, str] = default_column_mapping,
|
|
38
|
+
coords_path: str | None = None,
|
|
38
39
|
):
|
|
39
40
|
"""Initializes the dataset.
|
|
40
41
|
|
|
@@ -51,6 +52,7 @@ class WsiClassificationDataset(wsi.MultiWsiDataset, base.ImageClassification):
|
|
|
51
52
|
split: The split of the dataset to load.
|
|
52
53
|
image_transforms: Transforms to apply to the extracted image patches.
|
|
53
54
|
column_mapping: Mapping of the columns in the manifest file.
|
|
55
|
+
coords_path: File path to save the patch coordinates as .csv.
|
|
54
56
|
"""
|
|
55
57
|
self._split = split
|
|
56
58
|
self._column_mapping = self.default_column_mapping | column_mapping
|
|
@@ -66,6 +68,7 @@ class WsiClassificationDataset(wsi.MultiWsiDataset, base.ImageClassification):
|
|
|
66
68
|
target_mpp=target_mpp,
|
|
67
69
|
backend=backend,
|
|
68
70
|
image_transforms=image_transforms,
|
|
71
|
+
coords_path=coords_path,
|
|
69
72
|
)
|
|
70
73
|
|
|
71
74
|
@override
|
|
@@ -88,7 +91,7 @@ class WsiClassificationDataset(wsi.MultiWsiDataset, base.ImageClassification):
|
|
|
88
91
|
|
|
89
92
|
@override
|
|
90
93
|
def load_metadata(self, index: int) -> Dict[str, Any]:
|
|
91
|
-
return
|
|
94
|
+
return wsi.MultiWsiDataset.load_metadata(self, index)
|
|
92
95
|
|
|
93
96
|
def _load_manifest(self, manifest_path: str) -> pd.DataFrame:
|
|
94
97
|
df = pd.read_csv(manifest_path)
|
|
@@ -5,6 +5,7 @@ from eva.vision.data.datasets.segmentation.bcss import BCSS
|
|
|
5
5
|
from eva.vision.data.datasets.segmentation.consep import CoNSeP
|
|
6
6
|
from eva.vision.data.datasets.segmentation.embeddings import EmbeddingsSegmentationDataset
|
|
7
7
|
from eva.vision.data.datasets.segmentation.lits import LiTS
|
|
8
|
+
from eva.vision.data.datasets.segmentation.lits_balanced import LiTSBalanced
|
|
8
9
|
from eva.vision.data.datasets.segmentation.monusac import MoNuSAC
|
|
9
10
|
from eva.vision.data.datasets.segmentation.total_segmentator_2d import TotalSegmentator2D
|
|
10
11
|
|
|
@@ -14,6 +15,7 @@ __all__ = [
|
|
|
14
15
|
"CoNSeP",
|
|
15
16
|
"EmbeddingsSegmentationDataset",
|
|
16
17
|
"LiTS",
|
|
18
|
+
"LiTSBalanced",
|
|
17
19
|
"MoNuSAC",
|
|
18
20
|
"TotalSegmentator2D",
|
|
19
21
|
]
|
|
@@ -37,8 +37,8 @@ class CoNSeP(wsi.MultiWsiDataset, base.ImageSegmentation):
|
|
|
37
37
|
root: str,
|
|
38
38
|
sampler: samplers.Sampler | None = None,
|
|
39
39
|
split: Literal["train", "val"] | None = None,
|
|
40
|
-
width: int =
|
|
41
|
-
height: int =
|
|
40
|
+
width: int = 250,
|
|
41
|
+
height: int = 250,
|
|
42
42
|
target_mpp: float = 0.25,
|
|
43
43
|
transforms: Callable | None = None,
|
|
44
44
|
) -> None:
|