kaiko-eva 0.1.1.tar.gz → 0.1.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
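
To make the scope of such a comparison concrete, the listing below can be reproduced locally. The following is a minimal sketch, not part of the package itself: it downloads both sdists from PyPI with pip and diffs their file listings. The version pins and the kaiko_eva-<version> archive naming are taken from this page; everything else (use of pip download, tarfile, difflib) is an assumption about generally available tooling.

import difflib
import pathlib
import subprocess
import tarfile
import tempfile

def sdist_members(version: str, workdir: pathlib.Path) -> list[str]:
    """Download the kaiko-eva sdist for `version` and return its member paths."""
    subprocess.run(
        ["pip", "download", f"kaiko-eva=={version}", "--no-deps",
         "--no-binary", ":all:", "-d", str(workdir)],
        check=True,
    )
    archive = next(workdir.glob(f"kaiko_eva-{version}.tar.gz"))
    with tarfile.open(archive) as tar:
        # Strip the leading "kaiko_eva-<version>/" prefix so paths line up across versions.
        return sorted(m.name.split("/", 1)[1] for m in tar.getmembers() if "/" in m.name)

with tempfile.TemporaryDirectory() as tmp:
    tmp_path = pathlib.Path(tmp)
    old = sdist_members("0.1.1", tmp_path)
    new = sdist_members("0.1.3", tmp_path)
    # Unified diff of the two file listings: shows added, removed and moved paths,
    # i.e. the same structure as the "Files changed" list below (without the +/- line counts).
    print("\n".join(difflib.unified_diff(
        old, new, fromfile="kaiko_eva-0.1.1", tofile="kaiko_eva-0.1.3", lineterm="")))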

Potentially problematic release.

This version of kaiko-eva might be problematic.

Files changed (551)
  1. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/PKG-INFO +3 -34
  2. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/README.md +2 -33
  3. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/pyproject.toml +1 -1
  4. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/callbacks/writers/embeddings/base.py +3 -4
  5. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/dataloaders/dataloader.py +2 -2
  6. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/splitting/random.py +6 -5
  7. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/splitting/stratified.py +12 -6
  8. kaiko_eva-0.1.3/src/eva/core/losses/__init__.py +5 -0
  9. kaiko_eva-0.1.3/src/eva/core/losses/cross_entropy.py +27 -0
  10. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/metrics/__init__.py +0 -4
  11. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/metrics/defaults/__init__.py +0 -2
  12. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/models/modules/module.py +9 -9
  13. kaiko_eva-0.1.3/src/eva/core/models/transforms/extract_cls_features.py +41 -0
  14. kaiko_eva-0.1.3/src/eva/core/models/transforms/extract_patch_features.py +59 -0
  15. kaiko_eva-0.1.3/src/eva/core/utils/progress_bar.py +15 -0
  16. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/__init__.py +4 -0
  17. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/__init__.py +2 -1
  18. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/camelyon16.py +4 -1
  19. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/panda.py +17 -1
  20. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/wsi.py +4 -1
  21. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/segmentation/__init__.py +2 -0
  22. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/segmentation/consep.py +2 -2
  23. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/segmentation/lits.py +49 -29
  24. kaiko_eva-0.1.3/src/eva/vision/data/datasets/segmentation/lits_balanced.py +93 -0
  25. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/segmentation/monusac.py +7 -7
  26. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/segmentation/total_segmentator_2d.py +2 -2
  27. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/wsi.py +37 -1
  28. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/patching/coordinates.py +9 -1
  29. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/patching/samplers/_utils.py +2 -8
  30. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/patching/samplers/random.py +4 -2
  31. kaiko_eva-0.1.3/src/eva/vision/losses/__init__.py +5 -0
  32. kaiko_eva-0.1.3/src/eva/vision/losses/dice.py +107 -0
  33. kaiko_eva-0.1.3/src/eva/vision/metrics/__init__.py +11 -0
  34. kaiko_eva-0.1.3/src/eva/vision/metrics/defaults/__init__.py +7 -0
  35. {kaiko_eva-0.1.1/src/eva/core → kaiko_eva-0.1.3/src/eva/vision}/metrics/defaults/segmentation/__init__.py +1 -1
  36. {kaiko_eva-0.1.1/src/eva/core → kaiko_eva-0.1.3/src/eva/vision}/metrics/defaults/segmentation/multiclass.py +2 -1
  37. kaiko_eva-0.1.3/src/eva/vision/metrics/segmentation/BUILD +1 -0
  38. kaiko_eva-0.1.3/src/eva/vision/metrics/segmentation/__init__.py +9 -0
  39. kaiko_eva-0.1.3/src/eva/vision/metrics/segmentation/_utils.py +69 -0
  40. {kaiko_eva-0.1.1/src/eva/core/metrics → kaiko_eva-0.1.3/src/eva/vision/metrics/segmentation}/generalized_dice.py +12 -10
  41. kaiko_eva-0.1.3/src/eva/vision/metrics/segmentation/mean_iou.py +57 -0
  42. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/modules/semantic_segmentation.py +4 -3
  43. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/_utils.py +12 -0
  44. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/pathology/__init__.py +4 -1
  45. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/pathology/histai.py +8 -2
  46. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/pathology/mahmood.py +2 -9
  47. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/pathology/owkin.py +14 -0
  48. kaiko_eva-0.1.3/src/eva/vision/models/networks/backbones/pathology/paige.py +51 -0
  49. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/decoders/__init__.py +1 -1
  50. kaiko_eva-0.1.3/src/eva/vision/models/networks/decoders/segmentation/__init__.py +19 -0
  51. kaiko_eva-0.1.3/src/eva/vision/models/networks/decoders/segmentation/base.py +16 -0
  52. kaiko_eva-0.1.1/src/eva/vision/models/networks/decoders/segmentation/conv2d.py → kaiko_eva-0.1.3/src/eva/vision/models/networks/decoders/segmentation/decoder2d.py +26 -22
  53. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/decoders/segmentation/linear.py +2 -2
  54. kaiko_eva-0.1.3/src/eva/vision/models/networks/decoders/segmentation/semantic/__init__.py +12 -0
  55. {kaiko_eva-0.1.1/src/eva/vision/models/networks/decoders/segmentation → kaiko_eva-0.1.3/src/eva/vision/models/networks/decoders/segmentation/semantic}/common.py +3 -3
  56. kaiko_eva-0.1.3/src/eva/vision/models/networks/decoders/segmentation/semantic/with_image.py +94 -0
  57. kaiko_eva-0.1.3/src/eva/vision/models/networks/decoders/segmentation/typings.py +18 -0
  58. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/utils/io/__init__.py +7 -1
  59. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/utils/io/nifti.py +19 -4
  60. kaiko_eva-0.1.3/tests/eva/assets/vision/datasets/lits/Training_Batch2/segmentation-31.nii +3 -0
  61. kaiko_eva-0.1.3/tests/eva/assets/vision/datasets/lits/Training_Batch2/segmentation-45.nii +3 -0
  62. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/splitting/test_random.py +23 -4
  63. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/splitting/test_stratified.py +28 -7
  64. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/models/wrappers/test_huggingface.py +5 -0
  65. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/classification/test_camelyon16.py +5 -0
  66. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/classification/test_panda.py +18 -3
  67. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/classification/test_wsi.py +5 -0
  68. kaiko_eva-0.1.3/tests/eva/vision/data/datasets/segmentation/test_lits_balanced.py +59 -0
  69. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/test_wsi.py +10 -2
  70. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/wsi/patching/samplers/test_foreground_grid.py +13 -7
  71. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/wsi/patching/samplers/test_grid.py +28 -5
  72. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/wsi/patching/samplers/test_random.py +27 -5
  73. kaiko_eva-0.1.3/tests/eva/vision/metrics/defaults/__init__.py +1 -0
  74. {kaiko_eva-0.1.1/tests/eva/core → kaiko_eva-0.1.3/tests/eva/vision}/metrics/defaults/segmentation/test_multiclass.py +1 -1
  75. kaiko_eva-0.1.3/tests/eva/vision/metrics/segmentation/__init__.py +1 -0
  76. kaiko_eva-0.1.3/tests/eva/vision/metrics/segmentation/_utils.py +32 -0
  77. kaiko_eva-0.1.3/tests/eva/vision/metrics/segmentation/test_generalized_dice.py +24 -0
  78. kaiko_eva-0.1.3/tests/eva/vision/metrics/segmentation/test_mean_iou.py +24 -0
  79. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/models/modules/test_semantic_segmentation.py +1 -1
  80. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/models/networks/decoders/segmentation/conv.py +4 -4
  81. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/models/networks/decoders/segmentation/linear.py +1 -1
  82. kaiko_eva-0.1.1/src/eva/core/metrics/mean_iou.py +0 -120
  83. kaiko_eva-0.1.1/src/eva/core/models/transforms/extract_cls_features.py +0 -33
  84. kaiko_eva-0.1.1/src/eva/core/models/transforms/extract_patch_features.py +0 -47
  85. kaiko_eva-0.1.1/src/eva/vision/losses/__init__.py +0 -5
  86. kaiko_eva-0.1.1/src/eva/vision/losses/dice.py +0 -40
  87. kaiko_eva-0.1.1/src/eva/vision/models/networks/decoders/decoder.py +0 -7
  88. kaiko_eva-0.1.1/src/eva/vision/models/networks/decoders/segmentation/__init__.py +0 -11
  89. kaiko_eva-0.1.1/tests/eva/assets/vision/datasets/lits/Training_Batch2/segmentation-31.nii +0 -3
  90. kaiko_eva-0.1.1/tests/eva/assets/vision/datasets/lits/Training_Batch2/segmentation-45.nii +0 -3
  91. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/LICENSE +0 -0
  92. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/__init__.py +0 -0
  93. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/__main__.py +0 -0
  94. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/__version__.py +0 -0
  95. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/__init__.py +0 -0
  96. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/callbacks/__init__.py +0 -0
  97. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/callbacks/config.py +0 -0
  98. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/callbacks/writers/__init__.py +0 -0
  99. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/callbacks/writers/embeddings/__init__.py +0 -0
  100. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/callbacks/writers/embeddings/_manifest.py +0 -0
  101. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/callbacks/writers/embeddings/classification.py +0 -0
  102. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/callbacks/writers/embeddings/segmentation.py +0 -0
  103. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/callbacks/writers/embeddings/typings.py +0 -0
  104. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/cli/__init__.py +0 -0
  105. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/cli/cli.py +0 -0
  106. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/cli/logo.py +0 -0
  107. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/cli/setup.py +0 -0
  108. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/__init__.py +0 -0
  109. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/dataloaders/__init__.py +0 -0
  110. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/datamodules/__init__.py +0 -0
  111. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/datamodules/call.py +0 -0
  112. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/datamodules/datamodule.py +0 -0
  113. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/datamodules/schemas.py +0 -0
  114. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/datasets/__init__.py +0 -0
  115. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/datasets/base.py +0 -0
  116. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/datasets/classification/__init__.py +0 -0
  117. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/datasets/classification/embeddings.py +0 -0
  118. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/datasets/classification/multi_embeddings.py +0 -0
  119. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/datasets/dataset.py +0 -0
  120. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/datasets/embeddings.py +0 -0
  121. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/samplers/__init__.py +0 -0
  122. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/samplers/sampler.py +0 -0
  123. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/splitting/__init__.py +0 -0
  124. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/transforms/__init__.py +0 -0
  125. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/transforms/dtype/__init__.py +0 -0
  126. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/transforms/dtype/array.py +0 -0
  127. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/transforms/padding/__init__.py +0 -0
  128. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/transforms/padding/pad_2d_tensor.py +0 -0
  129. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/transforms/sampling/__init__.py +0 -0
  130. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/data/transforms/sampling/sample_from_axis.py +0 -0
  131. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/interface/__init__.py +0 -0
  132. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/interface/interface.py +0 -0
  133. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/loggers/__init__.py +0 -0
  134. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/loggers/dummy.py +0 -0
  135. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/loggers/experimental_loggers.py +0 -0
  136. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/loggers/log/__init__.py +0 -0
  137. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/loggers/log/image.py +0 -0
  138. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/loggers/log/parameters.py +0 -0
  139. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/loggers/log/utils.py +0 -0
  140. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/loggers/loggers.py +0 -0
  141. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/metrics/average_loss.py +0 -0
  142. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/metrics/binary_balanced_accuracy.py +0 -0
  143. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/metrics/defaults/classification/__init__.py +0 -0
  144. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/metrics/defaults/classification/binary.py +0 -0
  145. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/metrics/defaults/classification/multiclass.py +0 -0
  146. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/metrics/structs/__init__.py +0 -0
  147. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/metrics/structs/collection.py +0 -0
  148. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/metrics/structs/metric.py +0 -0
  149. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/metrics/structs/module.py +0 -0
  150. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/metrics/structs/schemas.py +0 -0
  151. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/metrics/structs/typings.py +0 -0
  152. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/models/__init__.py +0 -0
  153. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/models/modules/__init__.py +0 -0
  154. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/models/modules/head.py +0 -0
  155. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/models/modules/inference.py +0 -0
  156. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/models/modules/typings.py +0 -0
  157. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/models/modules/utils/__init__.py +0 -0
  158. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/models/modules/utils/batch_postprocess.py +0 -0
  159. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/models/modules/utils/grad.py +0 -0
  160. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/models/networks/__init__.py +0 -0
  161. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/models/networks/mlp.py +0 -0
  162. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/models/transforms/__init__.py +0 -0
  163. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/models/wrappers/__init__.py +0 -0
  164. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/models/wrappers/_utils.py +0 -0
  165. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/models/wrappers/base.py +0 -0
  166. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/models/wrappers/from_function.py +0 -0
  167. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/models/wrappers/huggingface.py +0 -0
  168. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/models/wrappers/onnx.py +0 -0
  169. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/trainers/__init__.py +0 -0
  170. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/trainers/_logging.py +0 -0
  171. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/trainers/_recorder.py +0 -0
  172. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/trainers/_utils.py +0 -0
  173. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/trainers/functional.py +0 -0
  174. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/trainers/trainer.py +0 -0
  175. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/utils/__init__.py +0 -0
  176. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/utils/clone.py +0 -0
  177. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/utils/io/__init__.py +0 -0
  178. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/utils/io/dataframe.py +0 -0
  179. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/utils/memory.py +0 -0
  180. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/utils/multiprocessing.py +0 -0
  181. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/utils/operations.py +0 -0
  182. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/utils/parser.py +0 -0
  183. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/core/utils/workers.py +0 -0
  184. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/__init__.py +0 -0
  185. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/callbacks/__init__.py +0 -0
  186. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/callbacks/loggers/__init__.py +0 -0
  187. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/callbacks/loggers/batch/__init__.py +0 -0
  188. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/callbacks/loggers/batch/base.py +0 -0
  189. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/callbacks/loggers/batch/segmentation.py +0 -0
  190. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/__init__.py +0 -0
  191. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/_utils.py +0 -0
  192. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/_validators.py +0 -0
  193. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/bach.py +0 -0
  194. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/base.py +0 -0
  195. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/crc.py +0 -0
  196. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/mhist.py +0 -0
  197. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/classification/patch_camelyon.py +0 -0
  198. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/segmentation/_utils.py +0 -0
  199. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/segmentation/base.py +0 -0
  200. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/segmentation/bcss.py +0 -0
  201. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/segmentation/embeddings.py +0 -0
  202. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/structs.py +0 -0
  203. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/datasets/vision.py +0 -0
  204. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/transforms/__init__.py +0 -0
  205. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/transforms/common/__init__.py +0 -0
  206. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/transforms/common/resize_and_clamp.py +0 -0
  207. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/transforms/common/resize_and_crop.py +0 -0
  208. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/transforms/normalization/__init__.py +0 -0
  209. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/transforms/normalization/clamp.py +0 -0
  210. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/transforms/normalization/functional/__init__.py +0 -0
  211. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/transforms/normalization/functional/rescale_intensity.py +0 -0
  212. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/transforms/normalization/rescale_intensity.py +0 -0
  213. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/__init__.py +0 -0
  214. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/backends/__init__.py +0 -0
  215. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/backends/base.py +0 -0
  216. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/backends/openslide.py +0 -0
  217. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/backends/pil.py +0 -0
  218. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/backends/tiffslide.py +0 -0
  219. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/patching/__init__.py +0 -0
  220. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/patching/mask.py +0 -0
  221. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/patching/samplers/__init__.py +0 -0
  222. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/patching/samplers/base.py +0 -0
  223. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/patching/samplers/foreground_grid.py +0 -0
  224. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/data/wsi/patching/samplers/grid.py +0 -0
  225. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/__init__.py +0 -0
  226. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/modules/__init__.py +0 -0
  227. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/__init__.py +0 -0
  228. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/abmil.py +0 -0
  229. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/__init__.py +0 -0
  230. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/pathology/bioptimus.py +0 -0
  231. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/pathology/gigapath.py +0 -0
  232. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/pathology/kaiko.py +0 -0
  233. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/pathology/lunit.py +0 -0
  234. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/registry.py +0 -0
  235. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/timm/__init__.py +0 -0
  236. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/timm/backbones.py +0 -0
  237. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/universal/__init__.py +0 -0
  238. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/networks/backbones/universal/vit.py +0 -0
  239. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/wrappers/__init__.py +0 -0
  240. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/wrappers/from_registry.py +0 -0
  241. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/models/wrappers/from_timm.py +0 -0
  242. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/utils/__init__.py +0 -0
  243. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/utils/colormap.py +0 -0
  244. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/utils/convert.py +0 -0
  245. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/utils/io/_utils.py +0 -0
  246. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/utils/io/image.py +0 -0
  247. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/utils/io/mat.py +0 -0
  248. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/src/eva/vision/utils/io/text.py +0 -0
  249. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/__init__.py +0 -0
  250. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/__init__.py +0 -0
  251. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/_cli.py +0 -0
  252. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_0_shape_8.pt +0 -0
  253. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_1_shape_8.pt +0 -0
  254. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_2_shape_8_list.pt +0 -0
  255. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_3_shape_8_list.pt +0 -0
  256. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_4_shape_1x8.pt +0 -0
  257. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_5_shape_1x8.pt +0 -0
  258. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_6_shape_1x8_list.pt +0 -0
  259. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/embeddings/embeddings/tensor_7_shape_1x8_list.pt +0 -0
  260. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/embeddings/manifest.csv +0 -0
  261. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_0_shape_6x8.pt +0 -0
  262. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_1_shape_3x8.pt +0 -0
  263. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_2_shape_1x8.pt +0 -0
  264. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_3_shape_2x8.pt +0 -0
  265. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_4_shape_5x8.pt +0 -0
  266. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_5_shape_3x8.pt +0 -0
  267. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_6_shape_1x8_list.pt +0 -0
  268. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_7_shape_6x8_list.pt +0 -0
  269. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_8_shape_2x8_list.pt +0 -0
  270. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/embeddings/tensor_9_shape_5x8_list.pt +0 -0
  271. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/core/datasets/multi-embeddings/manifest.csv +0 -0
  272. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/images/random_bgr_32x32.png +0 -0
  273. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/images/random_grayscale_32x32.png +0 -0
  274. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Benign/b001.tif +0 -0
  275. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Benign/b002.tif +0 -0
  276. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Benign/b003.tif +0 -0
  277. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Benign/b004.tif +0 -0
  278. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Benign/b005.tif +0 -0
  279. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Benign/b006.tif +0 -0
  280. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/InSitu/is001.tif +0 -0
  281. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/InSitu/is002.tif +0 -0
  282. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/InSitu/is003.tif +0 -0
  283. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/InSitu/is004.tif +0 -0
  284. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/InSitu/is005.tif +0 -0
  285. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/InSitu/is006.tif +0 -0
  286. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Invasive/iv001.tif +0 -0
  287. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Invasive/iv002.tif +0 -0
  288. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Invasive/iv003.tif +0 -0
  289. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Invasive/iv004.tif +0 -0
  290. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Invasive/iv005.tif +0 -0
  291. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Invasive/iv006.tif +0 -0
  292. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Normal/n001.tif +0 -0
  293. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Normal/n002.tif +0 -0
  294. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Normal/n003.tif +0 -0
  295. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Normal/n004.tif +0 -0
  296. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Normal/n005.tif +0 -0
  297. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bach/ICIAR2018_BACH_Challenge/Photos/Normal/n006.tif +0 -0
  298. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-A2-A0CM-DX1_xmin18562_ymin56852_MPP-0.2500.png +0 -0
  299. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-A7-A4SD-DX1_xmin53807_ymin11871_MPP-0.2500.png +0 -0
  300. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-AR-A0TS-DX1_xmin118843_ymin22812_MPP-0.2500.png +0 -0
  301. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-AR-A1AQ-DX1_xmin18171_ymin38296_MPP-0.2500.png +0 -0
  302. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-C8-A3XY-DX1_xmin76297_ymin35510_MPP-0.2500.png +0 -0
  303. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-D8-A1XQ-DX1_xmin61261_ymin33317_MPP-0.2500.png +0 -0
  304. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-EW-A1P4-DX1_xmin17256_ymin35430_MPP-0.2500.png +0 -0
  305. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-GI-A2C9-DX1_xmin20882_ymin11843_MPP-0.2500.png +0 -0
  306. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-OL-A5D6-DX1_xmin115108_ymin40554_MPP-0.2500.png +0 -0
  307. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/masks/TCGA-OL-A5D7-DX1_xmin114443_ymin22490_MPP-0.2500.png +0 -0
  308. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-A2-A0CM-DX1_xmin18562_ymin56852_MPP-0.2500.png +0 -0
  309. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-A7-A4SD-DX1_xmin53807_ymin11871_MPP-0.2500.png +0 -0
  310. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-AR-A0TS-DX1_xmin118843_ymin22812_MPP-0.2500.png +0 -0
  311. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-AR-A1AQ-DX1_xmin18171_ymin38296_MPP-0.2500.png +0 -0
  312. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-C8-A3XY-DX1_xmin76297_ymin35510_MPP-0.2500.png +0 -0
  313. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-D8-A1XQ-DX1_xmin61261_ymin33317_MPP-0.2500.png +0 -0
  314. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-EW-A1P4-DX1_xmin17256_ymin35430_MPP-0.2500.png +0 -0
  315. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-GI-A2C9-DX1_xmin20882_ymin11843_MPP-0.2500.png +0 -0
  316. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-OL-A5D6-DX1_xmin115108_ymin40554_MPP-0.2500.png +0 -0
  317. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/bcss/rgbs_colorNormalized/TCGA-OL-A5D7-DX1_xmin114443_ymin22490_MPP-0.2500.png +0 -0
  318. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/camelyon16/testing/images/test_001.tif +0 -0
  319. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/camelyon16/testing/images/test_002.tif +0 -0
  320. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/camelyon16/testing/reference.csv +0 -0
  321. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/camelyon16/training/normal/normal_001.tif +0 -0
  322. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/camelyon16/training/normal/normal_002.tif +0 -0
  323. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/camelyon16/training/tumor/tumor_001.tif +0 -0
  324. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/camelyon16/training/tumor/tumor_002.tif +0 -0
  325. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Test/Images/test_1.png +0 -0
  326. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Test/Images/test_2.png +0 -0
  327. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Test/Images/test_3.png +0 -0
  328. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Test/Labels/test_1.mat +0 -0
  329. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Test/Labels/test_2.mat +0 -0
  330. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Test/Labels/test_3.mat +0 -0
  331. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Train/Images/train_1.png +0 -0
  332. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Train/Images/train_2.png +0 -0
  333. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Train/Images/train_3.png +0 -0
  334. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Train/Images/train_4.png +0 -0
  335. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Train/Labels/train_1.mat +0 -0
  336. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Train/Labels/train_2.mat +0 -0
  337. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Train/Labels/train_3.mat +0 -0
  338. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/consep/Train/Labels/train_4.mat +0 -0
  339. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/ADI/ADI-SIHVHHPH.tif +0 -0
  340. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/ADI/ADI-SIHWWQMY.tif +0 -0
  341. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/BACK/BACK-YYYHKNMK.tif +0 -0
  342. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/BACK/BACK-YYYMDTNW.tif +0 -0
  343. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/DEB/DEB-YYYRSHLP.tif +0 -0
  344. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/DEB/DEB-YYYTCTDR.tif +0 -0
  345. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/LYM/LYM-YYWRPGDD.tif +0 -0
  346. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/LYM/LYM-YYYTKMWW.tif +0 -0
  347. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/MUC/MUC-YYYNWSAM.tif +0 -0
  348. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/MUC/MUC-YYYRQDLW.tif +0 -0
  349. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/MUS/MUS-YYYNVQVQ.tif +0 -0
  350. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/MUS/MUS-YYYRWWNH.tif +0 -0
  351. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/NORM/NORM-YYTTIRVD.tif +0 -0
  352. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/NORM/NORM-YYVAFTKA.tif +0 -0
  353. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/STR/STR-YYYHNSSM.tif +0 -0
  354. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/STR/STR-YYYWVWFG.tif +0 -0
  355. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/TUM/TUM-YYYSGWYW.tif +0 -0
  356. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/CRC-VAL-HE-7K/TUM/TUM-YYYYQFVN.tif +0 -0
  357. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/ADI/ADI-SIHVHHPH.tif +0 -0
  358. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/ADI/ADI-SIHWWQMY.tif +0 -0
  359. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/BACK/BACK-YYYHKNMK.tif +0 -0
  360. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/BACK/BACK-YYYMDTNW.tif +0 -0
  361. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/DEB/DEB-YYYRSHLP.tif +0 -0
  362. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/DEB/DEB-YYYTCTDR.tif +0 -0
  363. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/LYM/LYM-YYWRPGDD.tif +0 -0
  364. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/LYM/LYM-YYYTKMWW.tif +0 -0
  365. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/MUC/MUC-YYYNWSAM.tif +0 -0
  366. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/MUC/MUC-YYYRQDLW.tif +0 -0
  367. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/MUS/MUS-YYYNVQVQ.tif +0 -0
  368. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/MUS/MUS-YYYRWWNH.tif +0 -0
  369. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/NORM/NORM-YYTTIRVD.tif +0 -0
  370. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/NORM/NORM-YYVAFTKA.tif +0 -0
  371. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/STR/STR-YYYHNSSM.tif +0 -0
  372. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/STR/STR-YYYWVWFG.tif +0 -0
  373. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/TUM/TUM-YYYSGWYW.tif +0 -0
  374. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K/TUM/TUM-YYYYQFVN.tif +0 -0
  375. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/ADI/ADI-SIHVHHPH.tif +0 -0
  376. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/ADI/ADI-SIHWWQMY.tif +0 -0
  377. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/BACK/BACK-YYYHKNMK.tif +0 -0
  378. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/BACK/BACK-YYYMDTNW.tif +0 -0
  379. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/DEB/DEB-YYYRSHLP.tif +0 -0
  380. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/DEB/DEB-YYYTCTDR.tif +0 -0
  381. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/LYM/LYM-YYWRPGDD.tif +0 -0
  382. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/LYM/LYM-YYYTKMWW.tif +0 -0
  383. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/MUC/MUC-YYYNWSAM.tif +0 -0
  384. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/MUC/MUC-YYYRQDLW.tif +0 -0
  385. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/MUS/MUS-YYYNVQVQ.tif +0 -0
  386. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/MUS/MUS-YYYRWWNH.tif +0 -0
  387. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/NORM/NORM-YYTTIRVD.tif +0 -0
  388. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/NORM/NORM-YYVAFTKA.tif +0 -0
  389. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/STR/STR-YYYHNSSM.tif +0 -0
  390. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/STR/STR-YYYWVWFG.tif +0 -0
  391. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/TUM/TUM-YYYSGWYW.tif +0 -0
  392. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/crc/NCT-CRC-HE-100K-NONORM/TUM/TUM-YYYYQFVN.tif +0 -0
  393. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/lits/Training_Batch2/volume-31.nii +0 -0
  394. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/lits/Training_Batch2/volume-45.nii +0 -0
  395. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/mhist/annotations.csv +0 -0
  396. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/mhist/images/MHIST_aaa.png +0 -0
  397. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/mhist/images/MHIST_aab.png +0 -0
  398. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/mhist/images/MHIST_aac.png +0 -0
  399. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/mhist/images/MHIST_aae.png +0 -0
  400. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/mhist/images/MHIST_aaf.png +0 -0
  401. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/mhist/images/MHIST_aag.png +0 -0
  402. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/mhist/images/MHIST_aah.png +0 -0
  403. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC Testing Data and Annotations/TCGA-2Z-A9JG-01Z-00-DX1/TCGA-2Z-A9JG-01Z-00-DX1_1.tif +0 -0
  404. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC Testing Data and Annotations/TCGA-2Z-A9JG-01Z-00-DX1/TCGA-2Z-A9JG-01Z-00-DX1_1.xml +0 -0
  405. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC Testing Data and Annotations/TCGA-2Z-A9JG-01Z-00-DX1/TCGA-2Z-A9JG-01Z-00-DX1_2.tif +0 -0
  406. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC Testing Data and Annotations/TCGA-2Z-A9JG-01Z-00-DX1/TCGA-2Z-A9JG-01Z-00-DX1_2.xml +0 -0
  407. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC Testing Data and Annotations/TCGA-2Z-A9JG-01Z-00-DX1/TCGA-2Z-A9JG-01Z-00-DX1_3.tif +0 -0
  408. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC Testing Data and Annotations/TCGA-2Z-A9JG-01Z-00-DX1/TCGA-2Z-A9JG-01Z-00-DX1_3.xml +0 -0
  409. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC Testing Data and Annotations/TCGA-2Z-A9JN-01Z-00-DX1/TCGA-2Z-A9JN-01Z-00-DX1_1.tif +0 -0
  410. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC Testing Data and Annotations/TCGA-2Z-A9JN-01Z-00-DX1/TCGA-2Z-A9JN-01Z-00-DX1_1.xml +0 -0
  411. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-55-1594-01Z-00-DX1/TCGA-55-1594-01Z-00-DX1_003.tif +0 -0
  412. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-55-1594-01Z-00-DX1/TCGA-55-1594-01Z-00-DX1_003.xml +0 -0
  413. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-5P-A9K0-01Z-00-DX1/TCGA-5P-A9K0-01Z-00-DX1_3.tif +0 -0
  414. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-5P-A9K0-01Z-00-DX1/TCGA-5P-A9K0-01Z-00-DX1_3.xml +0 -0
  415. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-69-7760-01Z-00-DX1/TCGA-69-7760-01Z-00-DX1_001.tif +0 -0
  416. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-69-7760-01Z-00-DX1/TCGA-69-7760-01Z-00-DX1_001.xml +0 -0
  417. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-69-A59K-01Z-00-DX1/TCGA-69-A59K-01Z-00-DX1_001.tif +0 -0
  418. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-69-A59K-01Z-00-DX1/TCGA-69-A59K-01Z-00-DX1_001.xml +0 -0
  419. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-69-A59K-01Z-00-DX1/TCGA-69-A59K-01Z-00-DX1_002.tif +0 -0
  420. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/monusac/MoNuSAC_images_and_annotations/TCGA-69-A59K-01Z-00-DX1/TCGA-69-A59K-01Z-00-DX1_002.xml +0 -0
  421. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/0214df71ae527e2144021178c453d204.tiff +0 -0
  422. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/02d302a8d723fa00331f373091b29135.tiff +0 -0
  423. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/157565e23ba28d5a42f63f34f3dd4425.tiff +0 -0
  424. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/682a1fd346b6fff340afbdb80c2f7caf.tiff +0 -0
  425. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/8582b59b41635fa38401d1bddad66707.tiff +0 -0
  426. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/8c357871e57c5c60277230412f2d9028.tiff +0 -0
  427. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/979cf5a2fa4079eaf74343d6ff5e1b51.tiff +0 -0
  428. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/9dd40c0127d217bc4917e4db40e06e94.tiff +0 -0
  429. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/9ed8ec7bf90653bc4ca86b3ca53cbb96.tiff +0 -0
  430. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/a04310d441e8d2c7a5066627baeec9b6.tiff +0 -0
  431. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_images/fb8886059879eaac70139336cb525838.tiff +0 -0
  432. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/panda/train_with_noisy_labels.csv +0 -0
  433. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/patch_camelyon/camelyonpatch_level_2_split_test_x.h5 +0 -0
  434. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/patch_camelyon/camelyonpatch_level_2_split_test_y.h5 +0 -0
  435. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/patch_camelyon/camelyonpatch_level_2_split_train_x.h5 +0 -0
  436. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/patch_camelyon/camelyonpatch_level_2_split_train_y.h5 +0 -0
  437. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/patch_camelyon/camelyonpatch_level_2_split_valid_x.h5 +0 -0
  438. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/patch_camelyon/camelyonpatch_level_2_split_valid_y.h5 +0 -0
  439. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/meta.csv +0 -0
  440. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0011/ct.nii.gz +0 -0
  441. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0011/segmentations/aorta_small.nii.gz +0 -0
  442. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0011/segmentations/brain_small.nii.gz +0 -0
  443. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0011/segmentations/colon_small.nii.gz +0 -0
  444. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0011/segmentations/semantic_labels/masks.nii.gz +0 -0
  445. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0461/ct.nii.gz +0 -0
  446. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0461/segmentations/aorta_small.nii.gz +0 -0
  447. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0461/segmentations/brain_small.nii.gz +0 -0
  448. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0461/segmentations/colon_small.nii.gz +0 -0
  449. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0461/segmentations/semantic_labels/masks.nii.gz +0 -0
  450. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0762/ct.nii.gz +0 -0
  451. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0762/segmentations/aorta_small.nii.gz +0 -0
  452. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0762/segmentations/brain_small.nii.gz +0 -0
  453. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0762/segmentations/colon_small.nii.gz +0 -0
  454. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/total_segmentator/Totalsegmentator_dataset_v201/s0762/segmentations/semantic_labels/masks.nii.gz +0 -0
  455. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/wsi/0/a.tiff +0 -0
  456. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/wsi/0/b.tiff +0 -0
  457. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/wsi/1/a.tiff +0 -0
  458. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/assets/vision/datasets/wsi/manifest.csv +0 -0
  459. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/conftest.py +0 -0
  460. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/callbacks/__init__.py +0 -0
  461. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/callbacks/conftest.py +0 -0
  462. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/callbacks/writers/__init__.py +0 -0
  463. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/callbacks/writers/embeddings/__init__.py +0 -0
  464. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/callbacks/writers/embeddings/test_classification.py +0 -0
  465. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/__init__.py +0 -0
  466. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/dataloaders/__init__.py +0 -0
  467. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/dataloaders/test_dataloader.py +0 -0
  468. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/datamodules/__init__.py +0 -0
  469. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/datamodules/_utils.py +0 -0
  470. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/datamodules/test_datamodule.py +0 -0
  471. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/datamodules/test_schemas.py +0 -0
  472. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/datasets/__init__.py +0 -0
  473. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/datasets/classification/__init__.py +0 -0
  474. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/datasets/classification/test_embeddings.py +0 -0
  475. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/datasets/classification/test_multi_embeddings.py +0 -0
  476. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/splitting/__init__.py +0 -0
  477. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/transforms/__init__.py +0 -0
  478. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/transforms/padding/__init__.py +0 -0
  479. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/transforms/padding/test_pad_2d_tensor.py +0 -0
  480. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/transforms/sampling/__init__.py +0 -0
  481. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/data/transforms/sampling/test_sample_from_axis.py +0 -0
  482. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/metrics/__init__.py +0 -0
  483. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/metrics/core/__init__.py +0 -0
  484. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/metrics/core/test_metric_module.py +0 -0
  485. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/metrics/core/test_schemas.py +0 -0
  486. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/metrics/defaults/__init__.py +0 -0
  487. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/metrics/defaults/classification/__init__.py +0 -0
  488. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/metrics/defaults/classification/test_binary.py +0 -0
  489. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/metrics/defaults/classification/test_multiclass.py +0 -0
  490. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/metrics/test_average_loss.py +0 -0
  491. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/metrics/test_binary_balanced_accuracy.py +0 -0
  492. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/models/__init__.py +0 -0
  493. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/models/modules/__init__.py +0 -0
  494. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/models/modules/conftest.py +0 -0
  495. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/models/modules/test_head.py +0 -0
  496. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/models/modules/test_inference.py +0 -0
  497. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/models/modules/utils/__init__.py +0 -0
  498. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/models/modules/utils/test_batch_postproces.py +0 -0
  499. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/models/networks/__init__.py +0 -0
  500. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/models/networks/test_mlp.py +0 -0
  501. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/models/wrappers/__init__.py +0 -0
  502. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/models/wrappers/test_from_function.py +0 -0
  503. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/models/wrappers/test_onnx.py +0 -0
  504. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/test_cli.py +0 -0
  505. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/trainers/__init__.py +0 -0
  506. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/trainers/test_recorder.py +0 -0
  507. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/utils/__init__.py +0 -0
  508. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/core/utils/test_operations.py +0 -0
  509. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/__init__.py +0 -0
  510. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/__init__.py +0 -0
  511. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/__init__.py +0 -0
  512. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/classification/__init__.py +0 -0
  513. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/classification/test_bach.py +0 -0
  514. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/classification/test_crc.py +0 -0
  515. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/classification/test_mhist.py +0 -0
  516. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/classification/test_patch_camelyon.py +0 -0
  517. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/segmentation/__init__.py +0 -0
  518. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/segmentation/test_bcss.py +0 -0
  519. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/segmentation/test_consep.py +0 -0
  520. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/segmentation/test_lits.py +0 -0
  521. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/segmentation/test_monusac.py +0 -0
  522. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/datasets/segmentation/test_total_segmentator.py +0 -0
  523. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/transforms/__init__.py +0 -0
  524. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/transforms/common/__init__.py +0 -0
  525. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/transforms/common/test_resize_and_clamp.py +0 -0
  526. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/transforms/common/test_resize_and_crop.py +0 -0
  527. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/transforms/normalization/__init__.py +0 -0
  528. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/transforms/normalization/functional/__init__.py +0 -0
  529. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/transforms/normalization/functional/test_rescale_intensity.py +0 -0
  530. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/wsi/__init__.py +0 -0
  531. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/wsi/patching/__init__.py +0 -0
  532. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/wsi/patching/samplers/__init__.py +0 -0
  533. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/data/wsi/patching/test_mask.py +0 -0
  534. {kaiko_eva-0.1.1/tests/eva/core → kaiko_eva-0.1.3/tests/eva/vision}/metrics/defaults/segmentation/__init__.py +0 -0
  535. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/models/__init__.py +0 -0
  536. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/models/modules/__init__.py +0 -0
  537. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/models/modules/conftest.py +0 -0
  538. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/models/networks/__init__.py +0 -0
  539. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/models/networks/backbones/__init__.py +0 -0
  540. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/models/networks/backbones/test_registry.py +0 -0
  541. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/models/networks/decoders/__init__.py +0 -0
  542. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/models/networks/decoders/segmentation/__init__.py +0 -0
  543. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/models/networks/test_abmil.py +0 -0
  544. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/models/wrappers/__init__.py +0 -0
  545. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/models/wrappers/test_backbone.py +0 -0
  546. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/models/wrappers/test_from_timm.py +0 -0
  547. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/test_vision_cli.py +0 -0
  548. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/utils/__init__.py +0 -0
  549. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/utils/io/__init__.py +0 -0
  550. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/utils/io/test_image.py +0 -0
  551. {kaiko_eva-0.1.1 → kaiko_eva-0.1.3}/tests/eva/vision/utils/test_convert.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: kaiko-eva
- Version: 0.1.1
+ Version: 0.1.3
  Summary: Evaluation Framework for oncology foundation models.
  Keywords: machine-learning,evaluation-framework,oncology,foundation-models
  Author-Email: Ioannis Gatopoulos <ioannis@kaiko.ai>, =?utf-8?q?Nicolas_K=C3=A4nzig?= <nicolas@kaiko.ai>, Roman Moser <roman@kaiko.ai>
@@ -468,41 +468,10 @@ and [tutorials](https://kaiko-ai.github.io/eva/dev/user-guide/advanced/replicate

  ## Leaderboards

- In this section you will find model benchmarks which were generated with _`eva`_.
+ The following table shows the FMs we have evaluated with _`eva`_. For more detailed information about the evaluation process, please refer to our [documentation](https://kaiko-ai.github.io/eva/main/leaderboards/).

- ### Table I: WSI and microscopy image tasks
+ ![Pathology Leaderboard](./docs/images/leaderboard.svg)

- <br />
-
- <div align="center">
-
- | Model | BACH | CRC | MHIST | PCam | Camelyon16 | PANDA | CoNSeP | MoNuSAC |
- |---------|-------|-------|-------|--------|------------|-------|------------|-------|
- | ViT-S/16 _(random)_ <sup>[1]</sup> | 0.411|0.613|0.5|0.752|0.551|0.347|0.489|0.394|
- | ViT-S/16 _(ImageNet)_ <sup>[1]</sup> | 0.675|0.936|0.827|0.861|0.751|0.676|0.54|0.512|
- | DINO<sub>(p=16)</sub> <sup>[2]</sup> | 0.77|0.936|0.751|0.905|0.869|0.737|0.625|0.549|
- | Phikon <sup>[3]</sup> | 0.715|0.942|0.766|0.925|0.879|0.784|0.68|0.554|
- | UNI <sup>[4]</sup> | 0.797|0.95|0.835|0.939|0.933|0.774|0.67|0.575|
- | ViT-S/16 _(kaiko.ai)_ <sup>[5]</sup> | 0.8|0.949|0.831|0.902|0.897|0.77|0.622|0.573|
- | ViT-S/8 _(kaiko.ai)_ <sup>[5]</sup> | 0.825|0.948|0.826|0.887|0.879|0.741|0.677|0.617|
- | ViT-B/16 _(kaiko.ai)_ <sup>[5]</sup> | 0.846|0.959|0.839|0.906|0.891|0.753|0.647|0.572|
- | ViT-B/8 _(kaiko.ai)_ <sup>[5]</sup> | 0.867|0.952|0.814|0.921|0.939|0.761|0.706|0.661|
- | ViT-L/14 _(kaiko.ai)_ <sup>[5]</sup> | 0.862|0.935|0.822|0.907|0.941|0.769|0.686|0.599|
-
- _Table I: Linear probing evaluation of FMs on patch-level downstream datasets.<br> We report balanced accuracy
- for classification tasks and generalized Dice score for semgetnation tasks, averaged over 5 runs. Results are
- reported on the "test" split if available and otherwise on the "validation" split._
-
- </div>
-
- <br />
-
- _References_:
- 1. _"Emerging properties in self-supervised vision transformers”_, [arXiv](https://arxiv.org/abs/2104.14294)
- 2. _"Benchmarking self-supervised learning on diverse pathology datasets”_, [arXiv](https://arxiv.org/abs/2212.04690)
- 3. _"Scaling self-supervised learning for histopathology with masked image modeling”_, [medRxiv](https://www.medrxiv.org/content/10.1101/2023.07.21.23292757v1)
- 4. _"A General-Purpose Self-Supervised Model for Computational Pathology”_, [arXiv](https://arxiv.org/abs/2308.15474)
- 5. _"Towards Training Large-Scale Pathology Foundation Models: from TCGA to Hospital Scale”_, [arXiv](https://arxiv.org/pdf/2404.15217)

  ## Contributing

@@ -212,41 +212,10 @@ and [tutorials](https://kaiko-ai.github.io/eva/dev/user-guide/advanced/replicate

  ## Leaderboards

- In this section you will find model benchmarks which were generated with _`eva`_.
+ The following table shows the FMs we have evaluated with _`eva`_. For more detailed information about the evaluation process, please refer to our [documentation](https://kaiko-ai.github.io/eva/main/leaderboards/).

- ### Table I: WSI and microscopy image tasks
+ ![Pathology Leaderboard](./docs/images/leaderboard.svg)

- <br />
-
- <div align="center">
-
- | Model | BACH | CRC | MHIST | PCam | Camelyon16 | PANDA | CoNSeP | MoNuSAC |
- |---------|-------|-------|-------|--------|------------|-------|------------|-------|
- | ViT-S/16 _(random)_ <sup>[1]</sup> | 0.411|0.613|0.5|0.752|0.551|0.347|0.489|0.394|
- | ViT-S/16 _(ImageNet)_ <sup>[1]</sup> | 0.675|0.936|0.827|0.861|0.751|0.676|0.54|0.512|
- | DINO<sub>(p=16)</sub> <sup>[2]</sup> | 0.77|0.936|0.751|0.905|0.869|0.737|0.625|0.549|
- | Phikon <sup>[3]</sup> | 0.715|0.942|0.766|0.925|0.879|0.784|0.68|0.554|
- | UNI <sup>[4]</sup> | 0.797|0.95|0.835|0.939|0.933|0.774|0.67|0.575|
- | ViT-S/16 _(kaiko.ai)_ <sup>[5]</sup> | 0.8|0.949|0.831|0.902|0.897|0.77|0.622|0.573|
- | ViT-S/8 _(kaiko.ai)_ <sup>[5]</sup> | 0.825|0.948|0.826|0.887|0.879|0.741|0.677|0.617|
- | ViT-B/16 _(kaiko.ai)_ <sup>[5]</sup> | 0.846|0.959|0.839|0.906|0.891|0.753|0.647|0.572|
- | ViT-B/8 _(kaiko.ai)_ <sup>[5]</sup> | 0.867|0.952|0.814|0.921|0.939|0.761|0.706|0.661|
- | ViT-L/14 _(kaiko.ai)_ <sup>[5]</sup> | 0.862|0.935|0.822|0.907|0.941|0.769|0.686|0.599|
-
- _Table I: Linear probing evaluation of FMs on patch-level downstream datasets.<br> We report balanced accuracy
- for classification tasks and generalized Dice score for semgetnation tasks, averaged over 5 runs. Results are
- reported on the "test" split if available and otherwise on the "validation" split._
-
- </div>
-
- <br />
-
- _References_:
- 1. _"Emerging properties in self-supervised vision transformers”_, [arXiv](https://arxiv.org/abs/2104.14294)
- 2. _"Benchmarking self-supervised learning on diverse pathology datasets”_, [arXiv](https://arxiv.org/abs/2212.04690)
- 3. _"Scaling self-supervised learning for histopathology with masked image modeling”_, [medRxiv](https://www.medrxiv.org/content/10.1101/2023.07.21.23292757v1)
- 4. _"A General-Purpose Self-Supervised Model for Computational Pathology”_, [arXiv](https://arxiv.org/abs/2308.15474)
- 5. _"Towards Training Large-Scale Pathology Foundation Models: from TCGA to Hospital Scale”_, [arXiv](https://arxiv.org/pdf/2404.15217)

  ## Contributing

@@ -6,7 +6,7 @@ build-backend = "pdm.backend"

  [project]
  name = "kaiko-eva"
- version = "0.1.1"
+ version = "0.1.3"
  description = "Evaluation Framework for oncology foundation models."
  keywords = [
      "machine-learning",
@@ -172,15 +172,14 @@ class EmbeddingsWriter(callbacks.BasePredictionWriter, abc.ABC):

      def _check_if_exists(self) -> None:
          """Checks if the output directory already exists and if it should be overwritten."""
-         try:
-             os.makedirs(self._output_dir, exist_ok=self._overwrite)
-         except FileExistsError as e:
+         os.makedirs(self._output_dir, exist_ok=True)
+         if os.path.exists(os.path.join(self._output_dir, "manifest.csv")) and not self._overwrite:
              raise FileExistsError(
                  f"The embeddings output directory already exists: {self._output_dir}. This "
                  "either means that they have been computed before or that a wrong output "
                  "directory is being used. Consider using `eva fit` instead, selecting a "
                  "different output directory or setting overwrite=True."
-             ) from e
+             )
          os.makedirs(self._output_dir, exist_ok=True)

@@ -38,7 +38,7 @@ class DataLoader:
      Mutually exclusive with `batch_size`, `shuffle`, `sampler` and `drop_last`.
      """

-     num_workers: int = multiprocessing.cpu_count()
+     num_workers: int | None = None
      """How many workers to use for loading the data.

      By default, it will use the number of CPUs available.
@@ -71,7 +71,7 @@ class DataLoader:
          shuffle=self.shuffle,
          sampler=self.sampler,
          batch_sampler=self.batch_sampler,
-         num_workers=self.num_workers,
+         num_workers=self.num_workers or multiprocessing.cpu_count(),
          collate_fn=self.collate_fn,
          pin_memory=self.pin_memory,
          drop_last=self.drop_last,
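The `DataLoader` config above now defers worker resolution to build time. A minimal sketch of the new fallback behaviour, using only the standard library (the config dataclass itself is not constructed here):

```python
import multiprocessing

# New default: num_workers stays None until the torch DataLoader is built,
# at which point it falls back to the number of available CPUs.
num_workers: int | None = None
resolved_workers = num_workers or multiprocessing.cpu_count()
print(resolved_workers)  # e.g. 8 on an 8-core machine
```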
@@ -24,12 +24,13 @@ def random_split(
      Returns:
          The indices of the train, validation, and test sets as lists.
      """
-     if train_ratio + val_ratio + (test_ratio or 0) != 1:
-         raise ValueError("The sum of the ratios must be equal to 1.")
+     total_ratio = train_ratio + val_ratio + test_ratio
+     if total_ratio > 1.0:
+         raise ValueError("The sum of the ratios must be lower or equal to 1.")

-     np.random.seed(seed)
-     n_samples = len(samples)
-     indices = np.random.permutation(n_samples)
+     random_generator = np.random.default_rng(seed)
+     n_samples = int(total_ratio * len(samples))
+     indices = random_generator.permutation(len(samples))[:n_samples]

      n_train = int(np.floor(train_ratio * n_samples))
      n_val = n_samples - n_train if test_ratio == 0.0 else int(np.floor(val_ratio * n_samples)) or 1
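With the relaxed check above, the three ratios may now sum to less than 1, in which case only a random subset of the samples is assigned to the splits. A hedged usage sketch; the module path follows the file location in this diff, and keyword usage of the arguments is assumed:

```python
from eva.core.data.splitting.random import random_split

samples = list(range(1_000))
# Ratios summing to 0.4 no longer raise a ValueError; roughly 40% of the
# indices are drawn (seeded via numpy's default_rng) and then split.
train_idx, val_idx, test_idx = random_split(
    samples, train_ratio=0.2, val_ratio=0.1, test_ratio=0.1, seed=42
)
```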
@@ -28,10 +28,11 @@ def stratified_split(
      """
      if len(samples) != len(targets):
          raise ValueError("The number of samples and targets must be equal.")
-     if train_ratio + val_ratio + (test_ratio or 0) != 1:
-         raise ValueError("The sum of the ratios must be equal to 1.")
+     if train_ratio + val_ratio + (test_ratio or 0) > 1.0:
+         raise ValueError("The sum of the ratios must be lower or equal to 1.")

-     np.random.seed(seed)
+     use_all_samples = train_ratio + val_ratio + test_ratio == 1
+     random_generator = np.random.default_rng(seed)
      unique_classes, y_indices = np.unique(targets, return_inverse=True)
      n_classes = unique_classes.shape[0]

@@ -39,18 +40,23 @@ def stratified_split(

      for c in range(n_classes):
          class_indices = np.where(y_indices == c)[0]
-         np.random.shuffle(class_indices)
+         random_generator.shuffle(class_indices)

          n_train = int(np.floor(train_ratio * len(class_indices))) or 1
          n_val = (
              len(class_indices) - n_train
-             if test_ratio == 0.0
+             if test_ratio == 0.0 and use_all_samples
              else int(np.floor(val_ratio * len(class_indices))) or 1
          )

          train_indices.extend(class_indices[:n_train])
          val_indices.extend(class_indices[n_train : n_train + n_val])
          if test_ratio > 0.0:
-             test_indices.extend(class_indices[n_train + n_val :])
+             n_test = (
+                 len(class_indices) - n_train - n_val
+                 if use_all_samples
+                 else int(np.floor(test_ratio * len(class_indices))) or 1
+             )
+             test_indices.extend(class_indices[n_train + n_val : n_train + n_val + n_test])

      return train_indices, val_indices, test_indices or None
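The stratified variant gets the same relaxation, applied per class. A hedged sketch under the same import-path assumption:

```python
from eva.core.data.splitting.stratified import stratified_split

samples = list(range(100))
targets = [i % 2 for i in range(100)]  # two balanced classes of 50 samples each
# Ratios sum to 0.8, so each class contributes roughly 40 of its 50 samples,
# split into train/val/test with the class balance preserved.
train_idx, val_idx, test_idx = stratified_split(
    samples, targets, train_ratio=0.4, val_ratio=0.2, test_ratio=0.2, seed=7
)
```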
@@ -0,0 +1,5 @@
+ """Loss functions API."""
+
+ from eva.core.losses.cross_entropy import CrossEntropyLoss
+
+ __all__ = ["CrossEntropyLoss"]
@@ -0,0 +1,27 @@
+ """Cross-entropy based loss function."""
+
+ from typing import Sequence
+
+ import torch
+ from torch import nn
+
+
+ class CrossEntropyLoss(nn.CrossEntropyLoss):
+     """A wrapper around torch.nn.CrossEntropyLoss that accepts weights in list format.
+
+     Needed for .yaml file loading & class instantiation with jsonarparse.
+     """
+
+     def __init__(
+         self, *args, weight: Sequence[float] | torch.Tensor | None = None, **kwargs
+     ) -> None:
+         """Initialize the loss function.
+
+         Args:
+             args: Positional arguments from the base class.
+             weight: A list of weights to assign to each class.
+             kwargs: Key-word arguments from the base class.
+         """
+         if weight is not None and not isinstance(weight, torch.Tensor):
+             weight = torch.tensor(weight)
+         super().__init__(*args, **kwargs, weight=weight)
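The wrapper exists so that class weights can be written as a plain list in a YAML config and instantiated via jsonargparse; used programmatically it behaves like the torch loss. A short usage sketch:

```python
import torch
from eva.core.losses import CrossEntropyLoss

criterion = CrossEntropyLoss(weight=[1.0, 2.0, 0.5])  # list is converted to a tensor
logits = torch.randn(4, 3)
targets = torch.tensor([0, 2, 1, 1])
loss = criterion(logits, targets)
```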
@@ -3,8 +3,6 @@
  from eva.core.metrics.average_loss import AverageLoss
  from eva.core.metrics.binary_balanced_accuracy import BinaryBalancedAccuracy
  from eva.core.metrics.defaults import BinaryClassificationMetrics, MulticlassClassificationMetrics
- from eva.core.metrics.generalized_dice import GeneralizedDiceScore
- from eva.core.metrics.mean_iou import MeanIoU
  from eva.core.metrics.structs import Metric, MetricCollection, MetricModule, MetricsSchema

  __all__ = [
@@ -12,8 +10,6 @@ __all__ = [
      "BinaryBalancedAccuracy",
      "BinaryClassificationMetrics",
      "MulticlassClassificationMetrics",
-     "GeneralizedDiceScore",
-     "MeanIoU",
      "Metric",
      "MetricCollection",
      "MetricModule",
@@ -4,10 +4,8 @@ from eva.core.metrics.defaults.classification import (
      BinaryClassificationMetrics,
      MulticlassClassificationMetrics,
  )
- from eva.core.metrics.defaults.segmentation import MulticlassSegmentationMetrics

  __all__ = [
      "MulticlassClassificationMetrics",
      "BinaryClassificationMetrics",
-     "MulticlassSegmentationMetrics",
  ]
@@ -1,10 +1,10 @@
  """Base model module."""

+ import os
  from typing import Any, Mapping

  import lightning.pytorch as pl
  import torch
- from lightning.pytorch.strategies.single_device import SingleDeviceStrategy
  from lightning.pytorch.utilities import memory
  from lightning.pytorch.utilities.types import STEP_OUTPUT
  from typing_extensions import override
@@ -49,14 +49,14 @@ class ModelModule(pl.LightningModule):

      @property
      def metrics_device(self) -> torch.device:
-         """Returns the device by which the metrics should be calculated.
-
-         We allocate the metrics to CPU when operating on single device, as
-         it is much faster, but to GPU when employing multiple ones, as DDP
-         strategy requires the metrics to be allocated to the module's GPU.
-         """
-         move_to_cpu = isinstance(self.trainer.strategy, SingleDeviceStrategy)
-         return torch.device("cpu") if move_to_cpu else self.device
+         """Returns the device by which the metrics should be calculated."""
+         device = os.getenv("METRICS_DEVICE", None)
+         if device is not None:
+             return torch.device(device)
+         elif self.device.type == "mps":
+             # mps seems to have compatibility issues with segmentation metrics
+             return torch.device("cpu")
+         return self.device

      @override
      def on_fit_start(self) -> None:
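The metrics device is now controlled by an environment variable rather than the trainer strategy: `METRICS_DEVICE` wins if set, MPS falls back to CPU, and everything else uses the module's own device. A small sketch of forcing metric state onto the CPU:

```python
import os

# Set before launching the eva run (or export it in the shell);
# ModelModule.metrics_device will then return torch.device("cpu")
# regardless of where the model itself lives.
os.environ["METRICS_DEVICE"] = "cpu"
```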
@@ -0,0 +1,41 @@
+ """Transforms for extracting the CLS output from a model output."""
+
+ import torch
+ from transformers import modeling_outputs
+
+
+ class ExtractCLSFeatures:
+     """Extracts the CLS token from a ViT model output."""
+
+     def __init__(
+         self, cls_index: int = 0, num_register_tokens: int = 0, include_patch_tokens: bool = False
+     ) -> None:
+         """Initializes the transformation.
+
+         Args:
+             cls_index: The index of the CLS token in the output tensor.
+             num_register_tokens: The number of register tokens in the model output.
+             include_patch_tokens: Whether to concat the mean aggregated patch tokens with
+                 the cls token.
+         """
+         self._cls_index = cls_index
+         self._num_register_tokens = num_register_tokens
+         self._include_patch_tokens = include_patch_tokens
+
+     def __call__(
+         self, tensor: torch.Tensor | modeling_outputs.BaseModelOutputWithPooling
+     ) -> torch.Tensor:
+         """Call method for the transformation.
+
+         Args:
+             tensor: The tensor representing the model output.
+         """
+         if isinstance(tensor, modeling_outputs.BaseModelOutputWithPooling):
+             tensor = tensor.last_hidden_state
+
+         cls_token = tensor[:, self._cls_index, :]
+         if self._include_patch_tokens:
+             patch_tokens = tensor[:, 1 + self._num_register_tokens :, :]
+             return torch.cat([cls_token, patch_tokens.mean(1)], dim=-1)
+
+         return cls_token
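A usage sketch for the new transform with a dummy ViT-style token sequence (batch of 2, one CLS token plus a 14x14 patch grid, hidden size 384); the import path mirrors the file location above:

```python
import torch
from eva.core.models.transforms.extract_cls_features import ExtractCLSFeatures

tokens = torch.randn(2, 197, 384)  # [CLS] + 196 patch tokens
transform = ExtractCLSFeatures(include_patch_tokens=True)
features = transform(tokens)
print(features.shape)  # torch.Size([2, 768]): CLS concatenated with the mean patch token
```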
@@ -0,0 +1,59 @@
+ """Transforms for extracting the patch features from a model output."""
+
+ import math
+ from typing import List
+
+ import torch
+ from transformers import modeling_outputs
+
+
+ class ExtractPatchFeatures:
+     """Extracts the patch features from a ViT model output."""
+
+     def __init__(
+         self,
+         has_cls_token: bool = True,
+         num_register_tokens: int = 0,
+         ignore_remaining_dims: bool = False,
+     ) -> None:
+         """Initializes the transformation.
+
+         Args:
+             has_cls_token: If set to `True`, the model output is expected to have
+                 a classification token.
+             num_register_tokens: The number of register tokens in the model output.
+             ignore_remaining_dims: If set to `True`, ignore the remaining dimensions
+                 of the patch grid if it is not a square number.
+         """
+         self._has_cls_token = has_cls_token
+         self._num_register_tokens = num_register_tokens
+         self._ignore_remaining_dims = ignore_remaining_dims
+
+     def __call__(
+         self, tensor: torch.Tensor | modeling_outputs.BaseModelOutputWithPooling
+     ) -> List[torch.Tensor]:
+         """Call method for the transformation.
+
+         Args:
+             tensor: The raw embeddings of the model.
+
+         Returns:
+             A tensor (batch_size, hidden_size, n_patches_height, n_patches_width)
+             representing the model output.
+         """
+         num_skip = int(self._has_cls_token) + self._num_register_tokens
+         if isinstance(tensor, modeling_outputs.BaseModelOutputWithPooling):
+             features = tensor.last_hidden_state[:, num_skip:, :].permute(0, 2, 1)
+         else:
+             features = tensor[:, num_skip:, :].permute(0, 2, 1)
+
+         batch_size, hidden_size, patch_grid = features.shape
+         height = width = int(math.sqrt(patch_grid))
+         if height * width != patch_grid:
+             if self._ignore_remaining_dims:
+                 features = features[:, :, -height * width :]
+             else:
+                 raise ValueError(f"Patch grid size must be a square number {patch_grid}.")
+         patch_embeddings = features.view(batch_size, hidden_size, height, width)
+
+         return [patch_embeddings]
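And the companion transform that reshapes the patch tokens into a spatial feature map, shown on the same dummy input:

```python
import torch
from eva.core.models.transforms.extract_patch_features import ExtractPatchFeatures

tokens = torch.randn(2, 197, 384)  # [CLS] + 196 patch tokens
transform = ExtractPatchFeatures(has_cls_token=True)
(patch_map,) = transform(tokens)  # the transform returns a single-element list
print(patch_map.shape)  # torch.Size([2, 384, 14, 14])
```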
@@ -0,0 +1,15 @@
+ """Progress bar utility functions."""
+
+ import os
+
+ from tqdm import tqdm as _tqdm
+
+
+ def tqdm(*args, **kwargs) -> _tqdm:
+     """Wrapper function for `tqdm.tqdm`."""
+     refresh_rate = os.environ.get("TQDM_REFRESH_RATE")
+     refresh_rate = int(refresh_rate) if refresh_rate is not None else None
+     disable = bool(int(os.environ.get("TQDM_DISABLE", 0))) or (refresh_rate == 0)
+     kwargs.setdefault("disable", disable)
+     kwargs.setdefault("miniters", refresh_rate)
+     return _tqdm(*args, **kwargs)
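The wrapper reads both environment variables at call time, so progress bars can be silenced or throttled without touching any config. A short sketch:

```python
import os

os.environ["TQDM_DISABLE"] = "1"  # hide progress bars entirely
# os.environ["TQDM_REFRESH_RATE"] = "50"  # or: refresh roughly every 50 iterations

from eva.core.utils.progress_bar import tqdm

for _ in tqdm(range(1_000)):
    pass
```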
@@ -6,6 +6,7 @@ from eva.vision.data.datasets.classification import (
      MHIST,
      PANDA,
      Camelyon16,
+     PANDASmall,
      PatchCamelyon,
      WsiClassificationDataset,
  )
@@ -15,6 +16,7 @@ from eva.vision.data.datasets.segmentation import (
      EmbeddingsSegmentationDataset,
      ImageSegmentation,
      LiTS,
+     LiTSBalanced,
      MoNuSAC,
      TotalSegmentator2D,
  )
@@ -27,6 +29,7 @@ __all__ = [
      "CRC",
      "MHIST",
      "PANDA",
+     "PANDASmall",
      "Camelyon16",
      "PatchCamelyon",
      "WsiClassificationDataset",
@@ -34,6 +37,7 @@ __all__ = [
      "EmbeddingsSegmentationDataset",
      "ImageSegmentation",
      "LiTS",
+     "LiTSBalanced",
      "MoNuSAC",
      "TotalSegmentator2D",
      "VisionDataset",
@@ -4,7 +4,7 @@ from eva.vision.data.datasets.classification.bach import BACH
  from eva.vision.data.datasets.classification.camelyon16 import Camelyon16
  from eva.vision.data.datasets.classification.crc import CRC
  from eva.vision.data.datasets.classification.mhist import MHIST
- from eva.vision.data.datasets.classification.panda import PANDA
+ from eva.vision.data.datasets.classification.panda import PANDA, PANDASmall
  from eva.vision.data.datasets.classification.patch_camelyon import PatchCamelyon
  from eva.vision.data.datasets.classification.wsi import WsiClassificationDataset

@@ -15,5 +15,6 @@ __all__ = [
      "PatchCamelyon",
      "WsiClassificationDataset",
      "PANDA",
+     "PANDASmall",
      "Camelyon16",
  ]
@@ -87,6 +87,7 @@ class Camelyon16(wsi.MultiWsiDataset, base.ImageClassification):
          target_mpp: float = 0.5,
          backend: str = "openslide",
          image_transforms: Callable | None = None,
+         coords_path: str | None = None,
          seed: int = 42,
      ) -> None:
          """Initializes the dataset.
@@ -100,6 +101,7 @@ class Camelyon16(wsi.MultiWsiDataset, base.ImageClassification):
              target_mpp: Target microns per pixel (mpp) for the patches.
              backend: The backend to use for reading the whole-slide images.
              image_transforms: Transforms to apply to the extracted image patches.
+             coords_path: File path to save the patch coordinates as .csv.
              seed: Random seed for reproducibility.
          """
          self._split = split
@@ -119,6 +121,7 @@ class Camelyon16(wsi.MultiWsiDataset, base.ImageClassification):
              target_mpp=target_mpp,
              backend=backend,
              image_transforms=image_transforms,
+             coords_path=coords_path,
          )

      @property
@@ -207,7 +210,7 @@ class Camelyon16(wsi.MultiWsiDataset, base.ImageClassification):

      @override
      def load_metadata(self, index: int) -> Dict[str, Any]:
-         return {"wsi_id": self.filename(index).split(".")[0]}
+         return wsi.MultiWsiDataset.load_metadata(self, index)

      def _load_file_paths(self, split: Literal["train", "val", "test"] | None = None) -> List[str]:
          """Loads the file paths of the corresponding dataset split."""
@@ -49,6 +49,7 @@ class PANDA(wsi.MultiWsiDataset, base.ImageClassification):
          target_mpp: float = 0.5,
          backend: str = "openslide",
          image_transforms: Callable | None = None,
+         coords_path: str | None = None,
          seed: int = 42,
      ) -> None:
          """Initializes the dataset.
@@ -62,6 +63,7 @@ class PANDA(wsi.MultiWsiDataset, base.ImageClassification):
              target_mpp: Target microns per pixel (mpp) for the patches.
              backend: The backend to use for reading the whole-slide images.
              image_transforms: Transforms to apply to the extracted image patches.
+             coords_path: File path to save the patch coordinates as .csv.
              seed: Random seed for reproducibility.
          """
          self._split = split
@@ -80,6 +82,7 @@ class PANDA(wsi.MultiWsiDataset, base.ImageClassification):
              target_mpp=target_mpp,
              backend=backend,
              image_transforms=image_transforms,
+             coords_path=coords_path,
          )

      @property
@@ -132,7 +135,7 @@ class PANDA(wsi.MultiWsiDataset, base.ImageClassification):

      @override
      def load_metadata(self, index: int) -> Dict[str, Any]:
-         return {"wsi_id": self.filename(index).split(".")[0]}
+         return wsi.MultiWsiDataset.load_metadata(self, index)

      def _load_file_paths(self, split: Literal["train", "val", "test"] | None = None) -> List[str]:
          """Loads the file paths of the corresponding dataset split."""
@@ -182,3 +185,16 @@ class PANDA(wsi.MultiWsiDataset, base.ImageClassification):

      def _get_id_from_path(self, file_path: str) -> str:
          return os.path.basename(file_path).replace(".tiff", "")
+
+
+ class PANDASmall(PANDA):
+     """Small version of the PANDA dataset for quicker benchmarking."""
+
+     _train_split_ratio: float = 0.1
+     """Train split ratio."""
+
+     _val_split_ratio: float = 0.05
+     """Validation split ratio."""
+
+     _test_split_ratio: float = 0.05
+     """Test split ratio."""
@@ -35,6 +35,7 @@ class WsiClassificationDataset(wsi.MultiWsiDataset, base.ImageClassification):
          split: Literal["train", "val", "test"] | None = None,
          image_transforms: Callable | None = None,
          column_mapping: Dict[str, str] = default_column_mapping,
+         coords_path: str | None = None,
      ):
          """Initializes the dataset.

@@ -51,6 +52,7 @@ class WsiClassificationDataset(wsi.MultiWsiDataset, base.ImageClassification):
              split: The split of the dataset to load.
              image_transforms: Transforms to apply to the extracted image patches.
              column_mapping: Mapping of the columns in the manifest file.
+             coords_path: File path to save the patch coordinates as .csv.
          """
          self._split = split
          self._column_mapping = self.default_column_mapping | column_mapping
@@ -66,6 +68,7 @@ class WsiClassificationDataset(wsi.MultiWsiDataset, base.ImageClassification):
              target_mpp=target_mpp,
              backend=backend,
              image_transforms=image_transforms,
+             coords_path=coords_path,
          )

      @override
@@ -88,7 +91,7 @@ class WsiClassificationDataset(wsi.MultiWsiDataset, base.ImageClassification):

      @override
      def load_metadata(self, index: int) -> Dict[str, Any]:
-         return {"wsi_id": self.filename(index).split(".")[0]}
+         return wsi.MultiWsiDataset.load_metadata(self, index)

      def _load_manifest(self, manifest_path: str) -> pd.DataFrame:
          df = pd.read_csv(manifest_path)
@@ -5,6 +5,7 @@ from eva.vision.data.datasets.segmentation.bcss import BCSS
  from eva.vision.data.datasets.segmentation.consep import CoNSeP
  from eva.vision.data.datasets.segmentation.embeddings import EmbeddingsSegmentationDataset
  from eva.vision.data.datasets.segmentation.lits import LiTS
+ from eva.vision.data.datasets.segmentation.lits_balanced import LiTSBalanced
  from eva.vision.data.datasets.segmentation.monusac import MoNuSAC
  from eva.vision.data.datasets.segmentation.total_segmentator_2d import TotalSegmentator2D

@@ -14,6 +15,7 @@ __all__ = [
      "CoNSeP",
      "EmbeddingsSegmentationDataset",
      "LiTS",
+     "LiTSBalanced",
      "MoNuSAC",
      "TotalSegmentator2D",
  ]
@@ -37,8 +37,8 @@ class CoNSeP(wsi.MultiWsiDataset, base.ImageSegmentation):
          root: str,
          sampler: samplers.Sampler | None = None,
          split: Literal["train", "val"] | None = None,
-         width: int = 224,
-         height: int = 224,
+         width: int = 250,
+         height: int = 250,
          target_mpp: float = 0.25,
          transforms: Callable | None = None,
      ) -> None: